diff --git a/CHANGELOG.md b/CHANGELOG.md index 37b57765497..7f4fcb60327 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,17 @@ +Release v1.38.16 (2021-04-08) +=== + +### Service Client Updates +* `service/appstream`: Updates service API and documentation + * This release provides support for image updates +* `service/autoscaling`: Updates service API, documentation, and examples + * Amazon EC2 Auto Scaling announces Warm Pools that help applications to scale out faster by pre-initializing EC2 instances and save money by requiring fewer continuously running instances +* `service/customer-profiles`: Updates service documentation +* `service/kinesis-video-archived-media`: Updates service documentation +* `service/lookoutequipment`: Adds new service +* `service/ram`: Updates service documentation +* `service/robomaker`: Updates service API and documentation + Release v1.38.15 (2021-04-07) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index ab819d0ae10..978c077e774 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -4832,6 +4832,22 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "personalize": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "pinpoint": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -7810,7 +7826,8 @@ var awscnPartition = partition{ "lakeformation": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "lambda": service{ @@ -7878,6 +7895,12 @@ var awscnPartition = partition{ }, }, }, + "personalize": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "polly": service{ Endpoints: endpoints{ diff --git a/aws/version.go b/aws/version.go index 2dd2f7a9370..04056a065b1 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.38.15" +const SDKVersion = "1.38.16" diff --git a/models/apis/appstream/2016-12-01/api-2.json b/models/apis/appstream/2016-12-01/api-2.json index 407874799e7..8ba6464f7c1 100644 --- a/models/apis/appstream/2016-12-01/api-2.json +++ b/models/apis/appstream/2016-12-01/api-2.json @@ -180,6 +180,24 @@ {"shape":"InvalidParameterCombinationException"} ] }, + "CreateUpdatedImage":{ + "name":"CreateUpdatedImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateUpdatedImageRequest"}, + "output":{"shape":"CreateUpdatedImageResult"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"InvalidAccountStatusException"}, + {"shape":"OperationNotPermittedException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"IncompatibleImageException"} + ] + }, "CreateUsageReportSubscription":{ "name":"CreateUsageReportSubscription", "http":{ @@ -1015,6 +1033,28 @@ "Expires":{"shape":"Timestamp"} } }, + "CreateUpdatedImageRequest":{ + "type":"structure", + "required":[ + "existingImageName", + "newImageName" + ], + "members":{ + "existingImageName":{"shape":"Name"}, + "newImageName":{"shape":"Name"}, + 
"newImageDescription":{"shape":"Description"}, + "newImageDisplayName":{"shape":"DisplayName"}, + "newImageTags":{"shape":"Tags"}, + "dryRun":{"shape":"Boolean"} + } + }, + "CreateUpdatedImageResult":{ + "type":"structure", + "members":{ + "image":{"shape":"Image"}, + "canUpdateImage":{"shape":"Boolean"} + } + }, "CreateUsageReportSubscriptionRequest":{ "type":"structure", "members":{ @@ -1568,7 +1608,8 @@ "CreatedTime":{"shape":"Timestamp"}, "PublicBaseImageReleasedDate":{"shape":"Timestamp"}, "AppstreamAgentVersion":{"shape":"AppstreamAgentVersion"}, - "ImagePermissions":{"shape":"ImagePermissions"} + "ImagePermissions":{"shape":"ImagePermissions"}, + "ImageErrors":{"shape":"ResourceErrors"} } }, "ImageBuilder":{ @@ -1610,7 +1651,9 @@ "REBOOTING", "SNAPSHOTTING", "DELETING", - "FAILED" + "FAILED", + "UPDATING", + "PENDING_QUALIFICATION" ] }, "ImageBuilderStateChangeReason":{ diff --git a/models/apis/appstream/2016-12-01/docs-2.json b/models/apis/appstream/2016-12-01/docs-2.json index de996874650..a572b019cb2 100644 --- a/models/apis/appstream/2016-12-01/docs-2.json +++ b/models/apis/appstream/2016-12-01/docs-2.json @@ -12,6 +12,7 @@ "CreateImageBuilderStreamingURL": "

Creates a URL to start an image builder streaming session.

", "CreateStack": "

Creates a stack to start streaming applications to users. A stack consists of an associated fleet, user access policies, and storage configurations.

", "CreateStreamingURL": "

Creates a temporary URL to start an AppStream 2.0 streaming session for the specified user. A streaming URL enables application streaming to be tested without user setup.

", + "CreateUpdatedImage": "

Creates a new image with the latest Windows operating system updates, driver updates, and AppStream 2.0 agent software.

For more information, see the \"Update an Image by Using Managed AppStream 2.0 Image Updates\" section in Administer Your AppStream 2.0 Images, in the Amazon AppStream 2.0 Administration Guide.
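For SDK users, a minimal Go sketch of calling the new operation (the image names and Region are hypothetical; with DryRun set to true the call only reports whether updates are available):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appstream"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := appstream.New(sess)

	// With DryRun set to true, AppStream 2.0 reports whether image updates
	// are available without actually creating the new image.
	out, err := svc.CreateUpdatedImage(&appstream.CreateUpdatedImageInput{
		ExistingImageName: aws.String("my-existing-image"), // hypothetical name
		NewImageName:      aws.String("my-existing-image-updated"),
		DryRun:            aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("updates available:", aws.BoolValue(out.CanUpdateImage))
}
```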

", "CreateUsageReportSubscription": "

Creates a usage report subscription. Usage reports are generated daily.

", "CreateUser": "

Creates a new user in the user pool.

", "DeleteDirectoryConfig": "

Deletes the specified Directory Config object from AppStream 2.0. This object includes the information required to join streaming instances to an Active Directory domain.

", @@ -222,6 +223,8 @@ "Application$Enabled": "

If there is a problem, the application can be disabled after image creation.

", "ApplicationSettings$Enabled": "

Enables or disables persistent application settings for users during their streaming sessions.

", "ApplicationSettingsResponse$Enabled": "

Specifies whether persistent application settings are enabled for users during their streaming sessions.

", + "CreateUpdatedImageRequest$dryRun": "

Indicates whether to display the status of image update availability before AppStream 2.0 initiates the process of creating the new updated image. If set to true, AppStream 2.0 displays whether image updates are available without creating the new image. If set to false, AppStream 2.0 initiates image creation without displaying whether updates are available.

", + "CreateUpdatedImageResult$canUpdateImage": "

Indicates whether a new image can be created.

", "Image$ImageBuilderSupported": "

Indicates whether an image builder can be launched from this image.

", "UpdateFleetRequest$DeleteVpcConfig": "

Deletes the VPC association for the specified fleet.

", "UpdateStackRequest$DeleteStorageConnectors": "

Deletes the storage connectors currently enabled for the stack.

", @@ -329,6 +332,16 @@ "refs": { } }, + "CreateUpdatedImageRequest": { + "base": null, + "refs": { + } + }, + "CreateUpdatedImageResult": { + "base": null, + "refs": { + } + }, "CreateUsageReportSubscriptionRequest": { "base": null, "refs": { @@ -542,6 +555,7 @@ "CreateFleetRequest$Description": "

The description to display.

", "CreateImageBuilderRequest$Description": "

The description to display.

", "CreateStackRequest$Description": "

The description to display.

", + "CreateUpdatedImageRequest$newImageDescription": "

The description to display for the new image.

", "UpdateFleetRequest$Description": "

The description to display.

", "UpdateStackRequest$Description": "

The description to display.

" } @@ -603,6 +617,7 @@ "CreateFleetRequest$DisplayName": "

The fleet name to display.

", "CreateImageBuilderRequest$DisplayName": "

The image builder name to display.

", "CreateStackRequest$DisplayName": "

The stack name to display.

", + "CreateUpdatedImageRequest$newImageDisplayName": "

The name to display for the new image.

", "UpdateFleetRequest$DisplayName": "

The fleet name to display.

", "UpdateStackRequest$DisplayName": "

The stack name to display.

" } @@ -749,6 +764,7 @@ "Image": { "base": "

Describes an image.

", "refs": { + "CreateUpdatedImageResult$image": null, "DeleteImageResult$Image": "

Information about the image.

", "ImageList$member": null } @@ -820,7 +836,7 @@ } }, "IncompatibleImageException": { - "base": "

The image does not support storage connectors.

", + "base": "

The image can't be updated because it isn't compatible with updates.

", "refs": { } }, @@ -945,6 +961,8 @@ "CreateFleetRequest$Name": "

A unique name for the fleet.

", "CreateImageBuilderRequest$Name": "

A unique name for the image builder.

", "CreateStackRequest$Name": "

The name of the stack.

", + "CreateUpdatedImageRequest$existingImageName": "

The name of the image to update.

", + "CreateUpdatedImageRequest$newImageName": "

The name of the new image. The name must be unique within the AWS account and Region.

", "DeleteImageBuilderRequest$Name": "

The name of the image builder.

", "DeleteImagePermissionsRequest$Name": "

The name of the private image.

", "DeleteImageRequest$Name": "

The name of the image.

", @@ -1026,6 +1044,7 @@ "ResourceErrors": { "base": null, "refs": { + "Image$ImageErrors": "

Describes the errors that are returned when a new image can't be created.

", "ImageBuilder$ImageBuilderErrors": "

The image builder errors.

" } }, @@ -1381,6 +1400,7 @@ "CreateFleetRequest$Tags": "

The tags to associate with the fleet. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

If you do not specify a value, the value is set to an empty string.

Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following special characters:

_ . : / = + \\ - @

For more information, see Tagging Your Resources in the Amazon AppStream 2.0 Administration Guide.

", "CreateImageBuilderRequest$Tags": "

The tags to associate with the image builder. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following special characters:

_ . : / = + \\ - @

If you do not specify a value, the value is set to an empty string.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Administration Guide.

", "CreateStackRequest$Tags": "

The tags to associate with the stack. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

If you do not specify a value, the value is set to an empty string.

Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following special characters:

_ . : / = + \\ - @

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Administration Guide.

", + "CreateUpdatedImageRequest$newImageTags": "

The tags to associate with the new image. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following special characters:

_ . : / = + \\ - @

If you do not specify a value, the value is set to an empty string.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Administration Guide.

", "ListTagsForResourceResponse$Tags": "

The information about the tags.

", "TagResourceRequest$Tags": "

The tags to associate. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

If you do not specify a value, the value is set to an empty string.

Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following special characters:

_ . : / = + \\ - @

" } diff --git a/models/apis/autoscaling/2011-01-01/api-2.json b/models/apis/autoscaling/2011-01-01/api-2.json index c8ee7a98d79..59a1a0e6f7b 100644 --- a/models/apis/autoscaling/2011-01-01/api-2.json +++ b/models/apis/autoscaling/2011-01-01/api-2.json @@ -246,6 +246,24 @@ {"shape":"ResourceInUseFault"} ] }, + "DeleteWarmPool":{ + "name":"DeleteWarmPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteWarmPoolType"}, + "output":{ + "shape":"DeleteWarmPoolAnswer", + "resultWrapper":"DeleteWarmPoolResult" + }, + "errors":[ + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"}, + {"shape":"ScalingActivityInProgressFault"}, + {"shape":"ResourceInUseFault"} + ] + }, "DescribeAccountLimits":{ "name":"DescribeAccountLimits", "http":{ @@ -534,6 +552,23 @@ {"shape":"ResourceContentionFault"} ] }, + "DescribeWarmPool":{ + "name":"DescribeWarmPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeWarmPoolType"}, + "output":{ + "shape":"DescribeWarmPoolAnswer", + "resultWrapper":"DescribeWarmPoolResult" + }, + "errors":[ + {"shape":"InvalidNextToken"}, + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"} + ] + }, "DetachInstances":{ "name":"DetachInstances", "http":{ @@ -702,6 +737,22 @@ {"shape":"ResourceContentionFault"} ] }, + "PutWarmPool":{ + "name":"PutWarmPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutWarmPoolType"}, + "output":{ + "shape":"PutWarmPoolAnswer", + "resultWrapper":"PutWarmPoolResult" + }, + "errors":[ + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"} + ] + }, "RecordLifecycleActionHeartbeat":{ "name":"RecordLifecycleActionHeartbeat", "http":{ @@ -1005,7 +1056,9 @@ "NewInstancesProtectedFromScaleIn":{"shape":"InstanceProtected"}, "ServiceLinkedRoleARN":{"shape":"ResourceName"}, "MaxInstanceLifetime":{"shape":"MaxInstanceLifetime"}, - "CapacityRebalance":{"shape":"CapacityRebalanceEnabled"} + "CapacityRebalance":{"shape":"CapacityRebalanceEnabled"}, + "WarmPoolConfiguration":{"shape":"WarmPoolConfiguration"}, + "WarmPoolSize":{"shape":"WarmPoolSize"} } }, "AutoScalingGroupDesiredCapacity":{"type":"integer"}, @@ -1336,6 +1389,19 @@ "Tags":{"shape":"Tags"} } }, + "DeleteWarmPoolAnswer":{ + "type":"structure", + "members":{ + } + }, + "DeleteWarmPoolType":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{"shape":"XmlStringMaxLen255"}, + "ForceDelete":{"shape":"ForceDelete"} + } + }, "DescribeAccountLimitsAnswer":{ "type":"structure", "members":{ @@ -1502,6 +1568,23 @@ "TerminationPolicyTypes":{"shape":"TerminationPolicies"} } }, + "DescribeWarmPoolAnswer":{ + "type":"structure", + "members":{ + "WarmPoolConfiguration":{"shape":"WarmPoolConfiguration"}, + "Instances":{"shape":"Instances"}, + "NextToken":{"shape":"XmlString"} + } + }, + "DescribeWarmPoolType":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{"shape":"XmlStringMaxLen255"}, + "MaxRecords":{"shape":"MaxRecords"}, + "NextToken":{"shape":"XmlString"} + } + }, "DetachInstancesAnswer":{ "type":"structure", "members":{ @@ -1739,7 +1822,8 @@ "StartTime":{"shape":"TimestampType"}, "EndTime":{"shape":"TimestampType"}, "PercentageComplete":{"shape":"IntPercent"}, - "InstancesToUpdate":{"shape":"InstancesToUpdate"} + "InstancesToUpdate":{"shape":"InstancesToUpdate"}, + "ProgressDetails":{"shape":"InstanceRefreshProgressDetails"} } }, "InstanceRefreshIds":{ @@ -1758,6 
+1842,20 @@ }, "exception":true }, + "InstanceRefreshLivePoolProgress":{ + "type":"structure", + "members":{ + "PercentageComplete":{"shape":"IntPercent"}, + "InstancesToUpdate":{"shape":"InstancesToUpdate"} + } + }, + "InstanceRefreshProgressDetails":{ + "type":"structure", + "members":{ + "LivePoolProgress":{"shape":"InstanceRefreshLivePoolProgress"}, + "WarmPoolProgress":{"shape":"InstanceRefreshWarmPoolProgress"} + } + }, "InstanceRefreshStatus":{ "type":"string", "enum":[ @@ -1769,6 +1867,13 @@ "Cancelled" ] }, + "InstanceRefreshWarmPoolProgress":{ + "type":"structure", + "members":{ + "PercentageComplete":{"shape":"IntPercent"}, + "InstancesToUpdate":{"shape":"InstancesToUpdate"} + } + }, "InstanceRefreshes":{ "type":"list", "member":{"shape":"InstanceRefresh"} @@ -1964,7 +2069,16 @@ "Detaching", "Detached", "EnteringStandby", - "Standby" + "Standby", + "Warmed:Pending", + "Warmed:Pending:Wait", + "Warmed:Pending:Proceed", + "Warmed:Terminating", + "Warmed:Terminating:Wait", + "Warmed:Terminating:Proceed", + "Warmed:Terminated", + "Warmed:Stopped", + "Warmed:Running" ] }, "LifecycleTransition":{"type":"string"}, @@ -2006,6 +2120,10 @@ "type":"list", "member":{"shape":"LoadBalancerTargetGroupState"} }, + "MaxGroupPreparedCapacity":{ + "type":"integer", + "min":-1 + }, "MaxInstanceLifetime":{"type":"integer"}, "MaxNumberOfAutoScalingGroups":{"type":"integer"}, "MaxNumberOfLaunchConfigurations":{"type":"integer"}, @@ -2255,6 +2373,21 @@ "TimeZone":{"shape":"XmlStringMaxLen255"} } }, + "PutWarmPoolAnswer":{ + "type":"structure", + "members":{ + } + }, + "PutWarmPoolType":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{"shape":"XmlStringMaxLen255"}, + "MaxGroupPreparedCapacity":{"shape":"MaxGroupPreparedCapacity"}, + "MinSize":{"shape":"WarmPoolMinSize"}, + "PoolState":{"shape":"WarmPoolState"} + } + }, "RecordLifecycleActionHeartbeatAnswer":{ "type":"structure", "members":{ @@ -2642,6 +2775,31 @@ "type":"list", "member":{"shape":"XmlString"} }, + "WarmPoolConfiguration":{ + "type":"structure", + "members":{ + "MaxGroupPreparedCapacity":{"shape":"MaxGroupPreparedCapacity"}, + "MinSize":{"shape":"WarmPoolMinSize"}, + "PoolState":{"shape":"WarmPoolState"}, + "Status":{"shape":"WarmPoolStatus"} + } + }, + "WarmPoolMinSize":{ + "type":"integer", + "min":0 + }, + "WarmPoolSize":{"type":"integer"}, + "WarmPoolState":{ + "type":"string", + "enum":[ + "Stopped", + "Running" + ] + }, + "WarmPoolStatus":{ + "type":"string", + "enum":["PendingDelete"] + }, "XmlString":{ "type":"string", "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" diff --git a/models/apis/autoscaling/2011-01-01/docs-2.json b/models/apis/autoscaling/2011-01-01/docs-2.json index ec6b09e0c85..c0c7c664c2f 100644 --- a/models/apis/autoscaling/2011-01-01/docs-2.json +++ b/models/apis/autoscaling/2011-01-01/docs-2.json @@ -19,6 +19,7 @@ "DeletePolicy": "

Deletes the specified scaling policy.

Deleting either a step scaling policy or a simple scaling policy deletes the underlying alarm action, but does not delete the alarm, even if it no longer has an associated action.

For more information, see Deleting a scaling policy in the Amazon EC2 Auto Scaling User Guide.

", "DeleteScheduledAction": "

Deletes the specified scheduled action.

", "DeleteTags": "

Deletes the specified tags.

", + "DeleteWarmPool": "

Deletes the warm pool for the specified Auto Scaling group.

", "DescribeAccountLimits": "

Describes the current Amazon EC2 Auto Scaling resource quotas for your AWS account.

For information about requesting an increase, see Amazon EC2 Auto Scaling service quotas in the Amazon EC2 Auto Scaling User Guide.

", "DescribeAdjustmentTypes": "

Describes the available adjustment types for Amazon EC2 Auto Scaling scaling policies. These settings apply to step scaling policies and simple scaling policies; they do not apply to target tracking scaling policies.

The following adjustment types are supported:

", "DescribeAutoScalingGroups": "

Describes one or more Auto Scaling groups.

", @@ -38,6 +39,7 @@ "DescribeScheduledActions": "

Describes the actions scheduled for your Auto Scaling group that haven't run or that have not reached their end time. To describe the actions that have already run, call the DescribeScalingActivities API.

", "DescribeTags": "

Describes the specified tags.

You can use filters to limit the results. For example, you can query for the tags for a specific Auto Scaling group. You can specify multiple values for a filter. A tag must match at least one of the specified values for it to be included in the results.

You can also specify multiple filters. The result includes information for a particular tag only if it matches all the filters. If there's no match, no special message is returned.

For more information, see Tagging Auto Scaling groups and instances in the Amazon EC2 Auto Scaling User Guide.

", "DescribeTerminationPolicyTypes": "

Describes the termination policies supported by Amazon EC2 Auto Scaling.

For more information, see Controlling which Auto Scaling instances terminate during scale in in the Amazon EC2 Auto Scaling User Guide.

", + "DescribeWarmPool": "

Describes a warm pool and its instances.
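A sketch of paging through the warm pool with the Go SDK (the group name is hypothetical; the loop follows NextToken because a single call returns at most 50 instances):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := autoscaling.New(sess)

	input := &autoscaling.DescribeWarmPoolInput{
		AutoScalingGroupName: aws.String("my-auto-scaling-group"), // hypothetical group name
		MaxRecords:           aws.Int64(50),
	}
	for {
		out, err := svc.DescribeWarmPool(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, inst := range out.Instances {
			// Warm pool instances report the new Warmed:* lifecycle states,
			// for example Warmed:Stopped or Warmed:Running.
			fmt.Println(aws.StringValue(inst.InstanceId), aws.StringValue(inst.LifecycleState))
		}
		if out.NextToken == nil {
			break
		}
		input.NextToken = out.NextToken
	}
}
```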

", "DetachInstances": "

Removes one or more instances from the specified Auto Scaling group.

After the instances are detached, you can manage them independent of the Auto Scaling group.

If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are detached.

If there is a Classic Load Balancer attached to the Auto Scaling group, the instances are deregistered from the load balancer. If there are target groups attached to the Auto Scaling group, the instances are deregistered from the target groups.

For more information, see Detach EC2 instances from your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

", "DetachLoadBalancerTargetGroups": "

Detaches one or more target groups from the specified Auto Scaling group.

", "DetachLoadBalancers": "

Detaches one or more Classic Load Balancers from the specified Auto Scaling group.

This operation detaches only Classic Load Balancers. If you have Application Load Balancers, Network Load Balancers, or Gateway Load Balancers, use the DetachLoadBalancerTargetGroups API instead.

When you detach a load balancer, it enters the Removing state while deregistering the instances in the group. When all instances are deregistered, then you can no longer describe the load balancer using the DescribeLoadBalancers API call. The instances remain running.

", @@ -50,14 +52,15 @@ "PutNotificationConfiguration": "

Configures an Auto Scaling group to send notifications when specified events take place. Subscribers to the specified topic can have messages delivered to an endpoint such as a web server or an email address.

This configuration overwrites any existing configuration.

For more information, see Getting Amazon SNS notifications when your Auto Scaling group scales in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of SNS topics, which is 10 per Auto Scaling group, the call fails.

", "PutScalingPolicy": "

Creates or updates a scaling policy for an Auto Scaling group.

For more information about using scaling policies to scale your Auto Scaling group, see Target tracking scaling policies and Step and simple scaling policies in the Amazon EC2 Auto Scaling User Guide.

", "PutScheduledUpdateGroupAction": "

Creates or updates a scheduled scaling action for an Auto Scaling group.

For more information, see Scheduled scaling in the Amazon EC2 Auto Scaling User Guide.

", - "RecordLifecycleActionHeartbeat": "

Records a heartbeat for the lifecycle action associated with the specified token or instance. This extends the timeout by the length of time defined using the PutLifecycleHook API call.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Auto Scaling lifecycle in the Amazon EC2 Auto Scaling User Guide.

", + "PutWarmPool": "

Adds a warm pool to the specified Auto Scaling group. A warm pool is a pool of pre-initialized EC2 instances that sits alongside the Auto Scaling group. Whenever your application needs to scale out, the Auto Scaling group can draw on the warm pool to meet its new desired capacity. For more information, see Warm pools for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

This operation must be called from the Region in which the Auto Scaling group was created. This operation cannot be called on an Auto Scaling group that has a mixed instances policy or a launch template or launch configuration that requests Spot Instances.

You can view the instances in the warm pool using the DescribeWarmPool API call. If you are no longer using a warm pool, you can delete it by calling the DeleteWarmPool API.
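A minimal Go sketch, reusing the svc client and imports from the DescribeWarmPool example above; the values mirror the PutWarmPool example added to models/apis/autoscaling/2011-01-01/examples-1.json later in this diff:

```go
// Keep at least 30 pre-initialized instances in the warm pool, stopped
// until the group needs them to scale out.
_, err := svc.PutWarmPool(&autoscaling.PutWarmPoolInput{
	AutoScalingGroupName: aws.String("my-auto-scaling-group"),
	MinSize:              aws.Int64(30),
	PoolState:            aws.String("Stopped"),
})
if err != nil {
	log.Fatal(err)
}
```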

", + "RecordLifecycleActionHeartbeat": "

Records a heartbeat for the lifecycle action associated with the specified token or instance. This extends the timeout by the length of time defined using the PutLifecycleHook API call.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Amazon EC2 Auto Scaling lifecycle hooks in the Amazon EC2 Auto Scaling User Guide.
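Step 4 of the procedure maps to a single SDK call; a sketch with the same client setup as above (the hook name and instance ID are hypothetical):

```go
// Extend the lifecycle hook's timeout while custom initialization runs.
_, err := svc.RecordLifecycleActionHeartbeat(&autoscaling.RecordLifecycleActionHeartbeatInput{
	AutoScalingGroupName: aws.String("my-auto-scaling-group"),
	LifecycleHookName:    aws.String("my-launch-hook"),
	InstanceId:           aws.String("i-0123456789abcdef0"),
})
if err != nil {
	log.Fatal(err)
}
```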

", "ResumeProcesses": "

Resumes the specified suspended auto scaling processes, or all suspended processes, for the specified Auto Scaling group.

For more information, see Suspending and resuming scaling processes in the Amazon EC2 Auto Scaling User Guide.

", "SetDesiredCapacity": "

Sets the size of the specified Auto Scaling group.

If a scale-in activity occurs as a result of a new DesiredCapacity value that is lower than the current size of the group, the Auto Scaling group uses its termination policy to determine which instances to terminate.

For more information, see Manual scaling in the Amazon EC2 Auto Scaling User Guide.

", "SetInstanceHealth": "

Sets the health status of the specified instance.

For more information, see Health checks for Auto Scaling instances in the Amazon EC2 Auto Scaling User Guide.

", - "SetInstanceProtection": "

Updates the instance protection settings of the specified instances.

For more information about preventing instances that are part of an Auto Scaling group from terminating on scale in, see Instance scale-in protection in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of instance IDs, which is 50 per Auto Scaling group, the call fails.

", - "StartInstanceRefresh": "

Starts a new instance refresh operation, which triggers a rolling replacement of all previously launched instances in the Auto Scaling group with a new group of instances.

If successful, this call creates a new instance refresh request with a unique ID that you can use to track its progress. To query its status, call the DescribeInstanceRefreshes API. To describe the instance refreshes that have already run, call the DescribeInstanceRefreshes API. To cancel an instance refresh operation in progress, use the CancelInstanceRefresh API.

For more information, see Replacing Auto Scaling instances based on an instance refresh in the Amazon EC2 Auto Scaling User Guide.

", + "SetInstanceProtection": "

Updates the instance protection settings of the specified instances. This operation cannot be called on instances in a warm pool.

For more information about preventing instances that are part of an Auto Scaling group from terminating on scale in, see Instance scale-in protection in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of instance IDs, which is 50 per Auto Scaling group, the call fails.
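A short sketch of the call (same client setup as above; the instance ID is hypothetical and must belong to an in-service instance, not the warm pool):

```go
_, err := svc.SetInstanceProtection(&autoscaling.SetInstanceProtectionInput{
	AutoScalingGroupName: aws.String("my-auto-scaling-group"),
	InstanceIds:          aws.StringSlice([]string{"i-0123456789abcdef0"}),
	ProtectedFromScaleIn: aws.Bool(true), // protect this instance during scale in
})
if err != nil {
	log.Fatal(err)
}
```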

", + "StartInstanceRefresh": "

Starts a new instance refresh operation, which triggers a rolling replacement of previously launched instances in the Auto Scaling group with a new group of instances.

If successful, this call creates a new instance refresh request with a unique ID that you can use to track its progress. To query its status or to describe the instance refreshes that have already run, call the DescribeInstanceRefreshes API. To cancel an instance refresh operation in progress, use the CancelInstanceRefresh API.

For more information, see Replacing Auto Scaling instances based on an instance refresh in the Amazon EC2 Auto Scaling User Guide.

", "SuspendProcesses": "

Suspends the specified auto scaling processes, or all processes, for the specified Auto Scaling group.

If you suspend either the Launch or Terminate process types, it can prevent other process types from functioning properly. For more information, see Suspending and resuming scaling processes in the Amazon EC2 Auto Scaling User Guide.

To resume processes that have been suspended, call the ResumeProcesses API.

", - "TerminateInstanceInAutoScalingGroup": "

Terminates the specified instance and optionally adjusts the desired group size.

This call simply makes a termination request. The instance is not terminated immediately. When an instance is terminated, the instance status changes to terminated. You can't connect to or start an instance after you've terminated it.

If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are terminated.

By default, Amazon EC2 Auto Scaling balances instances across all Availability Zones. If you decrement the desired capacity, your Auto Scaling group can become unbalanced between Availability Zones. Amazon EC2 Auto Scaling tries to rebalance the group, and rebalancing might terminate instances in other zones. For more information, see Rebalancing activities in the Amazon EC2 Auto Scaling User Guide.

", + "TerminateInstanceInAutoScalingGroup": "

Terminates the specified instance and optionally adjusts the desired group size. This operation cannot be called on instances in a warm pool.

This call simply makes a termination request. The instance is not terminated immediately. When an instance is terminated, the instance status changes to terminated. You can't connect to or start an instance after you've terminated it.

If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are terminated.

By default, Amazon EC2 Auto Scaling balances instances across all Availability Zones. If you decrement the desired capacity, your Auto Scaling group can become unbalanced between Availability Zones. Amazon EC2 Auto Scaling tries to rebalance the group, and rebalancing might terminate instances in other zones. For more information, see Rebalancing activities in the Amazon EC2 Auto Scaling User Guide.

", "UpdateAutoScalingGroup": "

We strongly recommend that all Auto Scaling groups use launch templates to ensure full functionality for Amazon EC2 Auto Scaling and Amazon EC2.

Updates the configuration for the specified Auto Scaling group.

To update an Auto Scaling group, specify the name of the group and the parameter that you want to change. Any parameters that you don't specify are not changed by this update request. The new settings take effect on any scaling activities after this call returns.

If you associate a new launch configuration or template with an Auto Scaling group, all new instances will get the updated configuration. Existing instances continue to run with the configuration that they were originally launched with. When you update a group to specify a mixed instances policy instead of a launch configuration or template, existing instances may be replaced to match the new purchasing options that you specified in the policy. For example, if the group currently has 100% On-Demand capacity and the policy specifies 50% Spot capacity, this means that half of your instances will be gradually terminated and relaunched as Spot Instances. When replacing instances, Amazon EC2 Auto Scaling launches new instances before terminating the old ones, so that updating your group does not compromise the performance or availability of your application.

Note the following about changing DesiredCapacity, MaxSize, or MinSize:

To see which parameters have been set, call the DescribeAutoScalingGroups API. To view the scaling policies for an Auto Scaling group, call the DescribePolicies API. If the group has scaling policies, you can update them by calling the PutScalingPolicy API.

" }, "shapes": { @@ -450,6 +453,16 @@ "refs": { } }, + "DeleteWarmPoolAnswer": { + "base": null, + "refs": { + } + }, + "DeleteWarmPoolType": { + "base": null, + "refs": { + } + }, "DescribeAccountLimitsAnswer": { "base": null, "refs": { @@ -555,6 +568,16 @@ "refs": { } }, + "DescribeWarmPoolAnswer": { + "base": null, + "refs": { + } + }, + "DescribeWarmPoolType": { + "base": null, + "refs": { + } + }, "DetachInstancesAnswer": { "base": null, "refs": { @@ -686,7 +709,8 @@ "ForceDelete": { "base": null, "refs": { - "DeleteAutoScalingGroupType$ForceDelete": "

Specifies that the group is to be deleted along with all instances associated with the group, without waiting for all instances to be terminated. This parameter also deletes any lifecycle actions associated with the group.

" + "DeleteAutoScalingGroupType$ForceDelete": "

Specifies that the group is to be deleted along with all instances associated with the group, without waiting for all instances to be terminated. This parameter also deletes any outstanding lifecycle actions associated with the group.

", + "DeleteWarmPoolType$ForceDelete": "

Specifies that the warm pool is to be deleted along with all instances associated with the warm pool, without waiting for all instances to be terminated. This parameter also deletes any outstanding lifecycle actions associated with the warm pool instances.
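A sketch of a forced delete (same client setup as the warm pool examples above):

```go
_, err := svc.DeleteWarmPool(&autoscaling.DeleteWarmPoolInput{
	AutoScalingGroupName: aws.String("my-auto-scaling-group"),
	ForceDelete:          aws.Bool(true), // don't wait for warmed instances to terminate
})
if err != nil {
	log.Fatal(err)
}
```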

" } }, "GlobalTimeout": { @@ -800,12 +824,30 @@ "refs": { } }, + "InstanceRefreshLivePoolProgress": { + "base": "

Reports the progress of an instance refresh on instances that are in the Auto Scaling group.

", + "refs": { + "InstanceRefreshProgressDetails$LivePoolProgress": "

Indicates the progress of an instance refresh on instances that are in the Auto Scaling group.

" + } + }, + "InstanceRefreshProgressDetails": { + "base": "

Reports the progress of an instance refresh on an Auto Scaling group that has a warm pool. This includes separate details for instances in the warm pool and instances in the Auto Scaling group (the live pool).

", + "refs": { + "InstanceRefresh$ProgressDetails": "

Additional progress details for an Auto Scaling group that has a warm pool.
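A sketch of reading the new per-pool progress fields from DescribeInstanceRefreshes (same client setup as the warm pool examples above; the group name is hypothetical):

```go
out, err := svc.DescribeInstanceRefreshes(&autoscaling.DescribeInstanceRefreshesInput{
	AutoScalingGroupName: aws.String("my-auto-scaling-group"),
})
if err != nil {
	log.Fatal(err)
}
for _, r := range out.InstanceRefreshes {
	// ProgressDetails is only populated for groups that have a warm pool.
	if d := r.ProgressDetails; d != nil {
		if lp := d.LivePoolProgress; lp != nil {
			fmt.Printf("live pool: %d%% complete, %d instances left\n",
				aws.Int64Value(lp.PercentageComplete), aws.Int64Value(lp.InstancesToUpdate))
		}
		if wp := d.WarmPoolProgress; wp != nil {
			fmt.Printf("warm pool: %d%% complete, %d instances left\n",
				aws.Int64Value(wp.PercentageComplete), aws.Int64Value(wp.InstancesToUpdate))
		}
	}
}
```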

" + } + }, "InstanceRefreshStatus": { "base": null, "refs": { "InstanceRefresh$Status": "

The current status for the instance refresh operation:

" } }, + "InstanceRefreshWarmPoolProgress": { + "base": "

Reports the progress of an instance refresh on instances that are in the warm pool.

", + "refs": { + "InstanceRefreshProgressDetails$WarmPoolProgress": "

Indicates the progress of an instance refresh on instances that are in the warm pool.

" + } + }, "InstanceRefreshes": { "base": null, "refs": { @@ -815,7 +857,8 @@ "Instances": { "base": null, "refs": { - "AutoScalingGroup$Instances": "

The EC2 instances associated with the group.

" + "AutoScalingGroup$Instances": "

The EC2 instances associated with the group.

", + "DescribeWarmPoolAnswer$Instances": "

The instances that are currently in the warm pool.

" } }, "InstancesDistribution": { @@ -827,13 +870,17 @@ "InstancesToUpdate": { "base": null, "refs": { - "InstanceRefresh$InstancesToUpdate": "

The number of instances remaining to update before the instance refresh is complete.

" + "InstanceRefresh$InstancesToUpdate": "

The number of instances remaining to update before the instance refresh is complete.

", + "InstanceRefreshLivePoolProgress$InstancesToUpdate": "

The number of instances remaining to update.

", + "InstanceRefreshWarmPoolProgress$InstancesToUpdate": "

The number of instances remaining to update.

" } }, "IntPercent": { "base": null, "refs": { "InstanceRefresh$PercentageComplete": "

The percentage of the instance refresh that is complete. For each instance replacement, Amazon EC2 Auto Scaling tracks the instance's health status and warm-up time. When the instance's health status changes to healthy and the specified warm-up time passes, the instance is considered updated and added to the percentage complete.

", + "InstanceRefreshLivePoolProgress$PercentageComplete": "

The percentage of instances in the Auto Scaling group that have been replaced. For each instance replacement, Amazon EC2 Auto Scaling tracks the instance's health status and warm-up time. When the instance's health status changes to healthy and the specified warm-up time passes, the instance is considered updated and added to the percentage complete.

", + "InstanceRefreshWarmPoolProgress$PercentageComplete": "

The percentage of instances in the warm pool that have been replaced. For each instance replacement, Amazon EC2 Auto Scaling tracks the instance's health status and warm-up time. When the instance's health status changes to healthy and the specified warm-up time passes, the instance is considered updated and added to the percentage complete.

", "RefreshPreferences$MinHealthyPercentage": "

The amount of capacity in the Auto Scaling group that must remain healthy during an instance refresh to allow the operation to continue, as a percentage of the desired capacity of the Auto Scaling group (rounded up to the nearest integer). The default is 90.

" } }, @@ -1003,6 +1050,13 @@ "DescribeLoadBalancerTargetGroupsResponse$LoadBalancerTargetGroups": "

Information about the target groups.

" } }, + "MaxGroupPreparedCapacity": { + "base": null, + "refs": { + "PutWarmPoolType$MaxGroupPreparedCapacity": "

Specifies the total maximum number of instances that are allowed to be in the warm pool or in any state except Terminated for the Auto Scaling group. This is an optional property. Specify it only if the warm pool size should not be determined by the difference between the group's maximum capacity and its desired capacity.

If a value for MaxGroupPreparedCapacity is not specified, Amazon EC2 Auto Scaling launches and maintains the difference between the group's maximum capacity and its desired capacity. If a value is specified, it launches and maintains the difference between MaxGroupPreparedCapacity and the desired capacity.

The size of the warm pool is dynamic. Only when MaxGroupPreparedCapacity and MinSize are set to the same value does the warm pool have an absolute size.

If the desired capacity of the Auto Scaling group is higher than the MaxGroupPreparedCapacity, the capacity of the warm pool is 0. To remove a value that you previously set, include the property but specify -1 for the value.
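To make the sizing rule concrete, a hedged sketch with invented capacity values (client setup as in the earlier warm pool examples):

```go
// Assume the group has MaxSize=10 and DesiredCapacity=4:
//   MaxGroupPreparedCapacity omitted: warm pool maintains 10 - 4 = 6 instances.
//   MaxGroupPreparedCapacity = 7:     warm pool maintains  7 - 4 = 3 instances.
//   MaxGroupPreparedCapacity = 3:     desired capacity exceeds it, so the pool size is 0.
_, err := svc.PutWarmPool(&autoscaling.PutWarmPoolInput{
	AutoScalingGroupName:     aws.String("my-auto-scaling-group"),
	MaxGroupPreparedCapacity: aws.Int64(-1), // -1 removes a previously set value
})
if err != nil {
	log.Fatal(err)
}
```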

", + "WarmPoolConfiguration$MaxGroupPreparedCapacity": "

The total maximum number of instances that are allowed to be in the warm pool or in any state except Terminated for the Auto Scaling group.

" + } + }, "MaxInstanceLifetime": { "base": null, "refs": { @@ -1036,6 +1090,7 @@ "DescribeScalingActivitiesType$MaxRecords": "

The maximum number of items to return with this call. The default value is 100 and the maximum value is 100.

", "DescribeScheduledActionsType$MaxRecords": "

The maximum number of items to return with this call. The default value is 50 and the maximum value is 100.

", "DescribeTagsType$MaxRecords": "

The maximum number of items to return with this call. The default value is 50 and the maximum value is 100.

", + "DescribeWarmPoolType$MaxRecords": "

The maximum number of instances to return with this call. The maximum value is 50.

", "LaunchConfigurationNamesType$MaxRecords": "

The maximum number of items to return with this call. The default value is 50 and the maximum value is 100.

" } }, @@ -1130,8 +1185,8 @@ "Metrics": { "base": null, "refs": { - "DisableMetricsCollectionQuery$Metrics": "

Specifies one or more of the following metrics:

If you omit this parameter, all metrics are disabled.

", - "EnableMetricsCollectionQuery$Metrics": "

Specifies which group-level metrics to start collecting. You can specify one or more of the following metrics:

The instance weighting feature supports the following additional metrics:

If you omit this parameter, all metrics are enabled.

" + "DisableMetricsCollectionQuery$Metrics": "

Specifies one or more of the following metrics:

If you omit this parameter, all metrics are disabled.

", + "EnableMetricsCollectionQuery$Metrics": "

Specifies which group-level metrics to start collecting. You can specify one or more of the following metrics:

The instance weighting feature supports the following additional metrics:

The warm pools feature supports the following additional metrics:

If you omit this parameter, all metrics are enabled.
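A hedged sketch of enabling the new group metrics (same client setup as above; the stripped metric list does not appear in this diff, so the warm pool metric names below are assumptions):

```go
_, err := svc.EnableMetricsCollection(&autoscaling.EnableMetricsCollectionInput{
	AutoScalingGroupName: aws.String("my-auto-scaling-group"),
	Granularity:          aws.String("1Minute"), // the only valid granularity
	Metrics: aws.StringSlice([]string{
		"WarmPoolDesiredCapacity", // assumed warm pool metric names
		"WarmPoolWarmedCapacity",
	}),
})
if err != nil {
	log.Fatal(err)
}
```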

" } }, "MinAdjustmentMagnitude": { @@ -1333,6 +1388,16 @@ "refs": { } }, + "PutWarmPoolAnswer": { + "base": null, + "refs": { + } + }, + "PutWarmPoolType": { + "base": null, + "refs": { + } + }, "RecordLifecycleActionHeartbeatAnswer": { "base": null, "refs": { @@ -1664,6 +1729,39 @@ "Filter$Values": "

One or more filter values. Filter values are case-sensitive.

" } }, + "WarmPoolConfiguration": { + "base": "

Describes a warm pool configuration.

", + "refs": { + "AutoScalingGroup$WarmPoolConfiguration": "

The warm pool for the group.

", + "DescribeWarmPoolAnswer$WarmPoolConfiguration": "

The warm pool configuration details.

" + } + }, + "WarmPoolMinSize": { + "base": null, + "refs": { + "PutWarmPoolType$MinSize": "

Specifies the minimum number of instances to maintain in the warm pool. This helps you ensure that there is always a certain number of warmed instances available to handle traffic spikes. Defaults to 0 if not specified.

", + "WarmPoolConfiguration$MinSize": "

The minimum number of instances to maintain in the warm pool.

" + } + }, + "WarmPoolSize": { + "base": null, + "refs": { + "AutoScalingGroup$WarmPoolSize": "

The current size of the warm pool.

" + } + }, + "WarmPoolState": { + "base": null, + "refs": { + "PutWarmPoolType$PoolState": "

Sets the instance state to transition to after the lifecycle hooks finish. Valid values are: Stopped (default) or Running.

", + "WarmPoolConfiguration$PoolState": "

The instance state to transition to after the lifecycle actions are complete: Stopped or Running.

" + } + }, + "WarmPoolStatus": { + "base": null, + "refs": { + "WarmPoolConfiguration$Status": "

The status of a warm pool that is marked for deletion.

" + } + }, "XmlString": { "base": null, "refs": { @@ -1688,6 +1786,8 @@ "DescribeScalingActivitiesType$NextToken": "

The token for the next set of items to return. (You received this token from a previous call.)

", "DescribeScheduledActionsType$NextToken": "

The token for the next set of items to return. (You received this token from a previous call.)

", "DescribeTagsType$NextToken": "

The token for the next set of items to return. (You received this token from a previous call.)

", + "DescribeWarmPoolAnswer$NextToken": "

The token for the next set of items to return. (You received this token from a previous call.)

", + "DescribeWarmPoolType$NextToken": "

The token for the next set of instances to return. (You received this token from a previous call.)

", "FailedScheduledUpdateGroupActionRequest$ErrorMessage": "

The error message accompanying the error code.

", "Filter$Name": "

The name of the filter. The valid values are: auto-scaling-group, key, value, and propagate-at-launch.

", "InstancesDistribution$OnDemandAllocationStrategy": "

Indicates how to allocate instance types to fulfill On-Demand capacity. The only valid value is prioritized, which is also the default value. This strategy uses the order of instance types in the LaunchTemplateOverrides to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling group launches the remaining capacity using the second priority instance type, and so on.

", @@ -1793,6 +1893,7 @@ "DeletePolicyType$AutoScalingGroupName": "

The name of the Auto Scaling group.

", "DeleteScheduledActionType$AutoScalingGroupName": "

The name of the Auto Scaling group.

", "DeleteScheduledActionType$ScheduledActionName": "

The name of the action to delete.

", + "DeleteWarmPoolType$AutoScalingGroupName": "

The name of the Auto Scaling group.

", "DescribeInstanceRefreshesType$AutoScalingGroupName": "

The name of the Auto Scaling group.

", "DescribeLifecycleHooksType$AutoScalingGroupName": "

The name of the Auto Scaling group.

", "DescribeLoadBalancerTargetGroupsRequest$AutoScalingGroupName": "

The name of the Auto Scaling group.

", @@ -1800,6 +1901,7 @@ "DescribePoliciesType$AutoScalingGroupName": "

The name of the Auto Scaling group.

", "DescribeScalingActivitiesType$AutoScalingGroupName": "

The name of the Auto Scaling group.

", "DescribeScheduledActionsType$AutoScalingGroupName": "

The name of the Auto Scaling group.

", + "DescribeWarmPoolType$AutoScalingGroupName": "

The name of the Auto Scaling group.

", "DetachInstancesQuery$AutoScalingGroupName": "

The name of the Auto Scaling group.

", "DetachLoadBalancerTargetGroupsType$AutoScalingGroupName": "

The name of the Auto Scaling group.

", "DetachLoadBalancersType$AutoScalingGroupName": "

The name of the Auto Scaling group.

", @@ -1807,7 +1909,7 @@ "Ebs$SnapshotId": "

The snapshot ID of the volume to use.

You must specify either a VolumeSize or a SnapshotId.

", "EnableMetricsCollectionQuery$AutoScalingGroupName": "

The name of the Auto Scaling group.

", "EnableMetricsCollectionQuery$Granularity": "

The granularity to associate with the metrics to collect. The only valid value is 1Minute.

", - "EnabledMetric$Metric": "

One of the following metrics:

", + "EnabledMetric$Metric": "

One of the following metrics:

", "EnabledMetric$Granularity": "

The granularity of the metric. The only valid value is 1Minute.

", "EnterStandbyQuery$AutoScalingGroupName": "

The name of the Auto Scaling group.

", "ExecutePolicyType$AutoScalingGroupName": "

The name of the Auto Scaling group.

", @@ -1841,7 +1943,7 @@ "LoadBalancerState$LoadBalancerName": "

The name of the load balancer.

", "LoadBalancerState$State": "

One of the following load balancer states:

", "LoadBalancerTargetGroupState$State": "

The state of the target group.

", - "MetricCollectionType$Metric": "

One of the following metrics:

", + "MetricCollectionType$Metric": "

One of the following metrics:

", "MetricGranularityType$Granularity": "

The granularity. The only valid value is 1Minute.

", "Metrics$member": null, "NotificationConfiguration$AutoScalingGroupName": "

The name of the Auto Scaling group.

", @@ -1860,6 +1962,7 @@ "PutScheduledUpdateGroupActionType$ScheduledActionName": "

The name of this scaling action.

", "PutScheduledUpdateGroupActionType$Recurrence": "

The recurring schedule for this action. This format consists of five fields separated by white spaces: [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]. The value must be in quotes (for example, \"30 0 1 1,6,12 *\"). For more information about this format, see Crontab.

When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action starts and stops.

Cron expressions use Universal Coordinated Time (UTC) by default.

", "PutScheduledUpdateGroupActionType$TimeZone": "

Specifies the time zone for a cron expression. If a time zone is not provided, UTC is used by default.

Valid values are the canonical names of the IANA time zones, derived from the IANA Time Zone Database (such as Etc/GMT+9 or Pacific/Tahiti). For more information, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones.

", + "PutWarmPoolType$AutoScalingGroupName": "

The name of the Auto Scaling group.

", "ResourceContentionFault$message": "

", "ResourceInUseFault$message": "

", "ScalingActivityInProgressFault$message": "

", @@ -1891,7 +1994,7 @@ "base": null, "refs": { "AutoScalingGroup$HealthCheckType": "

The service to use for the health checks. The valid values are EC2 and ELB. If you configure an Auto Scaling group to use ELB health checks, it considers the instance unhealthy if it fails either the EC2 status checks or the load balancer health checks.

", - "AutoScalingInstanceDetails$LifecycleState": "

The lifecycle state for the instance. The Quarantined state is not used. For information about lifecycle states, see Instance lifecycle in the Amazon EC2 Auto Scaling User Guide.

Valid Values: Pending | Pending:Wait | Pending:Proceed | Quarantined | InService | Terminating | Terminating:Wait | Terminating:Proceed | Terminated | Detaching | Detached | EnteringStandby | Standby

", + "AutoScalingInstanceDetails$LifecycleState": "

The lifecycle state for the instance. The Quarantined state is not used. For information about lifecycle states, see Instance lifecycle in the Amazon EC2 Auto Scaling User Guide.

Valid Values: Pending | Pending:Wait | Pending:Proceed | Quarantined | InService | Terminating | Terminating:Wait | Terminating:Proceed | Terminated | Detaching | Detached | EnteringStandby | Standby | Warmed:Pending | Warmed:Pending:Wait | Warmed:Pending:Proceed | Warmed:Terminating | Warmed:Terminating:Wait | Warmed:Terminating:Proceed | Warmed:Terminated | Warmed:Stopped | Warmed:Running

", "AutoScalingInstanceDetails$HealthStatus": "

The last reported health status of this instance. \"Healthy\" means that the instance is healthy and should remain in service. \"Unhealthy\" means that the instance is unhealthy and Amazon EC2 Auto Scaling should terminate and replace it.

", "AutoScalingInstanceDetails$WeightedCapacity": "

The number of capacity units contributed by the instance based on its instance type.

Valid Range: Minimum value of 1. Maximum value of 999.

", "CreateAutoScalingGroupType$HealthCheckType": "

The service to use for the health checks. The valid values are EC2 (default) and ELB. If you configure an Auto Scaling group to use load balancer (ELB) health checks, it considers the instance unhealthy if it fails either the EC2 status checks or the load balancer health checks. For more information, see Health checks for Auto Scaling instances in the Amazon EC2 Auto Scaling User Guide.

", diff --git a/models/apis/autoscaling/2011-01-01/examples-1.json b/models/apis/autoscaling/2011-01-01/examples-1.json index 731daca0278..5720b49068d 100644 --- a/models/apis/autoscaling/2011-01-01/examples-1.json +++ b/models/apis/autoscaling/2011-01-01/examples-1.json @@ -122,9 +122,12 @@ { "input": { "AutoScalingGroupName": "my-auto-scaling-group", - "HealthCheckGracePeriod": 120, + "HealthCheckGracePeriod": 300, "HealthCheckType": "ELB", - "LaunchConfigurationName": "my-launch-config", + "LaunchTemplate": { + "LaunchTemplateId": "lt-0a20c965061f64abc", + "Version": "$Default" + }, "MaxSize": 3, "MinSize": 1, "TargetGroupARNs": [ @@ -144,18 +147,39 @@ }, { "input": { - "AutoScalingGroupName": "my-auto-scaling-group", - "AvailabilityZones": [ - "us-west-2c" - ], - "HealthCheckGracePeriod": 120, - "HealthCheckType": "ELB", - "LaunchConfigurationName": "my-launch-config", - "LoadBalancerNames": [ - "my-load-balancer" - ], - "MaxSize": 3, - "MinSize": 1 + "AutoScalingGroupName": "my-asg", + "DesiredCapacity": 3, + "MaxSize": 5, + "MinSize": 1, + "MixedInstancesPolicy": { + "InstancesDistribution": { + "OnDemandBaseCapacity": 1, + "OnDemandPercentageAboveBaseCapacity": 50, + "SpotAllocationStrategy": "capacity-optimized" + }, + "LaunchTemplate": { + "LaunchTemplateSpecification": { + "LaunchTemplateName": "my-launch-template-for-x86", + "Version": "$Latest" + }, + "Overrides": [ + { + "InstanceType": "c6g.large", + "LaunchTemplateSpecification": { + "LaunchTemplateName": "my-launch-template-for-arm", + "Version": "$Latest" + } + }, + { + "InstanceType": "c5.large" + }, + { + "InstanceType": "c5a.large" + } + ] + } + }, + "VPCZoneIdentifier": "subnet-057fa0918fEXAMPLE, subnet-610acd08EXAMPLE" }, "comments": { "input": { @@ -163,9 +187,9 @@ "output": { } }, - "description": "This example creates an Auto Scaling group and attaches the specified Classic Load Balancer.", - "id": "autoscaling-create-auto-scaling-group-3", - "title": "To create an Auto Scaling group with an attached load balancer" + "description": "This example creates an Auto Scaling group with a mixed instances policy. 
It specifies the c5.large, c5a.large, and c6g.large instance types and defines a different launch template for the c6g.large instance type.", + "id": "to-create-an-auto-scaling-group-with-a-mixed-instances-policy-1617815269039", + "title": "To create an Auto Scaling group with a mixed instances policy" } ], "CreateLaunchConfiguration": [ @@ -1224,7 +1248,7 @@ "TargetTrackingConfiguration": { "PredefinedMetricSpecification": { "PredefinedMetricType": "ALBRequestCountPerTarget", - "ResourceLabel": "app/EC2Co-EcsEl-1TKLTMITMM0EO/f37c06a68c1748aa/targetgroup/EC2Co-Defau-LDNM7Q3ZH1ZN/6d4ea56ca2d6a18d" + "ResourceLabel": "app/my-alb/778d41231b141a0f/targetgroup/my-alb-target-group/943f017f100becff" }, "TargetValue": 1000.0 } @@ -1275,6 +1299,24 @@ "title": "To add a scheduled action to an Auto Scaling group" } ], + "PutWarmPool": [ + { + "input": { + "AutoScalingGroupName": "my-auto-scaling-group", + "MinSize": 30, + "PoolState": "Stopped" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "This example adds a warm pool to the specified Auto Scaling group.", + "id": "to-add-a-warm-pool-to-an-auto-scaling-group-1617818810383", + "title": "To add a warm pool to an Auto Scaling group" + } + ], "RecordLifecycleActionHeartbeat": [ { "input": { diff --git a/models/apis/customer-profiles/2020-08-15/docs-2.json b/models/apis/customer-profiles/2020-08-15/docs-2.json index 5027facbc13..4deb504d8f4 100644 --- a/models/apis/customer-profiles/2020-08-15/docs-2.json +++ b/models/apis/customer-profiles/2020-08-15/docs-2.json @@ -1,6 +1,6 @@ { "version": "2.0", - "service": "Amazon Connect Customer Profiles

Welcome to the Amazon Connect Customer Profiles API Reference. This guide provides information about the Amazon Connect Customer Profiles API, including supported operations, data types, parameters, and schemas.

Amazon Connect Customer Profiles is a unified customer profile for your contact center that has pre-built connectors powered by AppFlow that make it easy to combine customer information from third party applications, such as Salesforce (CRM), ServiceNow (ITSM), and your enterprise resource planning (ERP), with contact history from your Amazon Connect contact center.

If you're new to Amazon Connect, you might find it helpful to also review the Amazon Connect Administrator Guide.

", + "service": "Amazon Connect Customer Profiles

Welcome to the Amazon Connect Customer Profiles API Reference. This guide provides information about the Amazon Connect Customer Profiles API, including supported operations, data types, parameters, and schemas.

Amazon Connect Customer Profiles is a unified customer profile for your contact center that has pre-built connectors powered by AppFlow that make it easy to combine customer information from third party applications, such as Salesforce (CRM), ServiceNow (ITSM), and your enterprise resource planning (ERP), with contact history from your Amazon Connect contact center.

If you're new to Amazon Connect, you might find it helpful to also review the Amazon Connect Administrator Guide.

", "operations": { "AddProfileKey": "

Associates a new key value with a specific profile, such as a Contact Trace Record (CTR) ContactId.

A profile object can have a single unique key and any number of additional keys that can be used to identify the profile that it belongs to.
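For readers wiring this up with the Go SDK this changelog belongs to, a minimal sketch of the call might look like the following; the domain name, profile ID, key name, and key values are hypothetical placeholders, not values the service prescribes.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/customerprofiles"
)

func main() {
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
    svc := customerprofiles.New(sess)

    // Attach an additional search key to an existing profile; the key can then
    // be used to look the profile up alongside its unique key.
    out, err := svc.AddProfileKey(&customerprofiles.AddProfileKeyInput{
        DomainName: aws.String("my-domain"),                           // placeholder
        ProfileId:  aws.String("example-profile-id"),                  // placeholder
        KeyName:    aws.String("ContactId"),                           // placeholder key name
        Values:     []*string{aws.String("example-contact-id-value")}, // placeholder values
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(out)
}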

", "CreateDomain": "

Creates a domain, which is a container for all customer data, such as customer profile attributes, object types, profile keys, and encryption keys. You can create multiple domains, and each domain can have multiple third-party integrations.

Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain.
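A minimal aws-sdk-go sketch of creating a domain, assuming DefaultExpirationDays is the only other required input; the domain name and retention period are placeholders.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/customerprofiles"
)

func main() {
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
    svc := customerprofiles.New(sess)

    // Create the per-account container for profile data. DefaultExpirationDays
    // (assumed required) controls how long inactive profiles are retained.
    out, err := svc.CreateDomain(&customerprofiles.CreateDomainInput{
        DomainName:            aws.String("my-contact-center-domain"), // placeholder
        DefaultExpirationDays: aws.Int64(366),
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.StringValue(out.DomainName))
}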

", @@ -125,25 +125,25 @@ "BucketName": { "base": null, "refs": { - "S3SourceProperties$BucketName": null + "S3SourceProperties$BucketName": "

The Amazon S3 bucket name where the source files are stored.

" } }, "BucketPrefix": { "base": null, "refs": { - "S3SourceProperties$BucketPrefix": null + "S3SourceProperties$BucketPrefix": "

The object key for the Amazon S3 bucket in which the source files are stored.

" } }, "ConnectorOperator": { - "base": null, + "base": "

The operation to be performed on the provided source fields.

", "refs": { - "Task$ConnectorOperator": null + "Task$ConnectorOperator": "

The operation to be performed on the provided source fields.

" } }, "ConnectorProfileName": { "base": null, "refs": { - "SourceFlowConfig$ConnectorProfileName": null + "SourceFlowConfig$ConnectorProfileName": "

The name of the AppFlow connector profile. This name must be unique for each connector profile in the AWS account.

" } }, "CreateDomainRequest": { @@ -169,21 +169,21 @@ "DataPullMode": { "base": null, "refs": { - "ScheduledTriggerProperties$DataPullMode": null + "ScheduledTriggerProperties$DataPullMode": "

Specifies whether a scheduled flow has an incremental data transfer or a complete data transfer for each flow run.

" } }, "Date": { "base": null, "refs": { - "ScheduledTriggerProperties$ScheduleStartTime": null, - "ScheduledTriggerProperties$ScheduleEndTime": null, - "ScheduledTriggerProperties$FirstExecutionFrom": null + "ScheduledTriggerProperties$ScheduleStartTime": "

Specifies the scheduled start time for a scheduled-trigger flow.

", + "ScheduledTriggerProperties$ScheduleEndTime": "

Specifies the scheduled end time for a scheduled-trigger flow.

", + "ScheduledTriggerProperties$FirstExecutionFrom": "

Specifies the date range for the records to import from the connector in the first flow run.

" } }, "DatetimeTypeFieldName": { "base": null, "refs": { - "IncrementalPullConfig$DatetimeTypeFieldName": null + "IncrementalPullConfig$DatetimeTypeFieldName": "

A field that specifies the date-time or timestamp field to use as the criterion when importing incremental records from the source.

" } }, "DeleteDomainRequest": { @@ -249,7 +249,7 @@ "DestinationField": { "base": null, "refs": { - "Task$DestinationField": null + "Task$DestinationField": "

A field in a destination connector, or a field value against which Amazon AppFlow validates a source field.

" } }, "DomainList": { @@ -286,21 +286,21 @@ } }, "FlowDefinition": { - "base": null, + "base": "

The configurations that control how Customer Profiles retrieves data from the source, Amazon AppFlow. Customer Profiles uses this information to create an AppFlow flow on behalf of customers.

", "refs": { - "PutIntegrationRequest$FlowDefinition": null + "PutIntegrationRequest$FlowDefinition": "

The configuration that controls how Customer Profiles retrieves data from the source.

" } }, "FlowDescription": { "base": null, "refs": { - "FlowDefinition$Description": null + "FlowDefinition$Description": "

A description of the flow you want to create.

" } }, "FlowName": { "base": null, "refs": { - "FlowDefinition$FlowName": null + "FlowDefinition$FlowName": "

The specified name of the flow. Use underscores (_) or hyphens (-) only. Spaces are not allowed.

" } }, "Gender": { @@ -352,9 +352,9 @@ } }, "IncrementalPullConfig": { - "base": null, + "base": "

Specifies the configuration used when importing incremental records from the source.

", "refs": { - "SourceFlowConfig$IncrementalPullConfig": null + "SourceFlowConfig$IncrementalPullConfig": "

Defines the configuration for a scheduled incremental data pull. If a valid configuration is provided, the fields specified in the configuration are used when querying for the incremental data pull.

" } }, "IntegrationList": { @@ -381,7 +381,7 @@ "KmsArn": { "base": null, "refs": { - "FlowDefinition$KmsArn": null + "FlowDefinition$KmsArn": "

The Amazon Resource Name of the AWS Key Management Service (KMS) key you provide for encryption.

" } }, "ListAccountIntegrationsRequest": { @@ -487,22 +487,22 @@ "MarketoConnectorOperator": { "base": null, "refs": { - "ConnectorOperator$Marketo": null + "ConnectorOperator$Marketo": "

The operation to be performed on the provided Marketo source fields.

" } }, "MarketoSourceProperties": { - "base": null, + "base": "

The properties that are applied when Marketo is being used as a source.

", "refs": { - "SourceConnectorProperties$Marketo": null + "SourceConnectorProperties$Marketo": "

The properties that are applied when Marketo is being used as a source.

" } }, "Object": { "base": null, "refs": { - "MarketoSourceProperties$Object": null, - "SalesforceSourceProperties$Object": null, - "ServiceNowSourceProperties$Object": null, - "ZendeskSourceProperties$Object": null + "MarketoSourceProperties$Object": "

The object specified in the Marketo flow source.

", + "SalesforceSourceProperties$Object": "

The object specified in the Salesforce flow source.

", + "ServiceNowSourceProperties$Object": "

The object specified in the ServiceNow flow source.

", + "ZendeskSourceProperties$Object": "

The object specified in the Zendesk flow source.

" } }, "ObjectTypeField": { @@ -611,43 +611,43 @@ "S3ConnectorOperator": { "base": null, "refs": { - "ConnectorOperator$S3": null + "ConnectorOperator$S3": "

The operation to be performed on the provided Amazon S3 source fields.

" } }, "S3SourceProperties": { - "base": null, + "base": "

The properties that are applied when Amazon S3 is being used as the flow source.

", "refs": { - "SourceConnectorProperties$S3": null + "SourceConnectorProperties$S3": "

The properties that are applied when Amazon S3 is being used as the flow source.

" } }, "SalesforceConnectorOperator": { "base": null, "refs": { - "ConnectorOperator$Salesforce": null + "ConnectorOperator$Salesforce": "

The operation to be performed on the provided Salesforce source fields.

" } }, "SalesforceSourceProperties": { - "base": null, + "base": "

The properties that are applied when Salesforce is being used as a source.

", "refs": { - "SourceConnectorProperties$Salesforce": null + "SourceConnectorProperties$Salesforce": "

The properties that are applied when Salesforce is being used as a source.

" } }, "ScheduleExpression": { "base": null, "refs": { - "ScheduledTriggerProperties$ScheduleExpression": null + "ScheduledTriggerProperties$ScheduleExpression": "

The scheduling expression that determines the rate at which the schedule runs, for example rate(5 minutes).

" } }, "ScheduleOffset": { "base": null, "refs": { - "ScheduledTriggerProperties$ScheduleOffset": null + "ScheduledTriggerProperties$ScheduleOffset": "

Specifies the optional offset that is added to the time interval for a schedule-triggered flow.

" } }, "ScheduledTriggerProperties": { - "base": null, + "base": "

Specifies the configuration details of a scheduled-trigger flow that you define. Currently, these settings only apply to the Scheduled trigger type.

", "refs": { - "TriggerProperties$Scheduled": null + "TriggerProperties$Scheduled": "

Specifies the configuration details of a schedule-triggered flow that you define.

" } }, "SearchProfilesRequest": { @@ -663,37 +663,37 @@ "ServiceNowConnectorOperator": { "base": null, "refs": { - "ConnectorOperator$ServiceNow": null + "ConnectorOperator$ServiceNow": "

The operation to be performed on the provided ServiceNow source fields.

" } }, "ServiceNowSourceProperties": { - "base": null, + "base": "

The properties that are applied when ServiceNow is being used as a source.

", "refs": { - "SourceConnectorProperties$ServiceNow": null + "SourceConnectorProperties$ServiceNow": "

The properties that are applied when ServiceNow is being used as a source.

" } }, "SourceConnectorProperties": { - "base": null, + "base": "

Specifies the information that is required to query a particular Amazon AppFlow connector. Customer Profiles supports Salesforce, Zendesk, Marketo, ServiceNow, and Amazon S3.

", "refs": { - "SourceFlowConfig$SourceConnectorProperties": null + "SourceFlowConfig$SourceConnectorProperties": "

Specifies the information that is required to query a particular source connector.

" } }, "SourceConnectorType": { "base": null, "refs": { - "SourceFlowConfig$ConnectorType": null + "SourceFlowConfig$ConnectorType": "

The type of connector, such as Salesforce, Marketo, and so on.

" } }, "SourceFields": { "base": null, "refs": { - "Task$SourceFields": null + "Task$SourceFields": "

The source fields to which a particular task is applied.

" } }, "SourceFlowConfig": { - "base": null, + "base": "

Contains information about the configuration of the source connector used in the flow.

", "refs": { - "FlowDefinition$SourceFlowConfig": null + "FlowDefinition$SourceFlowConfig": "

The configuration that controls how Customer Profiles retrieves data from the source.
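Pulling the FlowDefinition-related shapes documented in this hunk together, a hedged aws-sdk-go sketch of PutIntegration with an S3 source and a scheduled trigger might look as follows. The bucket names, KMS key ARN, schedule, and the single pass-through Map task are illustrative assumptions, not service requirements, and the sketch assumes FlowDefinition is sufficient alongside the domain and object type names.

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/customerprofiles"
)

func main() {
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
    svc := customerprofiles.New(sess)

    out, err := svc.PutIntegration(&customerprofiles.PutIntegrationInput{
        DomainName:     aws.String("my-domain"),      // placeholder
        ObjectTypeName: aws.String("my-object-type"), // placeholder
        FlowDefinition: &customerprofiles.FlowDefinition{
            FlowName:    aws.String("my_s3_flow"), // underscores or hyphens only, no spaces
            Description: aws.String("Scheduled incremental pull from S3"),
            KmsArn:      aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"), // placeholder
            SourceFlowConfig: &customerprofiles.SourceFlowConfig{
                ConnectorType: aws.String("S3"),
                SourceConnectorProperties: &customerprofiles.SourceConnectorProperties{
                    S3: &customerprofiles.S3SourceProperties{
                        BucketName:   aws.String("my-source-bucket"), // placeholder
                        BucketPrefix: aws.String("profiles/"),
                    },
                },
            },
            // A single illustrative pass-through task; real flows usually map
            // several source fields.
            Tasks: []*customerprofiles.Task{{
                TaskType:          aws.String("Map"),
                SourceFields:      []*string{aws.String("email")},
                DestinationField:  aws.String("email"),
                ConnectorOperator: &customerprofiles.ConnectorOperator{S3: aws.String("NO_OP")},
            }},
            TriggerConfig: &customerprofiles.TriggerConfig{
                TriggerType: aws.String("Scheduled"),
                TriggerProperties: &customerprofiles.TriggerProperties{
                    Scheduled: &customerprofiles.ScheduledTriggerProperties{
                        ScheduleExpression: aws.String("rate(5 minutes)"), // format per the ScheduleExpression note above
                        DataPullMode:       aws.String("Incremental"),
                        ScheduleStartTime:  aws.Time(time.Now().Add(time.Minute)),
                        Timezone:           aws.String("America/New_York"),
                    },
                },
            },
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(out)
}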

" } }, "StandardIdentifier": { @@ -767,7 +767,7 @@ } }, "Task": { - "base": null, + "base": "

A class for modeling different types of tasks. The task implementation varies based on the TaskType.

", "refs": { "Tasks$member": null } @@ -775,19 +775,19 @@ "TaskPropertiesMap": { "base": null, "refs": { - "Task$TaskProperties": null + "Task$TaskProperties": "

A map used to store task-related information. The service looks for particular information based on the TaskType.

" } }, "TaskType": { "base": null, "refs": { - "Task$TaskType": null + "Task$TaskType": "

Specifies the particular task implementation that Amazon AppFlow performs.

" } }, "Tasks": { "base": null, "refs": { - "FlowDefinition$Tasks": null + "FlowDefinition$Tasks": "

A list of tasks that Customer Profiles performs while transferring the data in the flow run.

" } }, "ThrottlingException": { @@ -798,25 +798,25 @@ "Timezone": { "base": null, "refs": { - "ScheduledTriggerProperties$Timezone": null + "ScheduledTriggerProperties$Timezone": "

Specifies the time zone used when referring to the date and time of a schedule-triggered flow, such as America/New_York.

" } }, "TriggerConfig": { - "base": null, + "base": "

The trigger settings that determine how and when Amazon AppFlow runs the specified flow.

", "refs": { - "FlowDefinition$TriggerConfig": null + "FlowDefinition$TriggerConfig": "

The trigger settings that determine how and when the flow runs.

" } }, "TriggerProperties": { - "base": null, + "base": "

Specifies the configuration details that control the trigger for a flow. Currently, these settings only apply to the Scheduled trigger type.

", "refs": { - "TriggerConfig$TriggerProperties": null + "TriggerConfig$TriggerProperties": "

Specifies the configuration details of a schedule-triggered flow that you define. Currently, these settings only apply to the Scheduled trigger type.

" } }, "TriggerType": { "base": null, "refs": { - "TriggerConfig$TriggerType": null + "TriggerConfig$TriggerType": "

Specifies the type of flow trigger. It can be OnDemand, Scheduled, or Event.

" } }, "UntagResourceRequest": { @@ -867,13 +867,13 @@ "ZendeskConnectorOperator": { "base": null, "refs": { - "ConnectorOperator$Zendesk": null + "ConnectorOperator$Zendesk": "

The operation to be performed on the provided Zendesk source fields.

" } }, "ZendeskSourceProperties": { - "base": null, + "base": "

The properties that are applied when using Zendesk as a flow source.

", "refs": { - "SourceConnectorProperties$Zendesk": null + "SourceConnectorProperties$Zendesk": "

The properties that are applied when using Zendesk as a flow source.

" } }, "boolean": { @@ -883,8 +883,8 @@ "GetProfileObjectTypeTemplateResponse$AllowProfileCreation": "

Indicates whether a profile should be created when data is received if one doesn’t exist for an object of this type. The default is FALSE. If the AllowProfileCreation flag is set to FALSE, then the service tries to fetch a standard profile and associate this object with the profile. If it is set to TRUE, and if no match is found, then the service creates a new standard profile.

", "PutProfileObjectTypeRequest$AllowProfileCreation": "

Indicates whether a profile should be created when data is received if one doesn’t exist for an object of this type. The default is FALSE. If the AllowProfileCreation flag is set to FALSE, then the service tries to fetch a standard profile and associate this object with the profile. If it is set to TRUE, and if no match is found, then the service creates a new standard profile.

", "PutProfileObjectTypeResponse$AllowProfileCreation": "

Indicates whether a profile should be created when data is received if one doesn’t exist for an object of this type. The default is FALSE. If the AllowProfileCreation flag is set to FALSE, then the service tries to fetch a standard profile and associate this object with the profile. If it is set to TRUE, and if no match is found, then the service creates a new standard profile.
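A short aws-sdk-go sketch of setting this flag via PutProfileObjectType, assuming DomainName, ObjectTypeName, and Description are the required inputs; all names are placeholders.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/customerprofiles"
)

func main() {
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
    svc := customerprofiles.New(sess)

    // With AllowProfileCreation set to true, an incoming object that matches no
    // existing profile causes a new standard profile to be created.
    out, err := svc.PutProfileObjectType(&customerprofiles.PutProfileObjectTypeInput{
        DomainName:           aws.String("my-domain"),      // placeholder
        ObjectTypeName:       aws.String("my-object-type"), // placeholder
        Description:          aws.String("Objects ingested from the S3 flow"),
        AllowProfileCreation: aws.Bool(true),
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(out)
}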

", - "SalesforceSourceProperties$EnableDynamicFieldUpdate": null, - "SalesforceSourceProperties$IncludeDeletedRecords": null + "SalesforceSourceProperties$EnableDynamicFieldUpdate": "

The flag that enables dynamic fetching of new (recently added) fields in the Salesforce objects while running a flow.

", + "SalesforceSourceProperties$IncludeDeletedRecords": "

Indicates whether Amazon AppFlow includes deleted files in the flow run.

" } }, "encryptionKey": { diff --git a/models/apis/kinesis-video-archived-media/2017-09-30/docs-2.json b/models/apis/kinesis-video-archived-media/2017-09-30/docs-2.json index 6e8b8d83b4c..4c0bf415b99 100644 --- a/models/apis/kinesis-video-archived-media/2017-09-30/docs-2.json +++ b/models/apis/kinesis-video-archived-media/2017-09-30/docs-2.json @@ -194,7 +194,7 @@ "HLSDiscontinuityMode": { "base": null, "refs": { - "GetHLSStreamingSessionURLInput$DiscontinuityMode": "

Specifies when flags marking discontinuities between fragments are added to the media playlists.

Media players typically build a timeline of media content to play, based on the timestamps of each fragment. This means that if there is any overlap or gap between fragments (as is typical if HLSFragmentSelector is set to SERVER_TIMESTAMP), the media player timeline will also have small gaps between fragments in some places, and will overwrite frames in other places. Gaps in the media player timeline can cause playback to stall and overlaps can cause playback to be jittery. When there are discontinuity flags between fragments, the media player is expected to reset the timeline, resulting in the next fragment being played immediately after the previous fragment.

The following modes are supported:

The default is ALWAYS when HLSFragmentSelector is set to SERVER_TIMESTAMP, and NEVER when it is set to PRODUCER_TIMESTAMP.

" + "GetHLSStreamingSessionURLInput$DiscontinuityMode": "

Specifies when flags marking discontinuities between fragments are added to the media playlists.

Media players typically build a timeline of media content to play, based on the timestamps of each fragment. This means that if there is any overlap or gap between fragments (as is typical if HLSFragmentSelector is set to SERVER_TIMESTAMP), the media player timeline will also have small gaps between fragments in some places, and will overwrite frames in other places. Gaps in the media player timeline can cause playback to stall and overlaps can cause playback to be jittery. When there are discontinuity flags between fragments, the media player is expected to reset the timeline, resulting in the next fragment being played immediately after the previous fragment.

The following modes are supported: ALWAYS, NEVER, and ON_DISCONTINUITY.

The default is ALWAYS when HLSFragmentSelector is set to SERVER_TIMESTAMP, and NEVER when it is set to PRODUCER_TIMESTAMP.
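To show where DiscontinuityMode fits in practice, here is a hedged aws-sdk-go sketch. GetHLSStreamingSessionURL is served from the stream's data endpoint, so the sketch first resolves that endpoint through Kinesis Video Streams; the stream name is a placeholder, and LIVE playback is just one of the possible modes.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/kinesisvideo"
    "github.com/aws/aws-sdk-go/service/kinesisvideoarchivedmedia"
)

func main() {
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))

    // Resolve the data endpoint that serves HLS session URLs for this stream.
    kv := kinesisvideo.New(sess)
    ep, err := kv.GetDataEndpoint(&kinesisvideo.GetDataEndpointInput{
        StreamName: aws.String("my-stream"), // placeholder
        APIName:    aws.String("GET_HLS_STREAMING_SESSION_URL"),
    })
    if err != nil {
        log.Fatal(err)
    }

    am := kinesisvideoarchivedmedia.New(sess,
        aws.NewConfig().WithEndpoint(aws.StringValue(ep.DataEndpoint)))
    out, err := am.GetHLSStreamingSessionURL(&kinesisvideoarchivedmedia.GetHLSStreamingSessionURLInput{
        StreamName:   aws.String("my-stream"),
        PlaybackMode: aws.String("LIVE"),
        // With SERVER_TIMESTAMP fragments the default is ALWAYS; override it
        // here so markers are only inserted at real gaps or overlaps.
        DiscontinuityMode: aws.String("ON_DISCONTINUITY"),
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.StringValue(out.HLSStreamingSessionURL))
}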

" } }, "HLSDisplayFragmentTimestamp": { diff --git a/models/apis/lookoutequipment/2020-12-15/api-2.json b/models/apis/lookoutequipment/2020-12-15/api-2.json new file mode 100644 index 00000000000..4e3ac0b737b --- /dev/null +++ b/models/apis/lookoutequipment/2020-12-15/api-2.json @@ -0,0 +1,1273 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-12-15", + "endpointPrefix":"lookoutequipment", + "jsonVersion":"1.0", + "protocol":"json", + "serviceAbbreviation":"LookoutEquipment", + "serviceFullName":"Amazon Lookout for Equipment", + "serviceId":"LookoutEquipment", + "signatureVersion":"v4", + "targetPrefix":"AWSLookoutEquipmentFrontendService", + "uid":"lookoutequipment-2020-12-15" + }, + "operations":{ + "CreateDataset":{ + "name":"CreateDataset", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDatasetRequest"}, + "output":{"shape":"CreateDatasetResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + }, + "CreateInferenceScheduler":{ + "name":"CreateInferenceScheduler", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInferenceSchedulerRequest"}, + "output":{"shape":"CreateInferenceSchedulerResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + }, + "CreateModel":{ + "name":"CreateModel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateModelRequest"}, + "output":{"shape":"CreateModelResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ] + }, + "DeleteDataset":{ + "name":"DeleteDataset", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDatasetRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ] + }, + "DeleteInferenceScheduler":{ + "name":"DeleteInferenceScheduler", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteInferenceSchedulerRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + }, + "DeleteModel":{ + "name":"DeleteModel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteModelRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"} + ] + }, + "DescribeDataIngestionJob":{ + "name":"DescribeDataIngestionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDataIngestionJobRequest"}, + "output":{"shape":"DescribeDataIngestionJobResponse"}, + "errors":[ + {"shape":"ValidationException"}, + 
{"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + }, + "DescribeDataset":{ + "name":"DescribeDataset", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDatasetRequest"}, + "output":{"shape":"DescribeDatasetResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + }, + "DescribeInferenceScheduler":{ + "name":"DescribeInferenceScheduler", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInferenceSchedulerRequest"}, + "output":{"shape":"DescribeInferenceSchedulerResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + }, + "DescribeModel":{ + "name":"DescribeModel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeModelRequest"}, + "output":{"shape":"DescribeModelResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + }, + "ListDataIngestionJobs":{ + "name":"ListDataIngestionJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDataIngestionJobsRequest"}, + "output":{"shape":"ListDataIngestionJobsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + }, + "ListDatasets":{ + "name":"ListDatasets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDatasetsRequest"}, + "output":{"shape":"ListDatasetsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + }, + "ListInferenceExecutions":{ + "name":"ListInferenceExecutions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListInferenceExecutionsRequest"}, + "output":{"shape":"ListInferenceExecutionsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + }, + "ListInferenceSchedulers":{ + "name":"ListInferenceSchedulers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListInferenceSchedulersRequest"}, + "output":{"shape":"ListInferenceSchedulersResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + }, + "ListModels":{ + "name":"ListModels", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListModelsRequest"}, + "output":{"shape":"ListModelsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + 
"errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + }, + "StartDataIngestionJob":{ + "name":"StartDataIngestionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartDataIngestionJobRequest"}, + "output":{"shape":"StartDataIngestionJobResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + }, + "StartInferenceScheduler":{ + "name":"StartInferenceScheduler", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartInferenceSchedulerRequest"}, + "output":{"shape":"StartInferenceSchedulerResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + }, + "StopInferenceScheduler":{ + "name":"StopInferenceScheduler", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopInferenceSchedulerRequest"}, + "output":{"shape":"StopInferenceSchedulerResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + }, + "UpdateInferenceScheduler":{ + "name":"UpdateInferenceScheduler", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateInferenceSchedulerRequest"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ] + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"BoundedLengthString"} + }, + "exception":true + }, + "AmazonResourceArn":{ + "type":"string", + "max":1011, + "min":1 + }, + "BoundedLengthString":{ + "type":"string", + "max":5000, + "min":1, + "pattern":"[\\P{M}\\p{M}]{1,5000}" + }, + "ComponentTimestampDelimiter":{ + "type":"string", + "max":1, + "min":0, + "pattern":"^(\\-|\\_|\\s)?$" + }, + "ConflictException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"BoundedLengthString"} + }, + "exception":true + }, + 
"CreateDatasetRequest":{ + "type":"structure", + "required":[ + "DatasetName", + "DatasetSchema", + "ClientToken" + ], + "members":{ + "DatasetName":{"shape":"DatasetName"}, + "DatasetSchema":{"shape":"DatasetSchema"}, + "ServerSideKmsKeyId":{"shape":"NameOrArn"}, + "ClientToken":{ + "shape":"IdempotenceToken", + "idempotencyToken":true + }, + "Tags":{"shape":"TagList"} + } + }, + "CreateDatasetResponse":{ + "type":"structure", + "members":{ + "DatasetName":{"shape":"DatasetName"}, + "DatasetArn":{"shape":"DatasetArn"}, + "Status":{"shape":"DatasetStatus"} + } + }, + "CreateInferenceSchedulerRequest":{ + "type":"structure", + "required":[ + "ModelName", + "InferenceSchedulerName", + "DataUploadFrequency", + "DataInputConfiguration", + "DataOutputConfiguration", + "RoleArn", + "ClientToken" + ], + "members":{ + "ModelName":{"shape":"ModelName"}, + "InferenceSchedulerName":{"shape":"InferenceSchedulerName"}, + "DataDelayOffsetInMinutes":{"shape":"DataDelayOffsetInMinutes"}, + "DataUploadFrequency":{"shape":"DataUploadFrequency"}, + "DataInputConfiguration":{"shape":"InferenceInputConfiguration"}, + "DataOutputConfiguration":{"shape":"InferenceOutputConfiguration"}, + "RoleArn":{"shape":"IamRoleArn"}, + "ServerSideKmsKeyId":{"shape":"NameOrArn"}, + "ClientToken":{ + "shape":"IdempotenceToken", + "idempotencyToken":true + }, + "Tags":{"shape":"TagList"} + } + }, + "CreateInferenceSchedulerResponse":{ + "type":"structure", + "members":{ + "InferenceSchedulerArn":{"shape":"InferenceSchedulerArn"}, + "InferenceSchedulerName":{"shape":"InferenceSchedulerName"}, + "Status":{"shape":"InferenceSchedulerStatus"} + } + }, + "CreateModelRequest":{ + "type":"structure", + "required":[ + "ModelName", + "DatasetName", + "ClientToken" + ], + "members":{ + "ModelName":{"shape":"ModelName"}, + "DatasetName":{"shape":"DatasetIdentifier"}, + "DatasetSchema":{"shape":"DatasetSchema"}, + "LabelsInputConfiguration":{"shape":"LabelsInputConfiguration"}, + "ClientToken":{ + "shape":"IdempotenceToken", + "idempotencyToken":true + }, + "TrainingDataStartTime":{"shape":"Timestamp"}, + "TrainingDataEndTime":{"shape":"Timestamp"}, + "EvaluationDataStartTime":{"shape":"Timestamp"}, + "EvaluationDataEndTime":{"shape":"Timestamp"}, + "RoleArn":{"shape":"IamRoleArn"}, + "DataPreProcessingConfiguration":{"shape":"DataPreProcessingConfiguration"}, + "ServerSideKmsKeyId":{"shape":"NameOrArn"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateModelResponse":{ + "type":"structure", + "members":{ + "ModelArn":{"shape":"ModelArn"}, + "Status":{"shape":"ModelStatus"} + } + }, + "DataDelayOffsetInMinutes":{ + "type":"long", + "max":60, + "min":0 + }, + "DataIngestionJobSummaries":{ + "type":"list", + "member":{"shape":"DataIngestionJobSummary"} + }, + "DataIngestionJobSummary":{ + "type":"structure", + "members":{ + "JobId":{"shape":"IngestionJobId"}, + "DatasetName":{"shape":"DatasetName"}, + "DatasetArn":{"shape":"DatasetArn"}, + "IngestionInputConfiguration":{"shape":"IngestionInputConfiguration"}, + "Status":{"shape":"IngestionJobStatus"} + } + }, + "DataPreProcessingConfiguration":{ + "type":"structure", + "members":{ + "TargetSamplingRate":{"shape":"TargetSamplingRate"} + } + }, + "DataUploadFrequency":{ + "type":"string", + "enum":[ + "PT5M", + "PT10M", + "PT15M", + "PT30M", + "PT1H" + ] + }, + "DatasetArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws(-[^:]+)?:lookoutequipment:[a-zA-Z0-9\\-]*:[0-9]{12}:dataset\\/.+" + }, + "DatasetIdentifier":{ + "type":"string", + "max":200, + "min":1, + 
"pattern":"^[0-9a-zA-Z_-]{1,200}$" + }, + "DatasetName":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^[0-9a-zA-Z_-]{1,200}$" + }, + "DatasetSchema":{ + "type":"structure", + "members":{ + "InlineDataSchema":{ + "shape":"InlineDataSchema", + "jsonvalue":true + } + } + }, + "DatasetStatus":{ + "type":"string", + "enum":[ + "CREATED", + "INGESTION_IN_PROGRESS", + "ACTIVE" + ] + }, + "DatasetSummaries":{ + "type":"list", + "member":{"shape":"DatasetSummary"} + }, + "DatasetSummary":{ + "type":"structure", + "members":{ + "DatasetName":{"shape":"DatasetName"}, + "DatasetArn":{"shape":"DatasetArn"}, + "Status":{"shape":"DatasetStatus"}, + "CreatedAt":{"shape":"Timestamp"} + } + }, + "DeleteDatasetRequest":{ + "type":"structure", + "required":["DatasetName"], + "members":{ + "DatasetName":{"shape":"DatasetIdentifier"} + } + }, + "DeleteInferenceSchedulerRequest":{ + "type":"structure", + "required":["InferenceSchedulerName"], + "members":{ + "InferenceSchedulerName":{"shape":"InferenceSchedulerIdentifier"} + } + }, + "DeleteModelRequest":{ + "type":"structure", + "required":["ModelName"], + "members":{ + "ModelName":{"shape":"ModelName"} + } + }, + "DescribeDataIngestionJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{"shape":"IngestionJobId"} + } + }, + "DescribeDataIngestionJobResponse":{ + "type":"structure", + "members":{ + "JobId":{"shape":"IngestionJobId"}, + "DatasetArn":{"shape":"DatasetArn"}, + "IngestionInputConfiguration":{"shape":"IngestionInputConfiguration"}, + "RoleArn":{"shape":"IamRoleArn"}, + "CreatedAt":{"shape":"Timestamp"}, + "Status":{"shape":"IngestionJobStatus"}, + "FailedReason":{"shape":"BoundedLengthString"} + } + }, + "DescribeDatasetRequest":{ + "type":"structure", + "required":["DatasetName"], + "members":{ + "DatasetName":{"shape":"DatasetIdentifier"} + } + }, + "DescribeDatasetResponse":{ + "type":"structure", + "members":{ + "DatasetName":{"shape":"DatasetName"}, + "DatasetArn":{"shape":"DatasetArn"}, + "CreatedAt":{"shape":"Timestamp"}, + "LastUpdatedAt":{"shape":"Timestamp"}, + "Status":{"shape":"DatasetStatus"}, + "Schema":{ + "shape":"InlineDataSchema", + "jsonvalue":true + }, + "ServerSideKmsKeyId":{"shape":"KmsKeyArn"}, + "IngestionInputConfiguration":{"shape":"IngestionInputConfiguration"} + } + }, + "DescribeInferenceSchedulerRequest":{ + "type":"structure", + "required":["InferenceSchedulerName"], + "members":{ + "InferenceSchedulerName":{"shape":"InferenceSchedulerIdentifier"} + } + }, + "DescribeInferenceSchedulerResponse":{ + "type":"structure", + "members":{ + "ModelArn":{"shape":"ModelArn"}, + "ModelName":{"shape":"ModelName"}, + "InferenceSchedulerName":{"shape":"InferenceSchedulerName"}, + "InferenceSchedulerArn":{"shape":"InferenceSchedulerArn"}, + "Status":{"shape":"InferenceSchedulerStatus"}, + "DataDelayOffsetInMinutes":{"shape":"DataDelayOffsetInMinutes"}, + "DataUploadFrequency":{"shape":"DataUploadFrequency"}, + "CreatedAt":{"shape":"Timestamp"}, + "UpdatedAt":{"shape":"Timestamp"}, + "DataInputConfiguration":{"shape":"InferenceInputConfiguration"}, + "DataOutputConfiguration":{"shape":"InferenceOutputConfiguration"}, + "RoleArn":{"shape":"IamRoleArn"}, + "ServerSideKmsKeyId":{"shape":"KmsKeyArn"} + } + }, + "DescribeModelRequest":{ + "type":"structure", + "required":["ModelName"], + "members":{ + "ModelName":{"shape":"ModelName"} + } + }, + "DescribeModelResponse":{ + "type":"structure", + "members":{ + "ModelName":{"shape":"ModelName"}, + "ModelArn":{"shape":"ModelArn"}, + 
"DatasetName":{"shape":"DatasetName"}, + "DatasetArn":{"shape":"DatasetArn"}, + "Schema":{ + "shape":"InlineDataSchema", + "jsonvalue":true + }, + "LabelsInputConfiguration":{"shape":"LabelsInputConfiguration"}, + "TrainingDataStartTime":{"shape":"Timestamp"}, + "TrainingDataEndTime":{"shape":"Timestamp"}, + "EvaluationDataStartTime":{"shape":"Timestamp"}, + "EvaluationDataEndTime":{"shape":"Timestamp"}, + "RoleArn":{"shape":"IamRoleArn"}, + "DataPreProcessingConfiguration":{"shape":"DataPreProcessingConfiguration"}, + "Status":{"shape":"ModelStatus"}, + "TrainingExecutionStartTime":{"shape":"Timestamp"}, + "TrainingExecutionEndTime":{"shape":"Timestamp"}, + "FailedReason":{"shape":"BoundedLengthString"}, + "ModelMetrics":{ + "shape":"ModelMetrics", + "jsonvalue":true + }, + "LastUpdatedTime":{"shape":"Timestamp"}, + "CreatedAt":{"shape":"Timestamp"}, + "ServerSideKmsKeyId":{"shape":"KmsKeyArn"} + } + }, + "FileNameTimestampFormat":{ + "type":"string", + "pattern":"^EPOCH|yyyy-MM-dd-HH-mm-ss|yyyyMMddHHmmss$" + }, + "IamRoleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws(-[^:]+)?:iam::[0-9]{12}:role/.+" + }, + "IdempotenceToken":{ + "type":"string", + "max":256, + "min":1, + "pattern":"\\p{ASCII}{1,256}" + }, + "InferenceExecutionStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "SUCCESS", + "FAILED" + ] + }, + "InferenceExecutionSummaries":{ + "type":"list", + "member":{"shape":"InferenceExecutionSummary"} + }, + "InferenceExecutionSummary":{ + "type":"structure", + "members":{ + "ModelName":{"shape":"ModelName"}, + "ModelArn":{"shape":"ModelArn"}, + "InferenceSchedulerName":{"shape":"InferenceSchedulerName"}, + "InferenceSchedulerArn":{"shape":"InferenceSchedulerArn"}, + "ScheduledStartTime":{"shape":"Timestamp"}, + "DataStartTime":{"shape":"Timestamp"}, + "DataEndTime":{"shape":"Timestamp"}, + "DataInputConfiguration":{"shape":"InferenceInputConfiguration"}, + "DataOutputConfiguration":{"shape":"InferenceOutputConfiguration"}, + "CustomerResultObject":{"shape":"S3Object"}, + "Status":{"shape":"InferenceExecutionStatus"}, + "FailedReason":{"shape":"BoundedLengthString"} + } + }, + "InferenceInputConfiguration":{ + "type":"structure", + "members":{ + "S3InputConfiguration":{"shape":"InferenceS3InputConfiguration"}, + "InputTimeZoneOffset":{"shape":"TimeZoneOffset"}, + "InferenceInputNameConfiguration":{"shape":"InferenceInputNameConfiguration"} + } + }, + "InferenceInputNameConfiguration":{ + "type":"structure", + "members":{ + "TimestampFormat":{"shape":"FileNameTimestampFormat"}, + "ComponentTimestampDelimiter":{"shape":"ComponentTimestampDelimiter"} + } + }, + "InferenceOutputConfiguration":{ + "type":"structure", + "required":["S3OutputConfiguration"], + "members":{ + "S3OutputConfiguration":{"shape":"InferenceS3OutputConfiguration"}, + "KmsKeyId":{"shape":"NameOrArn"} + } + }, + "InferenceS3InputConfiguration":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{"shape":"S3Bucket"}, + "Prefix":{"shape":"S3Prefix"} + } + }, + "InferenceS3OutputConfiguration":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{"shape":"S3Bucket"}, + "Prefix":{"shape":"S3Prefix"} + } + }, + "InferenceSchedulerArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws(-[^:]+)?:lookoutequipment:[a-zA-Z0-9\\-]*:[0-9]{12}:inference-scheduler\\/.+" + }, + "InferenceSchedulerIdentifier":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^[0-9a-zA-Z_-]{1,200}$" + }, + "InferenceSchedulerName":{ + "type":"string", 
+ "max":200, + "min":1, + "pattern":"^[0-9a-zA-Z_-]{1,200}$" + }, + "InferenceSchedulerStatus":{ + "type":"string", + "enum":[ + "PENDING", + "RUNNING", + "STOPPING", + "STOPPED" + ] + }, + "InferenceSchedulerSummaries":{ + "type":"list", + "member":{"shape":"InferenceSchedulerSummary"} + }, + "InferenceSchedulerSummary":{ + "type":"structure", + "members":{ + "ModelName":{"shape":"ModelName"}, + "ModelArn":{"shape":"ModelArn"}, + "InferenceSchedulerName":{"shape":"InferenceSchedulerName"}, + "InferenceSchedulerArn":{"shape":"InferenceSchedulerArn"}, + "Status":{"shape":"InferenceSchedulerStatus"}, + "DataDelayOffsetInMinutes":{"shape":"DataDelayOffsetInMinutes"}, + "DataUploadFrequency":{"shape":"DataUploadFrequency"} + } + }, + "IngestionInputConfiguration":{ + "type":"structure", + "required":["S3InputConfiguration"], + "members":{ + "S3InputConfiguration":{"shape":"IngestionS3InputConfiguration"} + } + }, + "IngestionJobId":{ + "type":"string", + "max":32, + "pattern":"[A-Fa-f0-9]{0,32}" + }, + "IngestionJobStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "SUCCESS", + "FAILED" + ] + }, + "IngestionS3InputConfiguration":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{"shape":"S3Bucket"}, + "Prefix":{"shape":"S3Prefix"} + } + }, + "InlineDataSchema":{ + "type":"string", + "max":1000000, + "min":1 + }, + "InternalServerException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"BoundedLengthString"} + }, + "exception":true, + "fault":true + }, + "KmsKeyArn":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"arn:aws[a-z\\-]*:kms:[a-z0-9\\-]*:\\d{12}:[\\w\\-\\/]+" + }, + "LabelsInputConfiguration":{ + "type":"structure", + "required":["S3InputConfiguration"], + "members":{ + "S3InputConfiguration":{"shape":"LabelsS3InputConfiguration"} + } + }, + "LabelsS3InputConfiguration":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{"shape":"S3Bucket"}, + "Prefix":{"shape":"S3Prefix"} + } + }, + "ListDataIngestionJobsRequest":{ + "type":"structure", + "members":{ + "DatasetName":{"shape":"DatasetName"}, + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"MaxResults"}, + "Status":{"shape":"IngestionJobStatus"} + } + }, + "ListDataIngestionJobsResponse":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"}, + "DataIngestionJobSummaries":{"shape":"DataIngestionJobSummaries"} + } + }, + "ListDatasetsRequest":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"MaxResults"}, + "DatasetNameBeginsWith":{"shape":"DatasetName"} + } + }, + "ListDatasetsResponse":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"}, + "DatasetSummaries":{"shape":"DatasetSummaries"} + } + }, + "ListInferenceExecutionsRequest":{ + "type":"structure", + "required":["InferenceSchedulerName"], + "members":{ + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"MaxResults"}, + "InferenceSchedulerName":{"shape":"InferenceSchedulerIdentifier"}, + "DataStartTimeAfter":{"shape":"Timestamp"}, + "DataEndTimeBefore":{"shape":"Timestamp"}, + "Status":{"shape":"InferenceExecutionStatus"} + } + }, + "ListInferenceExecutionsResponse":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"}, + "InferenceExecutionSummaries":{"shape":"InferenceExecutionSummaries"} + } + }, + "ListInferenceSchedulersRequest":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"MaxResults"}, + 
"InferenceSchedulerNameBeginsWith":{"shape":"InferenceSchedulerIdentifier"}, + "ModelName":{"shape":"ModelName"} + } + }, + "ListInferenceSchedulersResponse":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"}, + "InferenceSchedulerSummaries":{"shape":"InferenceSchedulerSummaries"} + } + }, + "ListModelsRequest":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"}, + "MaxResults":{"shape":"MaxResults"}, + "Status":{"shape":"ModelStatus"}, + "ModelNameBeginsWith":{"shape":"ModelName"}, + "DatasetNameBeginsWith":{"shape":"DatasetName"} + } + }, + "ListModelsResponse":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"}, + "ModelSummaries":{"shape":"ModelSummaries"} + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{"shape":"AmazonResourceArn"} + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{"shape":"TagList"} + } + }, + "MaxResults":{ + "type":"integer", + "max":500, + "min":1 + }, + "ModelArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws(-[^:]+)?:lookoutequipment:[a-zA-Z0-9\\-]*:[0-9]{12}:model\\/.+" + }, + "ModelMetrics":{ + "type":"string", + "max":50000, + "min":1 + }, + "ModelName":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^[0-9a-zA-Z_-]{1,200}$" + }, + "ModelStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "SUCCESS", + "FAILED" + ] + }, + "ModelSummaries":{ + "type":"list", + "member":{"shape":"ModelSummary"} + }, + "ModelSummary":{ + "type":"structure", + "members":{ + "ModelName":{"shape":"ModelName"}, + "ModelArn":{"shape":"ModelArn"}, + "DatasetName":{"shape":"DatasetName"}, + "DatasetArn":{"shape":"DatasetArn"}, + "Status":{"shape":"ModelStatus"}, + "CreatedAt":{"shape":"Timestamp"} + } + }, + "NameOrArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,2048}$" + }, + "NextToken":{ + "type":"string", + "max":8192, + "pattern":"\\p{ASCII}{0,8192}" + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"BoundedLengthString"} + }, + "exception":true + }, + "S3Bucket":{ + "type":"string", + "max":63, + "min":3, + "pattern":"^[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]$" + }, + "S3Key":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[\\P{M}\\p{M}]{1,1024}[^/]$" + }, + "S3Object":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{"shape":"S3Bucket"}, + "Key":{"shape":"S3Key"} + } + }, + "S3Prefix":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"(^$)|([\\P{M}\\p{M}]{1,1023}/$)" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"BoundedLengthString"} + }, + "exception":true + }, + "StartDataIngestionJobRequest":{ + "type":"structure", + "required":[ + "DatasetName", + "IngestionInputConfiguration", + "RoleArn", + "ClientToken" + ], + "members":{ + "DatasetName":{"shape":"DatasetIdentifier"}, + "IngestionInputConfiguration":{"shape":"IngestionInputConfiguration"}, + "RoleArn":{"shape":"IamRoleArn"}, + "ClientToken":{ + "shape":"IdempotenceToken", + "idempotencyToken":true + } + } + }, + "StartDataIngestionJobResponse":{ + "type":"structure", + "members":{ + "JobId":{"shape":"IngestionJobId"}, + "Status":{"shape":"IngestionJobStatus"} + } + }, + "StartInferenceSchedulerRequest":{ + "type":"structure", + 
"required":["InferenceSchedulerName"], + "members":{ + "InferenceSchedulerName":{"shape":"InferenceSchedulerIdentifier"} + } + }, + "StartInferenceSchedulerResponse":{ + "type":"structure", + "members":{ + "ModelArn":{"shape":"ModelArn"}, + "ModelName":{"shape":"ModelName"}, + "InferenceSchedulerName":{"shape":"InferenceSchedulerName"}, + "InferenceSchedulerArn":{"shape":"InferenceSchedulerArn"}, + "Status":{"shape":"InferenceSchedulerStatus"} + } + }, + "StopInferenceSchedulerRequest":{ + "type":"structure", + "required":["InferenceSchedulerName"], + "members":{ + "InferenceSchedulerName":{"shape":"InferenceSchedulerIdentifier"} + } + }, + "StopInferenceSchedulerResponse":{ + "type":"structure", + "members":{ + "ModelArn":{"shape":"ModelArn"}, + "ModelName":{"shape":"ModelName"}, + "InferenceSchedulerName":{"shape":"InferenceSchedulerName"}, + "InferenceSchedulerArn":{"shape":"InferenceSchedulerArn"}, + "Status":{"shape":"InferenceSchedulerStatus"} + } + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{"shape":"AmazonResourceArn"}, + "Tags":{"shape":"TagList"} + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[\\s\\w+-=\\.:/@]*" + }, + "TargetSamplingRate":{ + "type":"string", + "enum":[ + "PT1S", + "PT5S", + "PT10S", + "PT15S", + "PT30S", + "PT1M", + "PT5M", + "PT10M", + "PT15M", + "PT30M", + "PT1H" + ] + }, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"BoundedLengthString"} + }, + "exception":true + }, + "TimeZoneOffset":{ + "type":"string", + "pattern":"^(\\+|\\-)[0-9]{2}\\:[0-9]{2}$" + }, + "Timestamp":{"type":"timestamp"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{"shape":"AmazonResourceArn"}, + "TagKeys":{"shape":"TagKeyList"} + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateInferenceSchedulerRequest":{ + "type":"structure", + "required":["InferenceSchedulerName"], + "members":{ + "InferenceSchedulerName":{"shape":"InferenceSchedulerIdentifier"}, + "DataDelayOffsetInMinutes":{"shape":"DataDelayOffsetInMinutes"}, + "DataUploadFrequency":{"shape":"DataUploadFrequency"}, + "DataInputConfiguration":{"shape":"InferenceInputConfiguration"}, + "DataOutputConfiguration":{"shape":"InferenceOutputConfiguration"}, + "RoleArn":{"shape":"IamRoleArn"} + } + }, + "ValidationException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"BoundedLengthString"} + }, + "exception":true + } + } +} diff --git a/models/apis/lookoutequipment/2020-12-15/docs-2.json b/models/apis/lookoutequipment/2020-12-15/docs-2.json new file mode 100644 index 00000000000..bcc8acf5f9e --- /dev/null +++ b/models/apis/lookoutequipment/2020-12-15/docs-2.json @@ -0,0 +1,794 @@ +{ + "version": "2.0", + "service": "

Amazon Lookout for Equipment is a machine learning service that uses advanced analytics to identify anomalies in machines from sensor data for use in predictive maintenance.

", + "operations": { + "CreateDataset": "

Creates a container for a collection of data being ingested for analysis. The dataset contains the metadata describing where the data is and what the data actually looks like. In other words, it contains the location of the data source, the data schema, and other information. A dataset also contains any tags associated with the ingested data.
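Sketched below is a minimal CreateDataset call with aws-sdk-go. Two assumptions are flagged in comments: the jsonvalue trait on InlineDataSchema is assumed to surface as aws.JSONValue in the generated Go client, and the component/column layout of the schema is illustrative rather than a documented contract.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/lookoutequipment"
)

func main() {
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
    svc := lookoutequipment.New(sess)

    // InlineDataSchema carries the jsonvalue trait, so it is assumed to be an
    // aws.JSONValue here; the schema layout below is illustrative only.
    out, err := svc.CreateDataset(&lookoutequipment.CreateDatasetInput{
        DatasetName: aws.String("pump-sensor-data"), // placeholder
        DatasetSchema: &lookoutequipment.DatasetSchema{
            InlineDataSchema: aws.JSONValue{
                "Components": []interface{}{
                    map[string]interface{}{
                        "ComponentName": "Pump",
                        "Columns": []interface{}{
                            map[string]interface{}{"Name": "Timestamp", "Type": "DATETIME"},
                            map[string]interface{}{"Name": "Sensor1", "Type": "DOUBLE"},
                        },
                    },
                },
            },
        },
        // ClientToken is required by the API but carries the idempotencyToken
        // trait, so the SDK fills it in automatically when left unset.
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.StringValue(out.Status))
}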

", + "CreateInferenceScheduler": "

Creates a scheduled inference. Scheduling an inference means setting up a continuous real-time inference plan to analyze new measurement data. When setting up the schedule, you provide an S3 bucket location for the input data, assign it a delimiter between separate entries in the data, set an offset delay if desired, and set the frequency at which inference runs. You must also provide an S3 bucket location for the output data.
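The request shape for this call appears in the api-2.json hunk above; a hedged aws-sdk-go sketch that sets the upload frequency, offset delay, and input/output buckets might look as follows. Bucket names, the model and scheduler names, and the role ARN are placeholders.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/lookoutequipment"
)

func main() {
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
    svc := lookoutequipment.New(sess)

    out, err := svc.CreateInferenceScheduler(&lookoutequipment.CreateInferenceSchedulerInput{
        ModelName:                aws.String("pump-model"),     // placeholder
        InferenceSchedulerName:   aws.String("pump-scheduler"), // placeholder
        DataUploadFrequency:      aws.String("PT5M"),           // check for new data every 5 minutes
        DataDelayOffsetInMinutes: aws.Int64(5),                 // optional offset delay (0-60 minutes)
        DataInputConfiguration: &lookoutequipment.InferenceInputConfiguration{
            S3InputConfiguration: &lookoutequipment.InferenceS3InputConfiguration{
                Bucket: aws.String("my-input-bucket"), // placeholder
                Prefix: aws.String("input/"),
            },
            // How input file names encode the component name and timestamp.
            InferenceInputNameConfiguration: &lookoutequipment.InferenceInputNameConfiguration{
                TimestampFormat:             aws.String("yyyyMMddHHmmss"),
                ComponentTimestampDelimiter: aws.String("_"),
            },
        },
        DataOutputConfiguration: &lookoutequipment.InferenceOutputConfiguration{
            S3OutputConfiguration: &lookoutequipment.InferenceS3OutputConfiguration{
                Bucket: aws.String("my-output-bucket"), // placeholder
                Prefix: aws.String("output/"),
            },
        },
        RoleArn: aws.String("arn:aws:iam::111122223333:role/LookoutEquipmentAccess"), // placeholder
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.StringValue(out.Status))
}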

", + "CreateModel": "

Creates an ML model for data inference.

A machine-learning (ML) model is a mathematical model that finds patterns in your data. In Amazon Lookout for Equipment, the model learns the patterns of normal behavior and detects abnormal behavior that could be potential equipment failure (or maintenance events). The models are made by analyzing normal data and abnormalities in machine behavior that have already occurred.

Your model is trained using a portion of the data from your dataset and uses that data to learn patterns of normal behavior and abnormal patterns that lead to equipment failure. Another portion of the data is used to evaluate the model's accuracy.
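A hedged aws-sdk-go sketch of CreateModel that splits the dataset into training and evaluation windows and resamples to a 1-minute rate via DataPreProcessingConfiguration (see the TargetSamplingRate notes further down in this file); all names, dates, and ARNs are placeholders.

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/lookoutequipment"
)

func main() {
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
    svc := lookoutequipment.New(sess)

    // Train on the first nine months of data and hold out the rest for evaluation.
    trainStart := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
    trainEnd := time.Date(2020, 9, 30, 23, 59, 0, 0, time.UTC)
    evalStart := time.Date(2020, 10, 1, 0, 0, 0, 0, time.UTC)
    evalEnd := time.Date(2020, 12, 31, 23, 59, 0, 0, time.UTC)

    out, err := svc.CreateModel(&lookoutequipment.CreateModelInput{
        ModelName:               aws.String("pump-model"),       // placeholder
        DatasetName:             aws.String("pump-sensor-data"), // placeholder
        TrainingDataStartTime:   aws.Time(trainStart),
        TrainingDataEndTime:     aws.Time(trainEnd),
        EvaluationDataStartTime: aws.Time(evalStart),
        EvaluationDataEndTime:   aws.Time(evalEnd),
        // Resample 1-second sensor data to a 1-minute rate before training.
        DataPreProcessingConfiguration: &lookoutequipment.DataPreProcessingConfiguration{
            TargetSamplingRate: aws.String("PT1M"),
        },
        RoleArn: aws.String("arn:aws:iam::111122223333:role/LookoutEquipmentAccess"), // placeholder
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.StringValue(out.Status))
}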

", + "DeleteDataset": "

Deletes a dataset and its associated artifacts. The operation checks whether any inference scheduler or data ingestion job is currently using the dataset; if none is, the dataset, its metadata, and any associated data stored in S3 are deleted. This does not affect any models that used this dataset for training and evaluation, but it does prevent the dataset from being used in the future.

", + "DeleteInferenceScheduler": "

Deletes an inference scheduler that has been set up. Already processed output results are not affected.

", + "DeleteModel": "

Deletes an ML model currently available for Amazon Lookout for Equipment. This will prevent it from being used with an inference scheduler, even one that is already set up.

", + "DescribeDataIngestionJob": "

Provides information on a specific data ingestion job such as creation time, dataset ARN, status, and so on.

", + "DescribeDataset": "

Provides information on a specified dataset such as the schema location, status, and so on.

", + "DescribeInferenceScheduler": "

Specifies information about the inference scheduler being used, including name, model, status, and associated metadata.

", + "DescribeModel": "

Provides overall information about a specific ML model, including model name and ARN, dataset, training and evaluation information, status, and so on.

", + "ListDataIngestionJobs": "

Provides a list of all data ingestion jobs, including dataset name and ARN, S3 location of the input data, status, and so on.

", + "ListDatasets": "

Lists all datasets currently available in your account, filtering on the dataset name.

", + "ListInferenceExecutions": "

Lists all inference executions that have been performed by the specified inference scheduler.

", + "ListInferenceSchedulers": "

Retrieves a list of all inference schedulers currently available for your account.

", + "ListModels": "

Generates a list of all models in the account, including model name and ARN, dataset, and status.

", + "ListTagsForResource": "

Lists all the tags for a specified resource, including key and value.

", + "StartDataIngestionJob": "

Starts a data ingestion job. Amazon Lookout for Equipment returns the job status.

", + "StartInferenceScheduler": "

Starts an inference scheduler.

", + "StopInferenceScheduler": "

Stops an inference scheduler.

", + "TagResource": "

Associates a given tag with a resource in your account. A tag is a key-value pair that can be added to an Amazon Lookout for Equipment resource as metadata. Tags can be used to organize your resources and to search and filter by tag. Multiple tags can be added to a resource, either when you create it or later. Up to 50 tags can be associated with each resource.
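A short aws-sdk-go sketch of tagging a dataset; the resource ARN and the tag pair are placeholders.

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/lookoutequipment"
)

func main() {
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
    svc := lookoutequipment.New(sess)

    // Attach a single key-value tag to a dataset ARN (placeholder values).
    _, err := svc.TagResource(&lookoutequipment.TagResourceInput{
        ResourceArn: aws.String("arn:aws:lookoutequipment:us-east-1:111122223333:dataset/pump-sensor-data"),
        Tags: []*lookoutequipment.Tag{
            {Key: aws.String("team"), Value: aws.String("reliability")},
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    log.Println("resource tagged")
}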

", + "UntagResource": "

Removes a specific tag from a given resource. The tag is specified by its key.

", + "UpdateInferenceScheduler": "

Updates an inference scheduler.

" + }, + "shapes": { + "AccessDeniedException": { + "base": "

The request could not be completed because you do not have access to the resource.

", + "refs": { + } + }, + "AmazonResourceArn": { + "base": null, + "refs": { + "ListTagsForResourceRequest$ResourceArn": "

The Amazon Resource Name (ARN) of the resource (such as the dataset or model) that is the focus of the ListTagsForResource operation.

", + "TagResourceRequest$ResourceArn": "

The Amazon Resource Name (ARN) of the specific resource to which the tag should be associated.

", + "UntagResourceRequest$ResourceArn": "

The Amazon Resource Name (ARN) of the resource to which the tag is currently associated.

" + } + }, + "BoundedLengthString": { + "base": null, + "refs": { + "AccessDeniedException$Message": null, + "ConflictException$Message": null, + "DescribeDataIngestionJobResponse$FailedReason": "

Specifies the reason for failure when a data ingestion job has failed.

", + "DescribeModelResponse$FailedReason": "

If the training of the ML model failed, this indicates the reason for that failure.

", + "InferenceExecutionSummary$FailedReason": "

Specifies the reason for failure when an inference execution has failed.

", + "InternalServerException$Message": null, + "ResourceNotFoundException$Message": null, + "ServiceQuotaExceededException$Message": null, + "ThrottlingException$Message": null, + "ValidationException$Message": null + } + }, + "ComponentTimestampDelimiter": { + "base": null, + "refs": { + "InferenceInputNameConfiguration$ComponentTimestampDelimiter": "

Indicates the delimiter character used between items in the data.

" + } + }, + "ConflictException": { + "base": "

The request could not be completed due to a conflict with the current state of the target resource.

", + "refs": { + } + }, + "CreateDatasetRequest": { + "base": null, + "refs": { + } + }, + "CreateDatasetResponse": { + "base": null, + "refs": { + } + }, + "CreateInferenceSchedulerRequest": { + "base": null, + "refs": { + } + }, + "CreateInferenceSchedulerResponse": { + "base": null, + "refs": { + } + }, + "CreateModelRequest": { + "base": null, + "refs": { + } + }, + "CreateModelResponse": { + "base": null, + "refs": { + } + }, + "DataDelayOffsetInMinutes": { + "base": null, + "refs": { + "CreateInferenceSchedulerRequest$DataDelayOffsetInMinutes": "

A period of time (in minutes) by which inference on the data is delayed after the data starts. For instance, if you select an offset delay time of five minutes, inference will not begin on the data until the first data measurement after the five-minute mark. In that case, the inference scheduler wakes up at the configured frequency, plus the additional five-minute delay, to check the customer S3 bucket. The customer can keep uploading data at the same frequency and doesn't need to stop and restart the scheduler when uploading new data.

", + "DescribeInferenceSchedulerResponse$DataDelayOffsetInMinutes": "

A period of time (in minutes) by which inference on the data is delayed after the data starts. For instance, if you select an offset delay time of five minutes, inference will not begin on the data until the first data measurement after the five-minute mark. In that case, the inference scheduler wakes up at the configured frequency, plus the additional five-minute delay, to check the customer S3 bucket. The customer can keep uploading data at the same frequency and doesn't need to stop and restart the scheduler when uploading new data.

", + "InferenceSchedulerSummary$DataDelayOffsetInMinutes": "

A period of time (in minutes) by which inference on the data is delayed after the data starts. For instance, if an offset delay time of five minutes was selected, inference will not begin on the data until the first data measurement after the five-minute mark. In that case, the inference scheduler wakes up at the configured frequency, plus the additional five-minute delay, to check the customer S3 bucket. The customer can keep uploading data at the same frequency and doesn't need to stop and restart the scheduler when uploading new data.

", + "UpdateInferenceSchedulerRequest$DataDelayOffsetInMinutes": "

A period of time (in minutes) by which inference on the data is delayed after the data starts. For instance, if you select an offset delay time of five minutes, inference will not begin on the data until the first data measurement after the five minute mark. For example, if five minutes is selected, the inference scheduler will wake up at the configured frequency with the additional five minute delay time to check the customer S3 bucket. The customer can upload data at the same frequency and they don't need to stop and restart the scheduler when uploading new data.
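
As a concrete sketch of this field in the new Go client added by this release, the hypothetical program below raises the delay offset of an existing scheduler to ten minutes; the scheduler name is a placeholder, not a value from this diff.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lookoutequipment"
)

func main() {
	// Client built from the default credential and region chain.
	sess := session.Must(session.NewSession())
	svc := lookoutequipment.New(sess)

	// Wait ten minutes after each period boundary before inference reads
	// the input prefix, giving slow uploads time to land.
	_, err := svc.UpdateInferenceScheduler(&lookoutequipment.UpdateInferenceSchedulerInput{
		InferenceSchedulerName:   aws.String("my-scheduler"), // placeholder
		DataDelayOffsetInMinutes: aws.Int64(10),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

The later sketches in this section reuse the same `sess` and `svc` setup without repeating the imports.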

" + } + }, + "DataIngestionJobSummaries": { + "base": null, + "refs": { + "ListDataIngestionJobsResponse$DataIngestionJobSummaries": "

Specifies information about the specific data ingestion job, including dataset name and status.

" + } + }, + "DataIngestionJobSummary": { + "base": "

Provides information about a specified data ingestion job, including dataset information, data ingestion configuration, and status.

", + "refs": { + "DataIngestionJobSummaries$member": null + } + }, + "DataPreProcessingConfiguration": { + "base": "

The configuration is the TargetSamplingRate, which is the sampling rate of the data after post processing by Amazon Lookout for Equipment. For example, if you provide data that has been collected at a 1 second level and you want the system to resample the data at a 1 minute rate before training, the TargetSamplingRate is 1 minute.

When providing a value for the TargetSamplingRate, you must attach the prefix \"PT\" to the rate you want. The value for a 1 second rate is therefore PT1S, the value for a 15 minute rate is PT15M, and the value for a 1 hour rate is PT1H.

", + "refs": { + "CreateModelRequest$DataPreProcessingConfiguration": "

The configuration is the TargetSamplingRate, which is the sampling rate of the data after post processing by Amazon Lookout for Equipment. For example, if you provide data that has been collected at a 1 second level and you want the system to resample the data at a 1 minute rate before training, the TargetSamplingRate is 1 minute.

When providing a value for the TargetSamplingRate, you must attach the prefix \"PT\" to the rate you want. The value for a 1 second rate is therefore PT1S, the value for a 15 minute rate is PT15M, and the value for a 1 hour rate is PT1H.

", + "DescribeModelResponse$DataPreProcessingConfiguration": "

The configuration is the TargetSamplingRate, which is the sampling rate of the data after post processing by Amazon Lookout for Equipment. For example, if you provide data that has been collected at a 1 second level and you want the system to resample the data at a 1 minute rate before training, the TargetSamplingRate is 1 minute.

When providing a value for the TargetSamplingRate, you must attach the prefix \"PT\" to the rate you want. The value for a 1 second rate is therefore PT1S, the value for a 15 minute rate is PT15M, and the value for a 1 hour rate is PT1H.
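
To make the PT-prefixed rates concrete, here is a minimal sketch that trains a model with a one-minute target sampling rate, reusing the `svc` client from the earlier sketch; the model name, dataset name, and role ARN are placeholders.

```go
// Resample 1-second sensor data down to 1-minute intervals before training.
out, err := svc.CreateModel(&lookoutequipment.CreateModelInput{
	ModelName:   aws.String("pump-model"),   // placeholder
	DatasetName: aws.String("pump-dataset"), // placeholder
	RoleArn:     aws.String("arn:aws:iam::123456789012:role/LookoutEquipment"), // placeholder
	DataPreProcessingConfiguration: &lookoutequipment.DataPreProcessingConfiguration{
		TargetSamplingRate: aws.String("PT1M"), // "PT" prefix + rate, per the note above
	},
})
if err != nil {
	log.Fatal(err)
}
log.Println("model status:", aws.StringValue(out.Status))
```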

" + } + }, + "DataUploadFrequency": { + "base": null, + "refs": { + "CreateInferenceSchedulerRequest$DataUploadFrequency": "

How often data is uploaded to the source S3 bucket for the input data. The value chosen is the length of time between data uploads. For instance, if you select 5 minutes, Amazon Lookout for Equipment will upload the real-time data to the source bucket once every 5 minutes. This frequency also determines how often Amazon Lookout for Equipment starts a scheduled inference on your data. In this example, it starts once every 5 minutes.

", + "DescribeInferenceSchedulerResponse$DataUploadFrequency": "

Specifies how often data is uploaded to the source S3 bucket for the input data. This value is the length of time between data uploads. For instance, if you select 5 minutes, Amazon Lookout for Equipment will upload the real-time data to the source bucket once every 5 minutes. This frequency also determines how often Amazon Lookout for Equipment starts a scheduled inference on your data. In this example, it starts once every 5 minutes.

", + "InferenceSchedulerSummary$DataUploadFrequency": "

How often data is uploaded to the source S3 bucket for the input data. This value is the length of time between data uploads. For instance, if you select 5 minutes, Amazon Lookout for Equipment will upload the real-time data to the source bucket once every 5 minutes. This frequency also determines how often Amazon Lookout for Equipment starts a scheduled inference on your data. In this example, it starts once every 5 minutes.

", + "UpdateInferenceSchedulerRequest$DataUploadFrequency": "

How often data is uploaded to the source S3 bucket for the input data. The value chosen is the length of time between data uploads. For instance, if you select 5 minutes, Amazon Lookout for Equipment will upload the real-time data to the source bucket once every 5 minutes. This frequency also determines how often Amazon Lookout for Equipment starts a scheduled inference on your data. In this example, it starts once every 5 minutes.
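
Putting the upload frequency and the delay offset together, the following hedged sketch creates a scheduler that wakes every five minutes and tolerates five minutes of upload lag. All names, ARNs, and bucket values are placeholders, and the input/output shapes used here are described further down in this file.

```go
// Run inference every five minutes ("PT5M"), waiting an extra five minutes
// for data to land in the input prefix before each execution.
_, err := svc.CreateInferenceScheduler(&lookoutequipment.CreateInferenceSchedulerInput{
	InferenceSchedulerName:   aws.String("pump-scheduler"), // placeholder
	ModelName:                aws.String("pump-model"),     // placeholder
	RoleArn:                  aws.String("arn:aws:iam::123456789012:role/LookoutEquipment"), // placeholder
	DataUploadFrequency:      aws.String("PT5M"),
	DataDelayOffsetInMinutes: aws.Int64(5),
	DataInputConfiguration: &lookoutequipment.InferenceInputConfiguration{
		S3InputConfiguration: &lookoutequipment.InferenceS3InputConfiguration{
			Bucket: aws.String("my-input-bucket"), // placeholder
			Prefix: aws.String("incoming/"),
		},
	},
	DataOutputConfiguration: &lookoutequipment.InferenceOutputConfiguration{
		S3OutputConfiguration: &lookoutequipment.InferenceS3OutputConfiguration{
			Bucket: aws.String("my-output-bucket"), // placeholder
			Prefix: aws.String("results/"),
		},
	},
})
if err != nil {
	log.Fatal(err)
}
```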

" + } + }, + "DatasetArn": { + "base": null, + "refs": { + "CreateDatasetResponse$DatasetArn": "

The Amazon Resource Name (ARN) of the dataset being created.

", + "DataIngestionJobSummary$DatasetArn": "

The Amazon Resource Name (ARN) of the dataset used in the data ingestion job.

", + "DatasetSummary$DatasetArn": "

The Amazon Resource Name (ARN) of the specified dataset.

", + "DescribeDataIngestionJobResponse$DatasetArn": "

The Amazon Resource Name (ARN) of the dataset being used in the data ingestion job.

", + "DescribeDatasetResponse$DatasetArn": "

The Amazon Resource Name (ARN) of the dataset being described.

", + "DescribeModelResponse$DatasetArn": "

The Amazon Resource Name (ARN) of the dataset used to create the ML model being described.

", + "ModelSummary$DatasetArn": "

The Amazon Resource Name (ARN) of the dataset used to create the model.

" + } + }, + "DatasetIdentifier": { + "base": null, + "refs": { + "CreateModelRequest$DatasetName": "

The name of the dataset for the ML model being created.

", + "DeleteDatasetRequest$DatasetName": "

The name of the dataset to be deleted.

", + "DescribeDatasetRequest$DatasetName": "

The name of the dataset to be described.

", + "StartDataIngestionJobRequest$DatasetName": "

The name of the dataset being used by the data ingestion job.

" + } + }, + "DatasetName": { + "base": null, + "refs": { + "CreateDatasetRequest$DatasetName": "

The name of the dataset being created.

", + "CreateDatasetResponse$DatasetName": "

The name of the dataset being created.

", + "DataIngestionJobSummary$DatasetName": "

The name of the dataset used for the data ingestion job.

", + "DatasetSummary$DatasetName": "

The name of the dataset.

", + "DescribeDatasetResponse$DatasetName": "

The name of the dataset being described.

", + "DescribeModelResponse$DatasetName": "

The name of the dataset being used by the ML model being described.

", + "ListDataIngestionJobsRequest$DatasetName": "

The name of the dataset being used for the data ingestion job.

", + "ListDatasetsRequest$DatasetNameBeginsWith": "

The beginning of the name of the datasets to be listed.

", + "ListModelsRequest$DatasetNameBeginsWith": "

The beginning of the name of the dataset of the ML models to be listed.

", + "ModelSummary$DatasetName": "

The name of the dataset being used for the ML model.

" + } + }, + "DatasetSchema": { + "base": "

Provides information about the data schema used with the given dataset.

", + "refs": { + "CreateDatasetRequest$DatasetSchema": "

A JSON description of the data that is in each time series dataset, including names, column names, and data types.

", + "CreateModelRequest$DatasetSchema": "

The data schema for the ML model being created.
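
A sketch of supplying such a schema inline when creating a dataset, reusing the same `svc` client. The two-column component layout is illustrative only, and this assumes InlineDataSchema accepts the schema as a JSON string.

```go
// An illustrative schema: one sensor component with a timestamp column
// and one measurement column.
schema := `{"Components":[{"ComponentName":"Sensor1","Columns":[` +
	`{"Name":"Timestamp","Type":"DATETIME"},` +
	`{"Name":"Pressure","Type":"DOUBLE"}]}]}`

_, err := svc.CreateDataset(&lookoutequipment.CreateDatasetInput{
	DatasetName: aws.String("pump-dataset"), // placeholder
	DatasetSchema: &lookoutequipment.DatasetSchema{
		InlineDataSchema: aws.String(schema),
	},
})
if err != nil {
	log.Fatal(err)
}
```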

" + } + }, + "DatasetStatus": { + "base": null, + "refs": { + "CreateDatasetResponse$Status": "

Indicates the status of the CreateDataset operation.

", + "DatasetSummary$Status": "

Indicates the status of the dataset.

", + "DescribeDatasetResponse$Status": "

Indicates the status of the dataset.

" + } + }, + "DatasetSummaries": { + "base": null, + "refs": { + "ListDatasetsResponse$DatasetSummaries": "

Provides information about the specified dataset, including creation time, dataset ARN, and status.

" + } + }, + "DatasetSummary": { + "base": "

Contains information about the specific dataset, including name, ARN, and status.

", + "refs": { + "DatasetSummaries$member": null + } + }, + "DeleteDatasetRequest": { + "base": null, + "refs": { + } + }, + "DeleteInferenceSchedulerRequest": { + "base": null, + "refs": { + } + }, + "DeleteModelRequest": { + "base": null, + "refs": { + } + }, + "DescribeDataIngestionJobRequest": { + "base": null, + "refs": { + } + }, + "DescribeDataIngestionJobResponse": { + "base": null, + "refs": { + } + }, + "DescribeDatasetRequest": { + "base": null, + "refs": { + } + }, + "DescribeDatasetResponse": { + "base": null, + "refs": { + } + }, + "DescribeInferenceSchedulerRequest": { + "base": null, + "refs": { + } + }, + "DescribeInferenceSchedulerResponse": { + "base": null, + "refs": { + } + }, + "DescribeModelRequest": { + "base": null, + "refs": { + } + }, + "DescribeModelResponse": { + "base": null, + "refs": { + } + }, + "FileNameTimestampFormat": { + "base": null, + "refs": { + "InferenceInputNameConfiguration$TimestampFormat": "

The format of the timestamp, whether Epoch time or standard, with or without hyphens (-).

" + } + }, + "IamRoleArn": { + "base": null, + "refs": { + "CreateInferenceSchedulerRequest$RoleArn": "

The Amazon Resource Name (ARN) of a role with permission to access the data source being used for the inference.

", + "CreateModelRequest$RoleArn": "

The Amazon Resource Name (ARN) of a role with permission to access the data source being used to create the ML model.

", + "DescribeDataIngestionJobResponse$RoleArn": "

The Amazon Resource Name (ARN) of an IAM role with permission to access the data source being ingested.

", + "DescribeInferenceSchedulerResponse$RoleArn": "

The Amazon Resource Name (ARN) of a role with permission to access the data source for the inference scheduler being described.

", + "DescribeModelResponse$RoleArn": "

The Amazon Resource Name (ARN) of a role with permission to access the data source for the ML model being described.

", + "StartDataIngestionJobRequest$RoleArn": "

The Amazon Resource Name (ARN) of a role with permission to access the data source for the data ingestion job.

", + "UpdateInferenceSchedulerRequest$RoleArn": "

The Amazon Resource Name (ARN) of a role with permission to access the data source for the inference scheduler.

" + } + }, + "IdempotenceToken": { + "base": null, + "refs": { + "CreateDatasetRequest$ClientToken": "

A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one.

", + "CreateInferenceSchedulerRequest$ClientToken": "

A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one.

", + "CreateModelRequest$ClientToken": "

A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one.

", + "StartDataIngestionJobRequest$ClientToken": "

A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one.
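
Because the token makes the request idempotent, retrying with the same token cannot start a duplicate job. A sketch with placeholder names and ARNs, reusing the `svc` client from above:

```go
// Reusing the same client token means a network-level retry cannot start
// a second, duplicate ingestion job.
token := "ingest-2021-04-08-001" // any unique string; placeholder
_, err := svc.StartDataIngestionJob(&lookoutequipment.StartDataIngestionJobInput{
	DatasetName: aws.String("pump-dataset"), // placeholder
	RoleArn:     aws.String("arn:aws:iam::123456789012:role/LookoutEquipment"), // placeholder
	ClientToken: aws.String(token),
	IngestionInputConfiguration: &lookoutequipment.IngestionInputConfiguration{
		S3InputConfiguration: &lookoutequipment.IngestionS3InputConfiguration{
			Bucket: aws.String("my-ingest-bucket"), // placeholder
			Prefix: aws.String("historical/"),
		},
	},
})
if err != nil {
	log.Fatal(err)
}
```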

" + } + }, + "InferenceExecutionStatus": { + "base": null, + "refs": { + "InferenceExecutionSummary$Status": "

Indicates the status of the inference execution.

", + "ListInferenceExecutionsRequest$Status": "

The status of the inference execution.

" + } + }, + "InferenceExecutionSummaries": { + "base": null, + "refs": { + "ListInferenceExecutionsResponse$InferenceExecutionSummaries": "

Provides an array of information about the individual inference executions returned from the ListInferenceExecutions operation, including model used, inference scheduler, data configuration, and so on.

" + } + }, + "InferenceExecutionSummary": { + "base": "

Contains information about the specific inference execution, including input and output data configuration, inference scheduling information, status, and so on.

", + "refs": { + "InferenceExecutionSummaries$member": null + } + }, + "InferenceInputConfiguration": { + "base": "

Specifies configuration information for the input data for the inference, including the S3 location of the input data.

", + "refs": { + "CreateInferenceSchedulerRequest$DataInputConfiguration": "

Specifies configuration information for the input data for the inference scheduler, including delimiter, format, and dataset location.

", + "DescribeInferenceSchedulerResponse$DataInputConfiguration": "

Specifies configuration information for the input data for the inference scheduler, including delimiter, format, and dataset location.

", + "InferenceExecutionSummary$DataInputConfiguration": "

Specifies configuration information for the input data for the inference scheduler, including delimiter, format, and dataset location.

", + "UpdateInferenceSchedulerRequest$DataInputConfiguration": "

Specifies information for the input data for the inference scheduler, including delimiter, format, and dataset location.

" + } + }, + "InferenceInputNameConfiguration": { + "base": "

Specifies configuration information for the input data for the inference, including timestamp format and delimiter.

", + "refs": { + "InferenceInputConfiguration$InferenceInputNameConfiguration": "

Specifies configuration information for the input data for the inference, including timestamp format and delimiter.
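
A sketch of wiring the name configuration into an input configuration; the timestamp format, delimiter, and time zone offset shown are assumed example values, not taken from this diff.

```go
// Input files named like "Sensor1_1617840000" (component, delimiter, epoch
// timestamp). Both naming values here are illustrative.
inputCfg := &lookoutequipment.InferenceInputConfiguration{
	S3InputConfiguration: &lookoutequipment.InferenceS3InputConfiguration{
		Bucket: aws.String("my-input-bucket"), // placeholder
		Prefix: aws.String("incoming/"),
	},
	InferenceInputNameConfiguration: &lookoutequipment.InferenceInputNameConfiguration{
		TimestampFormat:             aws.String("EPOCH"), // assumed example value
		ComponentTimestampDelimiter: aws.String("_"),     // assumed example value
	},
	InputTimeZoneOffset: aws.String("+00:00"), // assumed offset format
}
_ = inputCfg // pass as DataInputConfiguration when creating or updating a scheduler
```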

" + } + }, + "InferenceOutputConfiguration": { + "base": "

Specifies configuration information for the output results from the inference, including KMS key ID and output S3 location.

", + "refs": { + "CreateInferenceSchedulerRequest$DataOutputConfiguration": "

Specifies configuration information for the output results for the inference scheduler, including the S3 location for the output.

", + "DescribeInferenceSchedulerResponse$DataOutputConfiguration": "

Specifies information for the output results for the inference scheduler, including the output S3 location.

", + "InferenceExecutionSummary$DataOutputConfiguration": "

Specifies configuration information for the output results from the inference execution, including the output S3 location.

", + "UpdateInferenceSchedulerRequest$DataOutputConfiguration": "

Specifies information for the output results from the inference scheduler, including the output S3 location.

" + } + }, + "InferenceS3InputConfiguration": { + "base": "

Specifies configuration information for the input data for the inference, including input data S3 location.

", + "refs": { + "InferenceInputConfiguration$S3InputConfiguration": "

Specifies configuration information for the input data for the inference, including the S3 location of the input data.

" + } + }, + "InferenceS3OutputConfiguration": { + "base": "

Specifies configuration information for the output results from the inference, including output S3 location.

", + "refs": { + "InferenceOutputConfiguration$S3OutputConfiguration": "

Specifies configuration information for the output results from the inference, including the output S3 location.

" + } + }, + "InferenceSchedulerArn": { + "base": null, + "refs": { + "CreateInferenceSchedulerResponse$InferenceSchedulerArn": "

The Amazon Resource Name (ARN) of the inference scheduler being created.

", + "DescribeInferenceSchedulerResponse$InferenceSchedulerArn": "

The Amazon Resource Name (ARN) of the inference scheduler being described.

", + "InferenceExecutionSummary$InferenceSchedulerArn": "

The Amazon Resource Name (ARN) of the inference scheduler being used for the inference execution.

", + "InferenceSchedulerSummary$InferenceSchedulerArn": "

The Amazon Resource Name (ARN) of the inference scheduler.

", + "StartInferenceSchedulerResponse$InferenceSchedulerArn": "

The Amazon Resource Name (ARN) of the inference scheduler being started.

", + "StopInferenceSchedulerResponse$InferenceSchedulerArn": "

The Amazon Resource Name (ARN) of the inference scheduler being stopped.

" + } + }, + "InferenceSchedulerIdentifier": { + "base": null, + "refs": { + "DeleteInferenceSchedulerRequest$InferenceSchedulerName": "

The name of the inference scheduler to be deleted.

", + "DescribeInferenceSchedulerRequest$InferenceSchedulerName": "

The name of the inference scheduler being described.

", + "ListInferenceExecutionsRequest$InferenceSchedulerName": "

The name of the inference scheduler for the inference execution listed.

", + "ListInferenceSchedulersRequest$InferenceSchedulerNameBeginsWith": "

The beginning of the name of the inference schedulers to be listed.

", + "StartInferenceSchedulerRequest$InferenceSchedulerName": "

The name of the inference scheduler to be started.

", + "StopInferenceSchedulerRequest$InferenceSchedulerName": "

The name of the inference scheduler to be stopped.

", + "UpdateInferenceSchedulerRequest$InferenceSchedulerName": "

The name of the inference scheduler to be updated.

" + } + }, + "InferenceSchedulerName": { + "base": null, + "refs": { + "CreateInferenceSchedulerRequest$InferenceSchedulerName": "

The name of the inference scheduler being created.

", + "CreateInferenceSchedulerResponse$InferenceSchedulerName": "

The name of the inference scheduler being created.

", + "DescribeInferenceSchedulerResponse$InferenceSchedulerName": "

The name of the inference scheduler being described.

", + "InferenceExecutionSummary$InferenceSchedulerName": "

The name of the inference scheduler being used for the inference execution.

", + "InferenceSchedulerSummary$InferenceSchedulerName": "

The name of the inference scheduler.

", + "StartInferenceSchedulerResponse$InferenceSchedulerName": "

The name of the inference scheduler being started.

", + "StopInferenceSchedulerResponse$InferenceSchedulerName": "

The name of the inference scheduler being stopped.

" + } + }, + "InferenceSchedulerStatus": { + "base": null, + "refs": { + "CreateInferenceSchedulerResponse$Status": "

Indicates the status of the CreateInferenceScheduler operation.

", + "DescribeInferenceSchedulerResponse$Status": "

Indicates the status of the inference scheduler.

", + "InferenceSchedulerSummary$Status": "

Indicates the status of the inference scheduler.

", + "StartInferenceSchedulerResponse$Status": "

Indicates the status of the inference scheduler.

", + "StopInferenceSchedulerResponse$Status": "

Indicates the status of the inference scheduler.

" + } + }, + "InferenceSchedulerSummaries": { + "base": null, + "refs": { + "ListInferenceSchedulersResponse$InferenceSchedulerSummaries": "

Provides information about the specified inference scheduler, including data upload frequency, model name and ARN, and status.

" + } + }, + "InferenceSchedulerSummary": { + "base": "

Contains information about the specific inference scheduler, including data delay offset, model name and ARN, status, and so on.

", + "refs": { + "InferenceSchedulerSummaries$member": null + } + }, + "IngestionInputConfiguration": { + "base": "

Specifies configuration information for the input data for the data ingestion job, including input data S3 location.

", + "refs": { + "DataIngestionJobSummary$IngestionInputConfiguration": "

Specifies information for the input data for the data ingestion job, including data S3 location parameters.

", + "DescribeDataIngestionJobResponse$IngestionInputConfiguration": "

Specifies the S3 location configuration for the data input for the data ingestion job.

", + "DescribeDatasetResponse$IngestionInputConfiguration": "

Specifies the S3 location configuration for the data input for the data ingestion job.

", + "StartDataIngestionJobRequest$IngestionInputConfiguration": "

Specifies information for the input data for the data ingestion job, including dataset S3 location.

" + } + }, + "IngestionJobId": { + "base": null, + "refs": { + "DataIngestionJobSummary$JobId": "

Indicates the job ID of the data ingestion job.

", + "DescribeDataIngestionJobRequest$JobId": "

The job ID of the data ingestion job.

", + "DescribeDataIngestionJobResponse$JobId": "

Indicates the job ID of the data ingestion job.

", + "StartDataIngestionJobResponse$JobId": "

Indicates the job ID of the data ingestion job.

" + } + }, + "IngestionJobStatus": { + "base": null, + "refs": { + "DataIngestionJobSummary$Status": "

Indicates the status of the data ingestion job.

", + "DescribeDataIngestionJobResponse$Status": "

Indicates the status of the DataIngestionJob operation.

", + "ListDataIngestionJobsRequest$Status": "

Indicates the status of the data ingestion job.

", + "StartDataIngestionJobResponse$Status": "

Indicates the status of the StartDataIngestionJob operation.

" + } + }, + "IngestionS3InputConfiguration": { + "base": "

Specifies S3 configuration information for the input data for the data ingestion job.

", + "refs": { + "IngestionInputConfiguration$S3InputConfiguration": "

The location information for the S3 bucket used for input data for the data ingestion.

" + } + }, + "InlineDataSchema": { + "base": null, + "refs": { + "DatasetSchema$InlineDataSchema": "

", + "DescribeDatasetResponse$Schema": "

A JSON description of the data that is in each time series dataset, including names, column names, and data types.

", + "DescribeModelResponse$Schema": "

A JSON description of the data that is in each time series dataset, including names, column names, and data types.

" + } + }, + "InternalServerException": { + "base": "

Processing of the request has failed because of an unknown error, exception or failure.

", + "refs": { + } + }, + "KmsKeyArn": { + "base": null, + "refs": { + "DescribeDatasetResponse$ServerSideKmsKeyId": "

Provides the identifier of the AWS KMS customer master key (CMK) used to encrypt dataset data by Amazon Lookout for Equipment.

", + "DescribeInferenceSchedulerResponse$ServerSideKmsKeyId": "

Provides the identifier of the AWS KMS customer master key (CMK) used to encrypt inference scheduler data by Amazon Lookout for Equipment.

", + "DescribeModelResponse$ServerSideKmsKeyId": "

Provides the identifier of the AWS KMS customer master key (CMK) used to encrypt model data by Amazon Lookout for Equipment.

" + } + }, + "LabelsInputConfiguration": { + "base": "

Contains the configuration information for the S3 location being used to hold label data.

", + "refs": { + "CreateModelRequest$LabelsInputConfiguration": "

The input configuration for the labels being used for the ML model that's being created.

", + "DescribeModelResponse$LabelsInputConfiguration": "

Specifies configuration information about the labels input, including its S3 location.

" + } + }, + "LabelsS3InputConfiguration": { + "base": "

The location information (prefix and bucket name) for the S3 location being used for label data.

", + "refs": { + "LabelsInputConfiguration$S3InputConfiguration": "

Contains location information for the S3 location being used for label data.
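
For instance, a hypothetical label configuration pointing at an S3 prefix (bucket and prefix are placeholders), to be set on a CreateModel call:

```go
// Point model training at labeled failure ranges stored under an S3 prefix.
labels := &lookoutequipment.LabelsInputConfiguration{
	S3InputConfiguration: &lookoutequipment.LabelsS3InputConfiguration{
		Bucket: aws.String("my-label-bucket"), // placeholder
		Prefix: aws.String("labels/"),
	},
}
_ = labels // set as LabelsInputConfiguration on a CreateModel request
```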

" + } + }, + "ListDataIngestionJobsRequest": { + "base": null, + "refs": { + } + }, + "ListDataIngestionJobsResponse": { + "base": null, + "refs": { + } + }, + "ListDatasetsRequest": { + "base": null, + "refs": { + } + }, + "ListDatasetsResponse": { + "base": null, + "refs": { + } + }, + "ListInferenceExecutionsRequest": { + "base": null, + "refs": { + } + }, + "ListInferenceExecutionsResponse": { + "base": null, + "refs": { + } + }, + "ListInferenceSchedulersRequest": { + "base": null, + "refs": { + } + }, + "ListInferenceSchedulersResponse": { + "base": null, + "refs": { + } + }, + "ListModelsRequest": { + "base": null, + "refs": { + } + }, + "ListModelsResponse": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceResponse": { + "base": null, + "refs": { + } + }, + "MaxResults": { + "base": null, + "refs": { + "ListDataIngestionJobsRequest$MaxResults": "

Specifies the maximum number of data ingestion jobs to list.

", + "ListDatasetsRequest$MaxResults": "

Specifies the maximum number of datasets to list.

", + "ListInferenceExecutionsRequest$MaxResults": "

Specifies the maximum number of inference executions to list.

", + "ListInferenceSchedulersRequest$MaxResults": "

Specifies the maximum number of inference schedulers to list.

", + "ListModelsRequest$MaxResults": "

Specifies the maximum number of ML models to list.

" + } + }, + "ModelArn": { + "base": null, + "refs": { + "CreateModelResponse$ModelArn": "

The Amazon Resource Name (ARN) of the model being created.

", + "DescribeInferenceSchedulerResponse$ModelArn": "

The Amazon Resource Name (ARN) of the ML model of the inference scheduler being described.

", + "DescribeModelResponse$ModelArn": "

The Amazon Resource Name (ARN) of the ML model being described.

", + "InferenceExecutionSummary$ModelArn": "

The Amazon Resource Name (ARN) of the ML model used for the inference execution.

", + "InferenceSchedulerSummary$ModelArn": "

The Amazon Resource Name (ARN) of the ML model used by the inference scheduler.

", + "ModelSummary$ModelArn": "

The Amazon Resource Name (ARN) of the ML model.

", + "StartInferenceSchedulerResponse$ModelArn": "

The Amazon Resource Name (ARN) of the ML model being used by the inference scheduler.

", + "StopInferenceSchedulerResponse$ModelArn": "

The Amazon Resource Name (ARN) of the ML model used by the inference scheduler being stopped.

" + } + }, + "ModelMetrics": { + "base": null, + "refs": { + "DescribeModelResponse$ModelMetrics": "

The Model Metrics show an aggregated summary of the model's performance within the evaluation time range. This is the JSON content of the metrics created when evaluating the model.

" + } + }, + "ModelName": { + "base": null, + "refs": { + "CreateInferenceSchedulerRequest$ModelName": "

The name of the previously trained ML model being used to create the inference scheduler.

", + "CreateModelRequest$ModelName": "

The name for the ML model to be created.

", + "DeleteModelRequest$ModelName": "

The name of the ML model to be deleted.

", + "DescribeInferenceSchedulerResponse$ModelName": "

The name of the ML model of the inference scheduler being described.

", + "DescribeModelRequest$ModelName": "

The name of the ML model to be described.

", + "DescribeModelResponse$ModelName": "

The name of the ML model being described.

", + "InferenceExecutionSummary$ModelName": "

The name of the ML model being used for the inference execution.

", + "InferenceSchedulerSummary$ModelName": "

The name of the ML model used for the inference scheduler.

", + "ListInferenceSchedulersRequest$ModelName": "

The name of the ML model used by the inference scheduler to be listed.

", + "ListModelsRequest$ModelNameBeginsWith": "

The beginning of the name of the ML models being listed.

", + "ModelSummary$ModelName": "

The name of the ML model.

", + "StartInferenceSchedulerResponse$ModelName": "

The name of the ML model being used by the inference scheduler.

", + "StopInferenceSchedulerResponse$ModelName": "

The name of the ML model used by the inference scheduler being stopped.

" + } + }, + "ModelStatus": { + "base": null, + "refs": { + "CreateModelResponse$Status": "

Indicates the status of the CreateModel operation.

", + "DescribeModelResponse$Status": "

Specifies the current status of the model being described. This status reflects the most recent action performed on the model.

", + "ListModelsRequest$Status": "

The status of the ML model.

", + "ModelSummary$Status": "

Indicates the status of the ML model.

" + } + }, + "ModelSummaries": { + "base": null, + "refs": { + "ListModelsResponse$ModelSummaries": "

Provides information on the specified model, including created time, model and dataset ARNs, and status.

" + } + }, + "ModelSummary": { + "base": "

Provides information about the specified ML model, including dataset and model names and ARNs, as well as status.

", + "refs": { + "ModelSummaries$member": null + } + }, + "NameOrArn": { + "base": null, + "refs": { + "CreateDatasetRequest$ServerSideKmsKeyId": "

Provides the identifier of the AWS KMS customer master key (CMK) used to encrypt dataset data by Amazon Lookout for Equipment.

", + "CreateInferenceSchedulerRequest$ServerSideKmsKeyId": "

Provides the identifier of the AWS KMS customer master key (CMK) used to encrypt inference scheduler data by Amazon Lookout for Equipment.

", + "CreateModelRequest$ServerSideKmsKeyId": "

Provides the identifier of the AWS KMS customer master key (CMK) used to encrypt model data by Amazon Lookout for Equipment.

", + "InferenceOutputConfiguration$KmsKeyId": "

The ID number for the AWS KMS key used to encrypt the inference output.

" + } + }, + "NextToken": { + "base": null, + "refs": { + "ListDataIngestionJobsRequest$NextToken": "

An opaque pagination token indicating where to continue the listing of data ingestion jobs.

", + "ListDataIngestionJobsResponse$NextToken": "

An opaque pagination token indicating where to continue the listing of data ingestion jobs.

", + "ListDatasetsRequest$NextToken": "

An opaque pagination token indicating where to continue the listing of datasets.

", + "ListDatasetsResponse$NextToken": "

An opaque pagination token indicating where to continue the listing of datasets.

", + "ListInferenceExecutionsRequest$NextToken": "

An opaque pagination token indicating where to continue the listing of inference executions.

", + "ListInferenceExecutionsResponse$NextToken": "

An opaque pagination token indicating where to continue the listing of inference executions.

", + "ListInferenceSchedulersRequest$NextToken": "

An opaque pagination token indicating where to continue the listing of inference schedulers.

", + "ListInferenceSchedulersResponse$NextToken": "

An opaque pagination token indicating where to continue the listing of inference schedulers.

", + "ListModelsRequest$NextToken": "

An opaque pagination token indicating where to continue the listing of ML models.

", + "ListModelsResponse$NextToken": "

An opaque pagination token indicating where to continue the listing of ML models.
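
Because this release also adds paginator definitions for these operations (see the paginators-1.json hunk later in this diff), the generated ...Pages helpers thread NextToken between calls automatically. A sketch using the same `svc` client:

```go
// Walk every page of models; the SDK passes NextToken from each response
// into the next request until none is returned.
err := svc.ListModelsPages(&lookoutequipment.ListModelsInput{},
	func(page *lookoutequipment.ListModelsOutput, lastPage bool) bool {
		for _, m := range page.ModelSummaries {
			log.Println(aws.StringValue(m.ModelName), aws.StringValue(m.Status))
		}
		return true // keep paging until the final page
	})
if err != nil {
	log.Fatal(err)
}
```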

" + } + }, + "ResourceNotFoundException": { + "base": "

The resource requested could not be found. Verify the resource ID and retry your request.

", + "refs": { + } + }, + "S3Bucket": { + "base": null, + "refs": { + "InferenceS3InputConfiguration$Bucket": "

The bucket containing the input dataset for the inference.

", + "InferenceS3OutputConfiguration$Bucket": "

The bucket containing the output results from the inference.

", + "IngestionS3InputConfiguration$Bucket": "

The name of the S3 bucket used for the input data for the data ingestion.

", + "LabelsS3InputConfiguration$Bucket": "

The name of the S3 bucket holding the label data.

", + "S3Object$Bucket": "

The name of the specific S3 bucket.

" + } + }, + "S3Key": { + "base": null, + "refs": { + "S3Object$Key": "

The name of the key (the object key) that uniquely identifies the S3 object within the S3 bucket.

" + } + }, + "S3Object": { + "base": "

Contains information about a specific S3 object (bucket and key).

", + "refs": { + "InferenceExecutionSummary$CustomerResultObject": "

" + } + }, + "S3Prefix": { + "base": null, + "refs": { + "InferenceS3InputConfiguration$Prefix": "

The prefix for the S3 bucket used for the input data for the inference.

", + "InferenceS3OutputConfiguration$Prefix": "

The prefix for the S3 bucket used for the output results from the inference.

", + "IngestionS3InputConfiguration$Prefix": "

The prefix for the S3 location being used for the input data for the data ingestion.

", + "LabelsS3InputConfiguration$Prefix": "

The prefix for the S3 bucket used for the label data.

" + } + }, + "ServiceQuotaExceededException": { + "base": "

Resource limitations have been exceeded.

", + "refs": { + } + }, + "StartDataIngestionJobRequest": { + "base": null, + "refs": { + } + }, + "StartDataIngestionJobResponse": { + "base": null, + "refs": { + } + }, + "StartInferenceSchedulerRequest": { + "base": null, + "refs": { + } + }, + "StartInferenceSchedulerResponse": { + "base": null, + "refs": { + } + }, + "StopInferenceSchedulerRequest": { + "base": null, + "refs": { + } + }, + "StopInferenceSchedulerResponse": { + "base": null, + "refs": { + } + }, + "Tag": { + "base": "

A tag is a key-value pair that can be added to a resource as metadata.

", + "refs": { + "TagList$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

The key for the specified tag.

", + "TagKeyList$member": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "UntagResourceRequest$TagKeys": "

Specifies the key of the tag to be removed from a specified resource.

" + } + }, + "TagList": { + "base": null, + "refs": { + "CreateDatasetRequest$Tags": "

Any tags associated with the ingested data described in the dataset.

", + "CreateInferenceSchedulerRequest$Tags": "

Any tags associated with the inference scheduler.

", + "CreateModelRequest$Tags": "

Any tags associated with the ML model being created.

", + "ListTagsForResourceResponse$Tags": "

Any tags associated with the resource.

", + "TagResourceRequest$Tags": "

The tag or tags to be associated with a specific resource. Both the tag key and value are specified.

" + } + }, + "TagResourceRequest": { + "base": null, + "refs": { + } + }, + "TagResourceResponse": { + "base": null, + "refs": { + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

The value for the specified tag.

" + } + }, + "TargetSamplingRate": { + "base": null, + "refs": { + "DataPreProcessingConfiguration$TargetSamplingRate": "

The sampling rate of the data after post processing by Amazon Lookout for Equipment. For example, if you provide data that has been collected at a 1 second level and you want the system to resample the data at a 1 minute rate before training, the TargetSamplingRate is 1 minute.

When providing a value for the TargetSamplingRate, you must attach the prefix \"PT\" to the rate you want. The value for a 1 second rate is therefore PT1S, the value for a 15 minute rate is PT15M, and the value for a 1 hour rate is PT1H.

" + } + }, + "ThrottlingException": { + "base": "

The request was denied due to request throttling.

", + "refs": { + } + }, + "TimeZoneOffset": { + "base": null, + "refs": { + "InferenceInputConfiguration$InputTimeZoneOffset": "

Indicates the difference between your time zone and Greenwich Mean Time (GMT).

" + } + }, + "Timestamp": { + "base": null, + "refs": { + "CreateModelRequest$TrainingDataStartTime": "

Indicates the time reference in the dataset that should be used to begin the subset of training data for the ML model.

", + "CreateModelRequest$TrainingDataEndTime": "

Indicates the time reference in the dataset that should be used to end the subset of training data for the ML model.

", + "CreateModelRequest$EvaluationDataStartTime": "

Indicates the time reference in the dataset that should be used to begin the subset of evaluation data for the ML model.

", + "CreateModelRequest$EvaluationDataEndTime": "

Indicates the time reference in the dataset that should be used to end the subset of evaluation data for the ML model.

", + "DatasetSummary$CreatedAt": "

The time at which the dataset was created in Amazon Lookout for Equipment.

", + "DescribeDataIngestionJobResponse$CreatedAt": "

The time at which the data ingestion job was created.

", + "DescribeDatasetResponse$CreatedAt": "

Specifies the time the dataset was created in Amazon Lookout for Equipment.

", + "DescribeDatasetResponse$LastUpdatedAt": "

Specifies the time the dataset was last updated, if it has been updated.

", + "DescribeInferenceSchedulerResponse$CreatedAt": "

Specifies the time at which the inference scheduler was created.

", + "DescribeInferenceSchedulerResponse$UpdatedAt": "

Specifies the time at which the inference scheduler was last updated, if it has been updated.

", + "DescribeModelResponse$TrainingDataStartTime": "

Indicates the time reference in the dataset that was used to begin the subset of training data for the ML model.

", + "DescribeModelResponse$TrainingDataEndTime": "

Indicates the time reference in the dataset that was used to end the subset of training data for the ML model.

", + "DescribeModelResponse$EvaluationDataStartTime": "

Indicates the time reference in the dataset that was used to begin the subset of evaluation data for the ML model.

", + "DescribeModelResponse$EvaluationDataEndTime": "

Indicates the time reference in the dataset that was used to end the subset of evaluation data for the ML model.

", + "DescribeModelResponse$TrainingExecutionStartTime": "

Indicates the time at which the training of the ML model began.

", + "DescribeModelResponse$TrainingExecutionEndTime": "

Indicates the time at which the training of the ML model was completed.

", + "DescribeModelResponse$LastUpdatedTime": "

Indicates the last time the ML model was updated. The type of update is not specified.

", + "DescribeModelResponse$CreatedAt": "

Indicates the time and date at which the ML model was created.

", + "InferenceExecutionSummary$ScheduledStartTime": "

Indicates the start time at which the inference scheduler began the specific inference execution.

", + "InferenceExecutionSummary$DataStartTime": "

Indicates the time reference in the dataset at which the inference execution began.

", + "InferenceExecutionSummary$DataEndTime": "

Indicates the time reference in the dataset at which the inference execution stopped.

", + "ListInferenceExecutionsRequest$DataStartTimeAfter": "

The time reference in the inferenced dataset after which Amazon Lookout for Equipment started the inference execution.

", + "ListInferenceExecutionsRequest$DataEndTimeBefore": "

The time reference in the inferenced dataset before which Amazon Lookout for Equipment stopped the inference execution.

", + "ModelSummary$CreatedAt": "

The time at which the specific model was created.

" + } + }, + "UntagResourceRequest": { + "base": null, + "refs": { + } + }, + "UntagResourceResponse": { + "base": null, + "refs": { + } + }, + "UpdateInferenceSchedulerRequest": { + "base": null, + "refs": { + } + }, + "ValidationException": { + "base": "

The input fails to satisfy constraints specified by Amazon Lookout for Equipment or a related AWS service that's being utilized.

", + "refs": { + } + } + } +} diff --git a/models/apis/lookoutequipment/2020-12-15/examples-1.json b/models/apis/lookoutequipment/2020-12-15/examples-1.json new file mode 100644 index 00000000000..0ea7e3b0bbe --- /dev/null +++ b/models/apis/lookoutequipment/2020-12-15/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/models/apis/lookoutequipment/2020-12-15/paginators-1.json b/models/apis/lookoutequipment/2020-12-15/paginators-1.json new file mode 100644 index 00000000000..4e4242164a8 --- /dev/null +++ b/models/apis/lookoutequipment/2020-12-15/paginators-1.json @@ -0,0 +1,29 @@ +{ + "pagination": { + "ListDataIngestionJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListDatasets": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListInferenceExecutions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListInferenceSchedulers": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListModels": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/models/apis/ram/2018-01-04/docs-2.json b/models/apis/ram/2018-01-04/docs-2.json index b2cbb314851..7ba09d326c3 100644 --- a/models/apis/ram/2018-01-04/docs-2.json +++ b/models/apis/ram/2018-01-04/docs-2.json @@ -352,7 +352,7 @@ "PrincipalArnOrIdList": { "base": null, "refs": { - "AssociateResourceShareRequest$principals": "

The principals.

", + "AssociateResourceShareRequest$principals": "

The principals to associate with the resource share. The possible values are IDs of AWS accounts and the ARNs of organizational units (OUs) or organizations from AWS Organizations.
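
A sketch of passing both kinds of principal in one call; the share ARN, account ID, and OU ARN are placeholders, and the ram package is assumed to be imported alongside the aws and session helpers used earlier.

```go
// Share with one AWS account and one organizational unit in a single call.
ramSvc := ram.New(session.Must(session.NewSession()))
_, err := ramSvc.AssociateResourceShare(&ram.AssociateResourceShareInput{
	ResourceShareArn: aws.String("arn:aws:ram:us-east-1:123456789012:resource-share/EXAMPLE"), // placeholder
	Principals: []*string{
		aws.String("210987654321"), // an AWS account ID (placeholder)
		aws.String("arn:aws:organizations::123456789012:ou/o-example/ou-example"), // an OU ARN (placeholder)
	},
})
if err != nil {
	log.Fatal(err)
}
```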

", "CreateResourceShareRequest$principals": "

The principals to associate with the resource share. The possible values are IDs of AWS accounts, the ARN of an OU or organization from AWS Organizations.

", "DisassociateResourceShareRequest$principals": "

The principals.

", "ListPrincipalsRequest$principals": "

The principals.

" @@ -637,7 +637,7 @@ "ListPermissionsRequest$nextToken": "

The token for the next page of results.

", "ListPermissionsResponse$nextToken": "

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", "ListPrincipalsRequest$resourceArn": "

The Amazon Resource Name (ARN) of the resource.

", - "ListPrincipalsRequest$resourceType": "

The resource type.

Valid values: codebuild:Project | codebuild:ReportGroup | ec2:CapacityReservation | ec2:DedicatedHost | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway | imagebuilder:Component | imagebuilder:Image | imagebuilder:ImageRecipe | license-manager:LicenseConfiguration I resource-groups:Group | rds:Cluster | route53resolver:ResolverRule

", + "ListPrincipalsRequest$resourceType": "

The resource type.

Valid values: acm-pca:CertificateAuthority | appmesh:Mesh | codebuild:Project | codebuild:ReportGroup | ec2:CapacityReservation | ec2:DedicatedHost | ec2:LocalGatewayRouteTable | ec2:PrefixList | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway | imagebuilder:Component | imagebuilder:Image | imagebuilder:ImageRecipe | imagebuilder:ContainerRecipe | glue:Catalog | glue:Database | glue:Table | license-manager:LicenseConfiguration | network-firewall:FirewallPolicy | network-firewall:StatefulRuleGroup | network-firewall:StatelessRuleGroup | outposts:Outpost | resource-groups:Group | rds:Cluster | route53resolver:FirewallRuleGroup | route53resolver:ResolverQueryLogConfig | route53resolver:ResolverRule

", "ListPrincipalsRequest$nextToken": "

The token for the next page of results.

", "ListPrincipalsResponse$nextToken": "

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", "ListResourceSharePermissionsRequest$resourceShareArn": "

The Amazon Resource Name (ARN) of the resource share.

", @@ -646,7 +646,7 @@ "ListResourceTypesRequest$nextToken": "

The token for the next page of results.

", "ListResourceTypesResponse$nextToken": "

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", "ListResourcesRequest$principal": "

The principal.

", - "ListResourcesRequest$resourceType": "

The resource type.

Valid values: codebuild:Project | codebuild:ReportGroup | ec2:CapacityReservation | ec2:DedicatedHost | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway | imagebuilder:Component | imagebuilder:Image | imagebuilder:ImageRecipe | license-manager:LicenseConfiguration I resource-groups:Group | rds:Cluster | route53resolver:ResolverRule

", + "ListResourcesRequest$resourceType": "

The resource type.

Valid values: acm-pca:CertificateAuthority | appmesh:Mesh | codebuild:Project | codebuild:ReportGroup | ec2:CapacityReservation | ec2:DedicatedHost | ec2:LocalGatewayRouteTable | ec2:PrefixList | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway | imagebuilder:Component | imagebuilder:Image | imagebuilder:ImageRecipe | imagebuilder:ContainerRecipe | glue:Catalog | glue:Database | glue:Table | license-manager:LicenseConfiguration | network-firewall:FirewallPolicy | network-firewall:StatefulRuleGroup | network-firewall:StatelessRuleGroup | outposts:Outpost | resource-groups:Group | rds:Cluster | route53resolver:FirewallRuleGroup | route53resolver:ResolverQueryLogConfig | route53resolver:ResolverRule
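
A sketch of filtering shared resources by one of these type strings, reusing the ramSvc client from the sketch above:

```go
// List only the subnets this account has shared; "ec2:Subnet" is one of
// the resource types accepted by the expanded list above.
out, err := ramSvc.ListResources(&ram.ListResourcesInput{
	ResourceOwner: aws.String(ram.ResourceOwnerSelf),
	ResourceType:  aws.String("ec2:Subnet"),
})
if err != nil {
	log.Fatal(err)
}
for _, r := range out.Resources {
	log.Println(aws.StringValue(r.Arn))
}
```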

", "ListResourcesRequest$nextToken": "

The token for the next page of results.

", "ListResourcesResponse$nextToken": "

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", "MalformedArnException$message": null, diff --git a/models/apis/robomaker/2018-06-29/api-2.json b/models/apis/robomaker/2018-06-29/api-2.json index 007a76336e5..3a9baff5ad8 100644 --- a/models/apis/robomaker/2018-06-29/api-2.json +++ b/models/apis/robomaker/2018-06-29/api-2.json @@ -1503,7 +1503,11 @@ "PostLaunchFileFailure", "BadPermissionError", "DownloadConditionFailed", - "InternalServerError" + "BadLambdaAssociated", + "InternalServerError", + "RobotApplicationDoesNotExist", + "DeploymentFleetDoesNotExist", + "FleetDeploymentTimeout" ] }, "DeploymentJobs":{ @@ -1826,6 +1830,13 @@ "min":1, "pattern":".*" }, + "ExitBehavior":{ + "type":"string", + "enum":[ + "FAIL", + "RESTART" + ] + }, "FailedAt":{"type":"timestamp"}, "FailedCreateSimulationJobRequest":{ "type":"structure", @@ -2369,7 +2380,9 @@ "applicationVersion":{"shape":"Version"}, "launchConfig":{"shape":"LaunchConfig"}, "uploadConfigurations":{"shape":"UploadConfigurations"}, - "useDefaultUploadConfigurations":{"shape":"BoxedBoolean"} + "useDefaultUploadConfigurations":{"shape":"BoxedBoolean"}, + "tools":{"shape":"Tools"}, + "useDefaultTools":{"shape":"BoxedBoolean"} } }, "RobotApplicationConfigs":{ @@ -2534,7 +2547,9 @@ "launchConfig":{"shape":"LaunchConfig"}, "uploadConfigurations":{"shape":"UploadConfigurations"}, "worldConfigs":{"shape":"WorldConfigs"}, - "useDefaultUploadConfigurations":{"shape":"BoxedBoolean"} + "useDefaultUploadConfigurations":{"shape":"BoxedBoolean"}, + "tools":{"shape":"Tools"}, + "useDefaultTools":{"shape":"BoxedBoolean"} } }, "SimulationApplicationConfigs":{ @@ -2630,6 +2645,8 @@ "InternalServiceError", "RobotApplicationCrash", "SimulationApplicationCrash", + "RobotApplicationHealthCheckFailure", + "SimulationApplicationHealthCheckFailure", "BadPermissionsRobotApplication", "BadPermissionsSimulationApplication", "BadPermissionsS3Object", @@ -2641,6 +2658,7 @@ "InvalidBundleRobotApplication", "InvalidBundleSimulationApplication", "InvalidS3Resource", + "ThrottlingError", "LimitExceeded", "MismatchedEtag", "RobotApplicationVersionMismatchedEtag", @@ -2906,6 +2924,32 @@ "error":{"httpStatusCode":400}, "exception":true }, + "Tool":{ + "type":"structure", + "required":[ + "name", + "command" + ], + "members":{ + "streamUI":{"shape":"BoxedBoolean"}, + "name":{"shape":"Name"}, + "command":{"shape":"UnrestrictedCommand"}, + "streamOutputToCloudWatch":{"shape":"BoxedBoolean"}, + "exitBehavior":{"shape":"ExitBehavior"} + } + }, + "Tools":{ + "type":"list", + "member":{"shape":"Tool"}, + "max":10, + "min":0 + }, + "UnrestrictedCommand":{ + "type":"string", + "max":1024, + "min":1, + "pattern":".*" + }, "UntagResourceRequest":{ "type":"structure", "required":[ diff --git a/models/apis/robomaker/2018-06-29/docs-2.json b/models/apis/robomaker/2018-06-29/docs-2.json index 9351d19c72c..c2cbe16b250 100644 --- a/models/apis/robomaker/2018-06-29/docs-2.json +++ b/models/apis/robomaker/2018-06-29/docs-2.json @@ -243,8 +243,12 @@ "refs": { "LoggingConfig$recordAllRosTopics": "

A boolean indicating whether to record all ROS topics.

", "RobotApplicationConfig$useDefaultUploadConfigurations": "

A Boolean indicating whether to use default upload configurations. By default, .ros and .gazebo files are uploaded when the application terminates and all ROS topics will be recorded.

If you set this value, you must specify an outputLocation.

", + "RobotApplicationConfig$useDefaultTools": "

A Boolean indicating whether to use default robot application tools. The default tools are rviz, rqt, terminal and rosbag record. The default is False.

", "SimulationApplicationConfig$useDefaultUploadConfigurations": "

A Boolean indicating whether to use default upload configurations. By default, .ros and .gazebo files are uploaded when the application terminates and all ROS topics will be recorded.

If you set this value, you must specify an outputLocation.

", - "SimulationJobRequest$useDefaultApplications": "

A Boolean indicating whether to use default applications in the simulation job. Default applications include Gazebo, rqt, rviz and terminal access.

" + "SimulationApplicationConfig$useDefaultTools": "

A Boolean indicating whether to use default simulation application tools. The default tools are rviz, rqt, terminal and rosbag record. The default is False.

", + "SimulationJobRequest$useDefaultApplications": "

A Boolean indicating whether to use default applications in the simulation job. Default applications include Gazebo, rqt, rviz and terminal access.

", + "Tool$streamUI": "

Boolean indicating whether a streaming session will be configured for the tool. If True, AWS RoboMaker will configure a connection so you can interact with the tool as it is running in the simulation. The tool must have a graphical user interface. The default is False.

", + "Tool$streamOutputToCloudWatch": "

Boolean indicating whether logs will be recorded in CloudWatch for the tool. The default is False.

" } }, "CancelDeploymentJobRequest": { @@ -802,6 +806,12 @@ "EnvironmentVariableMap$value": null } }, + "ExitBehavior": { + "base": null, + "refs": { + "Tool$exitBehavior": "

Exit behavior determines what happens when your tool quits running. RESTART will cause your tool to be restarted. FAIL will cause your job to exit. The default is RESTART.

" + } + }, "FailedAt": { "base": null, "refs": { @@ -1228,6 +1238,7 @@ "SimulationApplicationSummary$name": "

The name of the simulation application.

", "SimulationJob$name": "

The name of the simulation job.

", "SimulationJobSummary$name": "

The name of the simulation job.

", + "Tool$name": "

The name of the tool.

", "UpdateRobotApplicationResponse$name": "

The name of the robot application.

", "UpdateSimulationApplicationResponse$name": "

The name of the simulation application.

", "UploadConfiguration$name": "

A prefix that specifies where files will be uploaded in Amazon S3. It is appended to the simulation output location to determine the final path.

For example, if your simulation output location is s3://my-bucket and your upload configuration name is robot-test, your files will be uploaded to s3://my-bucket/<simid>/<runid>/robot-test.

" @@ -1905,6 +1916,25 @@ "refs": { } }, + "Tool": { + "base": "

Information about a tool. Tools are used in a simulation job.

", + "refs": { + "Tools$member": null + } + }, + "Tools": { + "base": null, + "refs": { + "RobotApplicationConfig$tools": "

Information about tools configured for the robot application.

", + "SimulationApplicationConfig$tools": "

Information about tools configured for the simulation application.

" + } + }, + "UnrestrictedCommand": { + "base": null, + "refs": { + "Tool$command": "

Command-line arguments for the tool. It must include the tool executable name.
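
A sketch of assembling a custom tool and attaching it to a simulation application config; the tool name, command, ARN, and launch values are placeholders, and the robomaker package is assumed to be imported like the others.

```go
// A custom tool attached to a simulation application: stream its GUI,
// send its output to CloudWatch, and restart it if it exits.
tool := &robomaker.Tool{
	Name:                     aws.String("plotjuggler"), // placeholder tool name
	Command:                  aws.String("plotjuggler --layout /layouts/sim.xml"), // placeholder; includes the executable name
	StreamUI:                 aws.Bool(true),
	StreamOutputToCloudWatch: aws.Bool(true),
	ExitBehavior:             aws.String(robomaker.ExitBehaviorRestart),
}

simApp := &robomaker.SimulationApplicationConfig{
	Application: aws.String("arn:aws:robomaker:us-east-1:123456789012:simulation-application/EXAMPLE"), // placeholder
	LaunchConfig: &robomaker.LaunchConfig{
		PackageName: aws.String("sim_pkg"),    // placeholder
		LaunchFile:  aws.String("sim.launch"), // placeholder
	},
	Tools:           []*robomaker.Tool{tool},
	UseDefaultTools: aws.Bool(false), // custom tools instead of rviz/rqt/terminal/rosbag
}
_ = simApp // pass inside a CreateSimulationJob request
```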

" + } + }, "UntagResourceRequest": { "base": null, "refs": { @@ -1948,7 +1978,7 @@ "UploadBehavior": { "base": null, "refs": { - "UploadConfiguration$uploadBehavior": "

Specifies how to upload the files:

UPLOAD_ON_TERMINATE

Matching files are uploaded once the simulation enters the TERMINATING state. Matching files are not uploaded until all of your code (including tools) have stopped.

If there is a problem uploading a file, the upload is retried. If problems persist, no further upload attempts will be made.

UPLOAD_ROLLING_AUTO_REMOVE

Matching files are uploaded as they are created. They are deleted after they are uploaded. The specified path is checked every 5 seconds. A final check is made when all of your code (including tools) have stopped.

" + "UploadConfiguration$uploadBehavior": "

Specifies when to upload the files:

UPLOAD_ON_TERMINATE

Matching files are uploaded once the simulation enters the TERMINATING state. Matching files are not uploaded until all of your code (including tools) has stopped.

If there is a problem uploading a file, the upload is retried. If problems persist, no further upload attempts will be made.

UPLOAD_ROLLING_AUTO_REMOVE

Matching files are uploaded as they are created. They are deleted after they are uploaded. The specified path is checked every 5 seconds. A final check is made when all of your code (including tools) has stopped.
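
A sketch contrasting the two behaviors in one upload configuration list; the paths are placeholder globs.

```go
// Upload rolling log files as they appear, and keep the final bundle of
// everything else until the job terminates.
uploads := []*robomaker.UploadConfiguration{
	{
		Name:           aws.String("rolling-logs"), // S3 prefix under the output location
		Path:           aws.String("/var/log/**"),  // placeholder glob
		UploadBehavior: aws.String(robomaker.UploadBehaviorUploadRollingAutoRemove),
	},
	{
		Name:           aws.String("final-artifacts"),
		Path:           aws.String("/artifacts/**"), // placeholder glob
		UploadBehavior: aws.String(robomaker.UploadBehaviorUploadOnTerminate),
	},
}
_ = uploads // set as UploadConfigurations on a robot or simulation application config
```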

" } }, "UploadConfiguration": { diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 36467c82801..74f5f380536 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -4636,6 +4636,21 @@ "us-west-2" : { } } }, + "personalize" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "pinpoint" : { "defaults" : { "credentialScope" : { @@ -7513,7 +7528,8 @@ }, "lakeformation" : { "endpoints" : { - "cn-north-1" : { } + "cn-north-1" : { }, + "cn-northwest-1" : { } } }, "lambda" : { @@ -7575,6 +7591,11 @@ "isRegionalized" : false, "partitionEndpoint" : "aws-cn-global" }, + "personalize" : { + "endpoints" : { + "cn-north-1" : { } + } + }, "polly" : { "endpoints" : { "cn-northwest-1" : { } diff --git a/service/appstream/api.go b/service/appstream/api.go index 95891252066..73fa64816dd 100644 --- a/service/appstream/api.go +++ b/service/appstream/api.go @@ -82,7 +82,7 @@ func (c *AppStream) AssociateFleetRequest(input *AssociateFleetInput) (req *requ // An API error occurred. Wait a few minutes and try again. // // * IncompatibleImageException -// The image does not support storage connectors. +// The image can't be updated because it's not compatible for updates. // // * OperationNotPermittedException // The attempted operation is not permitted. @@ -347,7 +347,7 @@ func (c *AppStream) CopyImageRequest(input *CopyImageInput) (req *request.Reques // assistance, contact AWS Support. // // * IncompatibleImageException -// The image does not support storage connectors. +// The image can't be updated because it's not compatible for updates. // // See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CopyImage func (c *AppStream) CopyImage(input *CopyImageInput) (*CopyImageOutput, error) { @@ -554,7 +554,7 @@ func (c *AppStream) CreateFleetRequest(input *CreateFleetInput) (req *request.Re // Indicates an incorrect combination of parameters, or a missing parameter. // // * IncompatibleImageException -// The image does not support storage connectors. +// The image can't be updated because it's not compatible for updates. // // * OperationNotPermittedException // The attempted operation is not permitted. @@ -670,7 +670,7 @@ func (c *AppStream) CreateImageBuilderRequest(input *CreateImageBuilderInput) (r // Indicates an incorrect combination of parameters, or a missing parameter. // // * IncompatibleImageException -// The image does not support storage connectors. +// The image can't be updated because it's not compatible for updates. // // * OperationNotPermittedException // The attempted operation is not permitted. @@ -968,6 +968,109 @@ func (c *AppStream) CreateStreamingURLWithContext(ctx aws.Context, input *Create return out, req.Send() } +const opCreateUpdatedImage = "CreateUpdatedImage" + +// CreateUpdatedImageRequest generates a "aws/request.Request" representing the +// client's request for the CreateUpdatedImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See CreateUpdatedImage for more information on using the CreateUpdatedImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateUpdatedImageRequest method. +// req, resp := client.CreateUpdatedImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateUpdatedImage +func (c *AppStream) CreateUpdatedImageRequest(input *CreateUpdatedImageInput) (req *request.Request, output *CreateUpdatedImageOutput) { + op := &request.Operation{ + Name: opCreateUpdatedImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateUpdatedImageInput{} + } + + output = &CreateUpdatedImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateUpdatedImage API operation for Amazon AppStream. +// +// Creates a new image with the latest Windows operating system updates, driver +// updates, and AppStream 2.0 agent software. +// +// For more information, see the "Update an Image by Using Managed AppStream +// 2.0 Image Updates" section in Administer Your AppStream 2.0 Images (https://docs.aws.amazon.com/appstream2/latest/developerguide/administer-images.html), +// in the Amazon AppStream 2.0 Administration Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon AppStream's +// API operation CreateUpdatedImage for usage and error information. +// +// Returned Error Types: +// * LimitExceededException +// The requested limit exceeds the permitted limit for an account. +// +// * InvalidAccountStatusException +// The resource cannot be created because your AWS account is suspended. For +// assistance, contact AWS Support. +// +// * OperationNotPermittedException +// The attempted operation is not permitted. +// +// * ResourceAlreadyExistsException +// The specified resource already exists. +// +// * ResourceNotFoundException +// The specified resource was not found. +// +// * ConcurrentModificationException +// An API error occurred. Wait a few minutes and try again. +// +// * IncompatibleImageException +// The image can't be updated because it's not compatible for updates. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateUpdatedImage +func (c *AppStream) CreateUpdatedImage(input *CreateUpdatedImageInput) (*CreateUpdatedImageOutput, error) { + req, out := c.CreateUpdatedImageRequest(input) + return out, req.Send() +} + +// CreateUpdatedImageWithContext is the same as CreateUpdatedImage with the addition of +// the ability to pass a context and additional request options. +// +// See CreateUpdatedImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *AppStream) CreateUpdatedImageWithContext(ctx aws.Context, input *CreateUpdatedImageInput, opts ...request.Option) (*CreateUpdatedImageOutput, error) { + req, out := c.CreateUpdatedImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateUsageReportSubscription = "CreateUsageReportSubscription" // CreateUsageReportSubscriptionRequest generates a "aws/request.Request" representing the @@ -3505,7 +3608,7 @@ func (c *AppStream) StartImageBuilderRequest(input *StartImageBuilderInput) (req // assistance, contact AWS Support. // // * IncompatibleImageException -// The image does not support storage connectors. +// The image can't be updated because it's not compatible for updates. // // See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/StartImageBuilder func (c *AppStream) StartImageBuilder(input *StartImageBuilderInput) (*StartImageBuilderOutput, error) { @@ -4064,7 +4167,7 @@ func (c *AppStream) UpdateFleetRequest(input *UpdateFleetInput) (req *request.Re // An API error occurred. Wait a few minutes and try again. // // * IncompatibleImageException -// The image does not support storage connectors. +// The image can't be updated because it's not compatible for updates. // // * OperationNotPermittedException // The attempted operation is not permitted. @@ -4251,7 +4354,7 @@ func (c *AppStream) UpdateStackRequest(input *UpdateStackInput) (req *request.Re // assistance, contact AWS Support. // // * IncompatibleImageException -// The image does not support storage connectors. +// The image can't be updated because it's not compatible for updates. // // * OperationNotPermittedException // The attempted operation is not permitted. @@ -6177,6 +6280,146 @@ func (s *CreateStreamingURLOutput) SetStreamingURL(v string) *CreateStreamingURL return s } +type CreateUpdatedImageInput struct { + _ struct{} `type:"structure"` + + // Indicates whether to display the status of image update availability before + // AppStream 2.0 initiates the process of creating a new updated image. If this + // value is set to true, AppStream 2.0 displays whether image updates are available. + // If this value is set to false, AppStream 2.0 initiates the process of creating + // a new updated image without displaying whether image updates are available. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The name of the image to update. + // + // ExistingImageName is a required field + ExistingImageName *string `locationName:"existingImageName" type:"string" required:"true"` + + // The description to display for the new image. + NewImageDescription *string `locationName:"newImageDescription" type:"string"` + + // The name to display for the new image. + NewImageDisplayName *string `locationName:"newImageDisplayName" type:"string"` + + // The name of the new image. The name must be unique within the AWS account + // and Region. + // + // NewImageName is a required field + NewImageName *string `locationName:"newImageName" type:"string" required:"true"` + + // The tags to associate with the new image. A tag is a key-value pair, and + // the value is optional. For example, Environment=Test. If you do not specify + // a value, Environment=. + // + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following special characters: + // + // _ . : / = + \ - @ + // + // If you do not specify a value, the value is set to an empty string. 
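// A short sketch of the tag format described below, with hypothetical values:
//
//    input := &appstream.CreateUpdatedImageInput{
//        ExistingImageName: aws.String("my-image"),
//        NewImageName:      aws.String("my-image-updated"),
//        NewImageTags: map[string]*string{
//            "Environment": aws.String("Test"), // Environment=Test
//            "Team":        aws.String(""),     // value omitted, stored as an empty string
//        },
//    }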
+ // + // For more information about tags, see Tagging Your Resources (https://docs.aws.amazon.com/appstream2/latest/developerguide/tagging-basic.html) + // in the Amazon AppStream 2.0 Administration Guide. + NewImageTags map[string]*string `locationName:"newImageTags" min:"1" type:"map"` +} + +// String returns the string representation +func (s CreateUpdatedImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUpdatedImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateUpdatedImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateUpdatedImageInput"} + if s.ExistingImageName == nil { + invalidParams.Add(request.NewErrParamRequired("ExistingImageName")) + } + if s.NewImageName == nil { + invalidParams.Add(request.NewErrParamRequired("NewImageName")) + } + if s.NewImageTags != nil && len(s.NewImageTags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NewImageTags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateUpdatedImageInput) SetDryRun(v bool) *CreateUpdatedImageInput { + s.DryRun = &v + return s +} + +// SetExistingImageName sets the ExistingImageName field's value. +func (s *CreateUpdatedImageInput) SetExistingImageName(v string) *CreateUpdatedImageInput { + s.ExistingImageName = &v + return s +} + +// SetNewImageDescription sets the NewImageDescription field's value. +func (s *CreateUpdatedImageInput) SetNewImageDescription(v string) *CreateUpdatedImageInput { + s.NewImageDescription = &v + return s +} + +// SetNewImageDisplayName sets the NewImageDisplayName field's value. +func (s *CreateUpdatedImageInput) SetNewImageDisplayName(v string) *CreateUpdatedImageInput { + s.NewImageDisplayName = &v + return s +} + +// SetNewImageName sets the NewImageName field's value. +func (s *CreateUpdatedImageInput) SetNewImageName(v string) *CreateUpdatedImageInput { + s.NewImageName = &v + return s +} + +// SetNewImageTags sets the NewImageTags field's value. +func (s *CreateUpdatedImageInput) SetNewImageTags(v map[string]*string) *CreateUpdatedImageInput { + s.NewImageTags = v + return s +} + +type CreateUpdatedImageOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether a new image can be created. + CanUpdateImage *bool `locationName:"canUpdateImage" type:"boolean"` + + // Describes an image. + Image *Image `locationName:"image" type:"structure"` +} + +// String returns the string representation +func (s CreateUpdatedImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUpdatedImageOutput) GoString() string { + return s.String() +} + +// SetCanUpdateImage sets the CanUpdateImage field's value. +func (s *CreateUpdatedImageOutput) SetCanUpdateImage(v bool) *CreateUpdatedImageOutput { + s.CanUpdateImage = &v + return s +} + +// SetImage sets the Image field's value. +func (s *CreateUpdatedImageOutput) SetImage(v *Image) *CreateUpdatedImageOutput { + s.Image = v + return s +} + type CreateUsageReportSubscriptionInput struct { _ struct{} `type:"structure"` } @@ -8529,6 +8772,9 @@ type Image struct { // Indicates whether an image builder can be launched from this image. ImageBuilderSupported *bool `type:"boolean"` + // Describes the errors that are returned when a new image can't be created. 
+ ImageErrors []*ResourceError `type:"list"` + // The permissions to provide to the destination AWS account for the specified // image. ImagePermissions *ImagePermissions `type:"structure"` @@ -8620,6 +8866,12 @@ func (s *Image) SetImageBuilderSupported(v bool) *Image { return s } +// SetImageErrors sets the ImageErrors field's value. +func (s *Image) SetImageErrors(v []*ResourceError) *Image { + s.ImageErrors = v + return s +} + // SetImagePermissions sets the ImagePermissions field's value. func (s *Image) SetImagePermissions(v *ImagePermissions) *Image { s.ImagePermissions = v @@ -9019,7 +9271,7 @@ func (s *ImageStateChangeReason) SetMessage(v string) *ImageStateChangeReason { return s } -// The image does not support storage connectors. +// The image can't be updated because it's not compatible for updates. type IncompatibleImageException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -12228,6 +12480,12 @@ const ( // ImageBuilderStateFailed is a ImageBuilderState enum value ImageBuilderStateFailed = "FAILED" + + // ImageBuilderStateUpdating is a ImageBuilderState enum value + ImageBuilderStateUpdating = "UPDATING" + + // ImageBuilderStatePendingQualification is a ImageBuilderState enum value + ImageBuilderStatePendingQualification = "PENDING_QUALIFICATION" ) // ImageBuilderState_Values returns all elements of the ImageBuilderState enum @@ -12242,6 +12500,8 @@ func ImageBuilderState_Values() []string { ImageBuilderStateSnapshotting, ImageBuilderStateDeleting, ImageBuilderStateFailed, + ImageBuilderStateUpdating, + ImageBuilderStatePendingQualification, } } diff --git a/service/appstream/appstreamiface/interface.go b/service/appstream/appstreamiface/interface.go index 52b3dac17f5..bdcf59a2517 100644 --- a/service/appstream/appstreamiface/interface.go +++ b/service/appstream/appstreamiface/interface.go @@ -100,6 +100,10 @@ type AppStreamAPI interface { CreateStreamingURLWithContext(aws.Context, *appstream.CreateStreamingURLInput, ...request.Option) (*appstream.CreateStreamingURLOutput, error) CreateStreamingURLRequest(*appstream.CreateStreamingURLInput) (*request.Request, *appstream.CreateStreamingURLOutput) + CreateUpdatedImage(*appstream.CreateUpdatedImageInput) (*appstream.CreateUpdatedImageOutput, error) + CreateUpdatedImageWithContext(aws.Context, *appstream.CreateUpdatedImageInput, ...request.Option) (*appstream.CreateUpdatedImageOutput, error) + CreateUpdatedImageRequest(*appstream.CreateUpdatedImageInput) (*request.Request, *appstream.CreateUpdatedImageOutput) + CreateUsageReportSubscription(*appstream.CreateUsageReportSubscriptionInput) (*appstream.CreateUsageReportSubscriptionOutput, error) CreateUsageReportSubscriptionWithContext(aws.Context, *appstream.CreateUsageReportSubscriptionInput, ...request.Option) (*appstream.CreateUsageReportSubscriptionOutput, error) CreateUsageReportSubscriptionRequest(*appstream.CreateUsageReportSubscriptionInput) (*request.Request, *appstream.CreateUsageReportSubscriptionOutput) diff --git a/service/appstream/errors.go b/service/appstream/errors.go index f1570a479d9..2d21300b97f 100644 --- a/service/appstream/errors.go +++ b/service/appstream/errors.go @@ -17,7 +17,7 @@ const ( // ErrCodeIncompatibleImageException for service response error code // "IncompatibleImageException". // - // The image does not support storage connectors. + // The image can't be updated because it's not compatible for updates. 
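// Handling sketch (hypothetical call site): matching this error code with the
// awserr type-assertion pattern used throughout this package's examples.
//
//    if _, err := svc.CreateUpdatedImage(input); err != nil {
//        if aerr, ok := err.(awserr.Error); ok &&
//            aerr.Code() == appstream.ErrCodeIncompatibleImageException {
//            // the base image does not qualify for managed image updates
//        }
//    }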
ErrCodeIncompatibleImageException = "IncompatibleImageException" // ErrCodeInvalidAccountStatusException for service response error code diff --git a/service/autoscaling/api.go b/service/autoscaling/api.go index bfc4770fc45..33dcca64fed 100644 --- a/service/autoscaling/api.go +++ b/service/autoscaling/api.go @@ -1608,6 +1608,100 @@ func (c *AutoScaling) DeleteTagsWithContext(ctx aws.Context, input *DeleteTagsIn return out, req.Send() } +const opDeleteWarmPool = "DeleteWarmPool" + +// DeleteWarmPoolRequest generates a "aws/request.Request" representing the +// client's request for the DeleteWarmPool operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteWarmPool for more information on using the DeleteWarmPool +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteWarmPoolRequest method. +// req, resp := client.DeleteWarmPoolRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DeleteWarmPool +func (c *AutoScaling) DeleteWarmPoolRequest(input *DeleteWarmPoolInput) (req *request.Request, output *DeleteWarmPoolOutput) { + op := &request.Operation{ + Name: opDeleteWarmPool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteWarmPoolInput{} + } + + output = &DeleteWarmPoolOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteWarmPool API operation for Auto Scaling. +// +// Deletes the warm pool for the specified Auto Scaling group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Auto Scaling's +// API operation DeleteWarmPool for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededFault "LimitExceeded" +// You have already reached a limit for your Amazon EC2 Auto Scaling resources +// (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. +// +// * ErrCodeResourceContentionFault "ResourceContention" +// You already have a pending update to an Amazon EC2 Auto Scaling resource +// (for example, an Auto Scaling group, instance, or load balancer). +// +// * ErrCodeScalingActivityInProgressFault "ScalingActivityInProgress" +// The operation can't be performed because there are scaling activities in +// progress. +// +// * ErrCodeResourceInUseFault "ResourceInUse" +// The operation can't be performed because the resource is in use. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DeleteWarmPool +func (c *AutoScaling) DeleteWarmPool(input *DeleteWarmPoolInput) (*DeleteWarmPoolOutput, error) { + req, out := c.DeleteWarmPoolRequest(input) + return out, req.Send() +} + +// DeleteWarmPoolWithContext is the same as DeleteWarmPool with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteWarmPool for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DeleteWarmPoolWithContext(ctx aws.Context, input *DeleteWarmPoolInput, opts ...request.Option) (*DeleteWarmPoolOutput, error) { + req, out := c.DeleteWarmPoolRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeAccountLimits = "DescribeAccountLimits" // DescribeAccountLimitsRequest generates a "aws/request.Request" representing the @@ -3698,6 +3792,95 @@ func (c *AutoScaling) DescribeTerminationPolicyTypesWithContext(ctx aws.Context, return out, req.Send() } +const opDescribeWarmPool = "DescribeWarmPool" + +// DescribeWarmPoolRequest generates a "aws/request.Request" representing the +// client's request for the DescribeWarmPool operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeWarmPool for more information on using the DescribeWarmPool +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeWarmPoolRequest method. +// req, resp := client.DescribeWarmPoolRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeWarmPool +func (c *AutoScaling) DescribeWarmPoolRequest(input *DescribeWarmPoolInput) (req *request.Request, output *DescribeWarmPoolOutput) { + op := &request.Operation{ + Name: opDescribeWarmPool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeWarmPoolInput{} + } + + output = &DescribeWarmPoolOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeWarmPool API operation for Auto Scaling. +// +// Describes a warm pool and its instances. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Auto Scaling's +// API operation DescribeWarmPool for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidNextToken "InvalidNextToken" +// The NextToken value is not valid. +// +// * ErrCodeLimitExceededFault "LimitExceeded" +// You have already reached a limit for your Amazon EC2 Auto Scaling resources +// (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). 
+// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. +// +// * ErrCodeResourceContentionFault "ResourceContention" +// You already have a pending update to an Amazon EC2 Auto Scaling resource +// (for example, an Auto Scaling group, instance, or load balancer). +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeWarmPool +func (c *AutoScaling) DescribeWarmPool(input *DescribeWarmPoolInput) (*DescribeWarmPoolOutput, error) { + req, out := c.DescribeWarmPoolRequest(input) + return out, req.Send() +} + +// DescribeWarmPoolWithContext is the same as DescribeWarmPool with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeWarmPool for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeWarmPoolWithContext(ctx aws.Context, input *DescribeWarmPoolInput, opts ...request.Option) (*DescribeWarmPoolOutput, error) { + req, out := c.DescribeWarmPoolRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDetachInstances = "DetachInstances" // DetachInstancesRequest generates a "aws/request.Request" representing the @@ -4806,6 +4989,107 @@ func (c *AutoScaling) PutScheduledUpdateGroupActionWithContext(ctx aws.Context, return out, req.Send() } +const opPutWarmPool = "PutWarmPool" + +// PutWarmPoolRequest generates a "aws/request.Request" representing the +// client's request for the PutWarmPool operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutWarmPool for more information on using the PutWarmPool +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutWarmPoolRequest method. +// req, resp := client.PutWarmPoolRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/PutWarmPool +func (c *AutoScaling) PutWarmPoolRequest(input *PutWarmPoolInput) (req *request.Request, output *PutWarmPoolOutput) { + op := &request.Operation{ + Name: opPutWarmPool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutWarmPoolInput{} + } + + output = &PutWarmPoolOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutWarmPool API operation for Auto Scaling. +// +// Adds a warm pool to the specified Auto Scaling group. A warm pool is a pool +// of pre-initialized EC2 instances that sits alongside the Auto Scaling group. 
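// Pagination sketch for the DescribeWarmPool operation above (illustrative;
// no generated Pages helper appears in this change, so NextToken is threaded
// manually). Assumes autoscaling.New(session.New()) as in examples_test.go.
//
//    input := &autoscaling.DescribeWarmPoolInput{
//        AutoScalingGroupName: aws.String("my-auto-scaling-group"),
//        MaxRecords:           aws.Int64(50),
//    }
//    for {
//        page, err := svc.DescribeWarmPool(input)
//        if err != nil {
//            break
//        }
//        for _, inst := range page.Instances {
//            fmt.Println(aws.StringValue(inst.InstanceId), aws.StringValue(inst.LifecycleState))
//        }
//        if page.NextToken == nil {
//            break
//        }
//        input.NextToken = page.NextToken
//    }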
+// Whenever your application needs to scale out, the Auto Scaling group can +// draw on the warm pool to meet its new desired capacity. For more information, +// see Warm pools for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-warm-pools.html) +// in the Amazon EC2 Auto Scaling User Guide. +// +// This operation must be called from the Region in which the Auto Scaling group +// was created. This operation cannot be called on an Auto Scaling group that +// has a mixed instances policy or a launch template or launch configuration +// that requests Spot Instances. +// +// You can view the instances in the warm pool using the DescribeWarmPool API +// call. If you are no longer using a warm pool, you can delete it by calling +// the DeleteWarmPool API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Auto Scaling's +// API operation PutWarmPool for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededFault "LimitExceeded" +// You have already reached a limit for your Amazon EC2 Auto Scaling resources +// (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. +// +// * ErrCodeResourceContentionFault "ResourceContention" +// You already have a pending update to an Amazon EC2 Auto Scaling resource +// (for example, an Auto Scaling group, instance, or load balancer). +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/PutWarmPool +func (c *AutoScaling) PutWarmPool(input *PutWarmPoolInput) (*PutWarmPoolOutput, error) { + req, out := c.PutWarmPoolRequest(input) + return out, req.Send() +} + +// PutWarmPoolWithContext is the same as PutWarmPool with the addition of +// the ability to pass a context and additional request options. +// +// See PutWarmPool for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) PutWarmPoolWithContext(ctx aws.Context, input *PutWarmPoolInput, opts ...request.Option) (*PutWarmPoolOutput, error) { + req, out := c.PutWarmPoolRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opRecordLifecycleActionHeartbeat = "RecordLifecycleActionHeartbeat" // RecordLifecycleActionHeartbeatRequest generates a "aws/request.Request" representing the @@ -4874,7 +5158,7 @@ func (c *AutoScaling) RecordLifecycleActionHeartbeatRequest(input *RecordLifecyc // // If you finish before the timeout period ends, complete the lifecycle action. // -// For more information, see Auto Scaling lifecycle (https://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroupLifecycle.html) +// For more information, see Amazon EC2 Auto Scaling lifecycle hooks (https://docs.aws.amazon.com/autoscaling/ec2/userguide/lifecycle-hooks.html) // in the Amazon EC2 Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -5220,7 +5504,8 @@ func (c *AutoScaling) SetInstanceProtectionRequest(input *SetInstanceProtectionI // SetInstanceProtection API operation for Auto Scaling. // -// Updates the instance protection settings of the specified instances. +// Updates the instance protection settings of the specified instances. This +// operation cannot be called on instances in a warm pool. // // For more information about preventing instances that are part of an Auto // Scaling group from terminating on scale in, see Instance scale-in protection @@ -5315,8 +5600,8 @@ func (c *AutoScaling) StartInstanceRefreshRequest(input *StartInstanceRefreshInp // StartInstanceRefresh API operation for Auto Scaling. // // Starts a new instance refresh operation, which triggers a rolling replacement -// of all previously launched instances in the Auto Scaling group with a new -// group of instances. +// of previously launched instances in the Auto Scaling group with a new group +// of instances. // // If successful, this call creates a new instance refresh request with a unique // ID that you can use to track its progress. To query its status, call the @@ -5509,7 +5794,7 @@ func (c *AutoScaling) TerminateInstanceInAutoScalingGroupRequest(input *Terminat // TerminateInstanceInAutoScalingGroup API operation for Auto Scaling. // // Terminates the specified instance and optionally adjusts the desired group -// size. +// size. This operation cannot be called on instances in a warm pool. // // This call simply makes a termination request. The instance is not terminated // immediately. When an instance is terminated, the instance status changes @@ -7513,7 +7798,8 @@ type DeleteAutoScalingGroupInput struct { // Specifies that the group is to be deleted along with all instances associated // with the group, without waiting for all instances to be terminated. This - // parameter also deletes any lifecycle actions associated with the group. + // parameter also deletes any outstanding lifecycle actions associated with + // the group. ForceDelete *bool `type:"boolean"` } @@ -7970,45 +8256,112 @@ func (s DeleteTagsOutput) GoString() string { return s.String() } -type DescribeAccountLimitsInput struct { +type DeleteWarmPoolInput struct { _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + // + // AutoScalingGroupName is a required field + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // Specifies that the warm pool is to be deleted along with all instances associated + // with the warm pool, without waiting for all instances to be terminated. This + // parameter also deletes any outstanding lifecycle actions associated with + // the warm pool instances. + ForceDelete *bool `type:"boolean"` } // String returns the string representation -func (s DescribeAccountLimitsInput) String() string { +func (s DeleteWarmPoolInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeAccountLimitsInput) GoString() string { +func (s DeleteWarmPoolInput) GoString() string { return s.String() } -type DescribeAccountLimitsOutput struct { - _ struct{} `type:"structure"` +// Validate inspects the fields of the type to determine if they are valid. 
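// Usage sketch (hypothetical values) for the ForceDelete flag documented above:
//
//    _, err := svc.DeleteWarmPool(&autoscaling.DeleteWarmPoolInput{
//        AutoScalingGroupName: aws.String("my-auto-scaling-group"),
//        ForceDelete:          aws.Bool(true), // delete without waiting for warm instances to terminate
//    })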
+func (s *DeleteWarmPoolInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteWarmPoolInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } - // The maximum number of groups allowed for your AWS account. The default is - // 200 groups per AWS Region. - MaxNumberOfAutoScalingGroups *int64 `type:"integer"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // The maximum number of launch configurations allowed for your AWS account. - // The default is 200 launch configurations per AWS Region. - MaxNumberOfLaunchConfigurations *int64 `type:"integer"` +// SetAutoScalingGroupName sets the AutoScalingGroupName field's value. +func (s *DeleteWarmPoolInput) SetAutoScalingGroupName(v string) *DeleteWarmPoolInput { + s.AutoScalingGroupName = &v + return s +} - // The current number of groups for your AWS account. - NumberOfAutoScalingGroups *int64 `type:"integer"` +// SetForceDelete sets the ForceDelete field's value. +func (s *DeleteWarmPoolInput) SetForceDelete(v bool) *DeleteWarmPoolInput { + s.ForceDelete = &v + return s +} - // The current number of launch configurations for your AWS account. - NumberOfLaunchConfigurations *int64 `type:"integer"` +type DeleteWarmPoolOutput struct { + _ struct{} `type:"structure"` } // String returns the string representation -func (s DescribeAccountLimitsOutput) String() string { +func (s DeleteWarmPoolOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeAccountLimitsOutput) GoString() string { +func (s DeleteWarmPoolOutput) GoString() string { + return s.String() +} + +type DescribeAccountLimitsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeAccountLimitsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountLimitsInput) GoString() string { + return s.String() +} + +type DescribeAccountLimitsOutput struct { + _ struct{} `type:"structure"` + + // The maximum number of groups allowed for your AWS account. The default is + // 200 groups per AWS Region. + MaxNumberOfAutoScalingGroups *int64 `type:"integer"` + + // The maximum number of launch configurations allowed for your AWS account. + // The default is 200 launch configurations per AWS Region. + MaxNumberOfLaunchConfigurations *int64 `type:"integer"` + + // The current number of groups for your AWS account. + NumberOfAutoScalingGroups *int64 `type:"integer"` + + // The current number of launch configurations for your AWS account. + NumberOfLaunchConfigurations *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeAccountLimitsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountLimitsOutput) GoString() string { return s.String() } @@ -9395,6 +9748,109 @@ func (s *DescribeTerminationPolicyTypesOutput) SetTerminationPolicyTypes(v []*st return s } +type DescribeWarmPoolInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. 
+ // + // AutoScalingGroupName is a required field + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The maximum number of instances to return with this call. The maximum value + // is 50. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of instances to return. (You received this token + // from a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeWarmPoolInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWarmPoolInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeWarmPoolInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeWarmPoolInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutoScalingGroupName sets the AutoScalingGroupName field's value. +func (s *DescribeWarmPoolInput) SetAutoScalingGroupName(v string) *DescribeWarmPoolInput { + s.AutoScalingGroupName = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeWarmPoolInput) SetMaxRecords(v int64) *DescribeWarmPoolInput { + s.MaxRecords = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeWarmPoolInput) SetNextToken(v string) *DescribeWarmPoolInput { + s.NextToken = &v + return s +} + +type DescribeWarmPoolOutput struct { + _ struct{} `type:"structure"` + + // The instances that are currently in the warm pool. + Instances []*Instance `type:"list"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` + + // The warm pool configuration details. + WarmPoolConfiguration *WarmPoolConfiguration `type:"structure"` +} + +// String returns the string representation +func (s DescribeWarmPoolOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWarmPoolOutput) GoString() string { + return s.String() +} + +// SetInstances sets the Instances field's value. +func (s *DescribeWarmPoolOutput) SetInstances(v []*Instance) *DescribeWarmPoolOutput { + s.Instances = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeWarmPoolOutput) SetNextToken(v string) *DescribeWarmPoolOutput { + s.NextToken = &v + return s +} + +// SetWarmPoolConfiguration sets the WarmPoolConfiguration field's value. +func (s *DescribeWarmPoolOutput) SetWarmPoolConfiguration(v *WarmPoolConfiguration) *DescribeWarmPoolOutput { + s.WarmPoolConfiguration = v + return s +} + type DetachInstancesInput struct { _ struct{} `type:"structure"` @@ -9658,6 +10114,20 @@ type DisableMetricsCollectionInput struct { // // * GroupTotalCapacity // + // * WarmPoolDesiredCapacity + // + // * WarmPoolWarmedCapacity + // + // * WarmPoolPendingCapacity + // + // * WarmPoolTerminatingCapacity + // + // * WarmPoolTotalCapacity + // + // * GroupAndWarmPoolDesiredCapacity + // + // * GroupAndWarmPoolTotalCapacity + // // If you omit this parameter, all metrics are disabled. 
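// Sketch (hypothetical values): enabling a subset of the warm pool metrics
// listed above. "1Minute" is the only granularity EnableMetricsCollection
// accepts for Amazon EC2 Auto Scaling.
//
//    _, err := svc.EnableMetricsCollection(&autoscaling.EnableMetricsCollectionInput{
//        AutoScalingGroupName: aws.String("my-auto-scaling-group"),
//        Granularity:          aws.String("1Minute"),
//        Metrics: []*string{
//            aws.String("WarmPoolDesiredCapacity"),
//            aws.String("WarmPoolTotalCapacity"),
//        },
//    })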
 	Metrics []*string `type:"list"`
 }
 
@@ -9898,6 +10368,22 @@ type EnableMetricsCollectionInput struct {
 	//
 	//    * GroupTotalCapacity
 	//
+	// The warm pools feature supports the following additional metrics:
+	//
+	//    * WarmPoolDesiredCapacity
+	//
+	//    * WarmPoolWarmedCapacity
+	//
+	//    * WarmPoolPendingCapacity
+	//
+	//    * WarmPoolTerminatingCapacity
+	//
+	//    * WarmPoolTotalCapacity
+	//
+	//    * GroupAndWarmPoolDesiredCapacity
+	//
+	//    * GroupAndWarmPoolTotalCapacity
+	//
 	// If you omit this parameter, all metrics are enabled.
 	Metrics []*string `type:"list"`
 }
 
@@ -10000,6 +10486,20 @@ type EnabledMetric struct {
 	//    * GroupTerminatingCapacity
 	//
 	//    * GroupTotalCapacity
+	//
+	//    * WarmPoolDesiredCapacity
+	//
+	//    * WarmPoolWarmedCapacity
+	//
+	//    * WarmPoolPendingCapacity
+	//
+	//    * WarmPoolTerminatingCapacity
+	//
+	//    * WarmPoolTotalCapacity
+	//
+	//    * GroupAndWarmPoolDesiredCapacity
+	//
+	//    * GroupAndWarmPoolTotalCapacity
 	Metric *string `min:"1" type:"string"`
 }
 
@@ -10487,6 +10987,12 @@ type Group struct {
 
 	// One or more subnet IDs, if applicable, separated by commas.
 	VPCZoneIdentifier *string `min:"1" type:"string"`
+
+	// The warm pool for the group.
+	WarmPoolConfiguration *WarmPoolConfiguration `type:"structure"`
+
+	// The current size of the warm pool.
+	WarmPoolSize *int64 `type:"integer"`
 }
 
 // String returns the string representation
@@ -10661,6 +11167,18 @@ func (s *Group) SetVPCZoneIdentifier(v string) *Group {
 	return s
 }
 
+// SetWarmPoolConfiguration sets the WarmPoolConfiguration field's value.
+func (s *Group) SetWarmPoolConfiguration(v *WarmPoolConfiguration) *Group {
+	s.WarmPoolConfiguration = v
+	return s
+}
+
+// SetWarmPoolSize sets the WarmPoolSize field's value.
+func (s *Group) SetWarmPoolSize(v int64) *Group {
+	s.WarmPoolSize = &v
+	return s
+}
+
 // Describes an EC2 instance.
 type Instance struct {
 	_ struct{} `type:"structure"`
@@ -10819,7 +11337,9 @@ type InstanceDetails struct {
 	//
 	// Valid Values: Pending | Pending:Wait | Pending:Proceed | Quarantined | InService
 	//    | Terminating | Terminating:Wait | Terminating:Proceed | Terminated | Detaching
-	//    | Detached | EnteringStandby | Standby
+	//    | Detached | EnteringStandby | Standby | Warmed:Pending | Warmed:Pending:Wait
+	//    | Warmed:Pending:Proceed | Warmed:Terminating | Warmed:Terminating:Wait |
+	//    Warmed:Terminating:Proceed | Warmed:Terminated | Warmed:Stopped | Warmed:Running
 	//
 	// LifecycleState is a required field
 	LifecycleState *string `min:"1" type:"string" required:"true"`
@@ -11031,6 +11551,9 @@ type InstanceRefresh struct {
 	// added to the percentage complete.
 	PercentageComplete *int64 `type:"integer"`
 
+	// Additional progress details for an Auto Scaling group that has a warm pool.
+	ProgressDetails *InstanceRefreshProgressDetails `type:"structure"`
+
 	// The date and time at which the instance refresh began.
 	StartTime *time.Time `type:"timestamp"`
 
@@ -11096,6 +11619,12 @@ func (s *InstanceRefresh) SetPercentageComplete(v int64) *InstanceRefresh {
 	return s
 }
 
+// SetProgressDetails sets the ProgressDetails field's value.
+func (s *InstanceRefresh) SetProgressDetails(v *InstanceRefreshProgressDetails) *InstanceRefresh {
+	s.ProgressDetails = v
+	return s
+}
+
 // SetStartTime sets the StartTime field's value.
 func (s *InstanceRefresh) SetStartTime(v time.Time) *InstanceRefresh {
 	s.StartTime = &v
@@ -11114,6 +11643,119 @@ func (s *InstanceRefresh) SetStatusReason(v string) *InstanceRefresh {
 	return s
 }
 
+// Reports the progress of an instance refresh on instances that are in the
+// Auto Scaling group.
+type InstanceRefreshLivePoolProgress struct {
+	_ struct{} `type:"structure"`
+
+	// The number of instances remaining to update.
+	InstancesToUpdate *int64 `type:"integer"`
+
+	// The percentage of instances in the Auto Scaling group that have been replaced.
+	// For each instance replacement, Amazon EC2 Auto Scaling tracks the instance's
+	// health status and warm-up time. When the instance's health status changes
+	// to healthy and the specified warm-up time passes, the instance is considered
+	// updated and added to the percentage complete.
+	PercentageComplete *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s InstanceRefreshLivePoolProgress) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceRefreshLivePoolProgress) GoString() string {
+	return s.String()
+}
+
+// SetInstancesToUpdate sets the InstancesToUpdate field's value.
+func (s *InstanceRefreshLivePoolProgress) SetInstancesToUpdate(v int64) *InstanceRefreshLivePoolProgress {
+	s.InstancesToUpdate = &v
+	return s
+}
+
+// SetPercentageComplete sets the PercentageComplete field's value.
+func (s *InstanceRefreshLivePoolProgress) SetPercentageComplete(v int64) *InstanceRefreshLivePoolProgress {
+	s.PercentageComplete = &v
+	return s
+}
+
+// Reports the progress of an instance refresh on an Auto Scaling group that
+// has a warm pool. This includes separate details for instances in the warm
+// pool and instances in the Auto Scaling group (the live pool).
+type InstanceRefreshProgressDetails struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates the progress of an instance refresh on instances that are in
+	// the Auto Scaling group.
+	LivePoolProgress *InstanceRefreshLivePoolProgress `type:"structure"`
+
+	// Indicates the progress of an instance refresh on instances that are in
+	// the warm pool.
+	WarmPoolProgress *InstanceRefreshWarmPoolProgress `type:"structure"`
+}
+
+// String returns the string representation
+func (s InstanceRefreshProgressDetails) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceRefreshProgressDetails) GoString() string {
+	return s.String()
+}
+
+// SetLivePoolProgress sets the LivePoolProgress field's value.
+func (s *InstanceRefreshProgressDetails) SetLivePoolProgress(v *InstanceRefreshLivePoolProgress) *InstanceRefreshProgressDetails {
+	s.LivePoolProgress = v
+	return s
+}
+
+// SetWarmPoolProgress sets the WarmPoolProgress field's value.
+func (s *InstanceRefreshProgressDetails) SetWarmPoolProgress(v *InstanceRefreshWarmPoolProgress) *InstanceRefreshProgressDetails {
+	s.WarmPoolProgress = v
+	return s
+}
+
+// Reports the progress of an instance refresh on instances that are in the
+// warm pool.
+type InstanceRefreshWarmPoolProgress struct {
+	_ struct{} `type:"structure"`
+
+	// The number of instances remaining to update.
+	InstancesToUpdate *int64 `type:"integer"`
+
+	// The percentage of instances in the warm pool that have been replaced. For
+	// each instance replacement, Amazon EC2 Auto Scaling tracks the instance's
+	// health status and warm-up time. When the instance's health status changes
+	// to healthy and the specified warm-up time passes, the instance is considered
+	// updated and added to the percentage complete.
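// Reading sketch (illustrative): these progress structures surface through
// InstanceRefresh.ProgressDetails, for example via the DescribeInstanceRefreshes
// operation defined elsewhere in this package.
//
//    out, err := svc.DescribeInstanceRefreshes(&autoscaling.DescribeInstanceRefreshesInput{
//        AutoScalingGroupName: aws.String("my-auto-scaling-group"),
//    })
//    if err == nil {
//        for _, r := range out.InstanceRefreshes {
//            if pd := r.ProgressDetails; pd != nil && pd.WarmPoolProgress != nil {
//                fmt.Println(aws.Int64Value(pd.WarmPoolProgress.PercentageComplete))
//            }
//        }
//    }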
+ PercentageComplete *int64 `type:"integer"` +} + +// String returns the string representation +func (s InstanceRefreshWarmPoolProgress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceRefreshWarmPoolProgress) GoString() string { + return s.String() +} + +// SetInstancesToUpdate sets the InstancesToUpdate field's value. +func (s *InstanceRefreshWarmPoolProgress) SetInstancesToUpdate(v int64) *InstanceRefreshWarmPoolProgress { + s.InstancesToUpdate = &v + return s +} + +// SetPercentageComplete sets the PercentageComplete field's value. +func (s *InstanceRefreshWarmPoolProgress) SetPercentageComplete(v int64) *InstanceRefreshWarmPoolProgress { + s.PercentageComplete = &v + return s +} + // Describes an instances distribution for an Auto Scaling group with a MixedInstancesPolicy. // // The instances distribution specifies the distribution of On-Demand Instances @@ -12136,6 +12778,20 @@ type MetricCollectionType struct { // * GroupTerminatingCapacity // // * GroupTotalCapacity + // + // * WarmPoolDesiredCapacity + // + // * WarmPoolWarmedCapacity + // + // * WarmPoolPendingCapacity + // + // * WarmPoolTerminatingCapacity + // + // * WarmPoolTotalCapacity + // + // * GroupAndWarmPoolDesiredCapacity + // + // * GroupAndWarmPoolTotalCapacity Metric *string `min:"1" type:"string"` } @@ -13188,6 +13844,111 @@ func (s PutScheduledUpdateGroupActionOutput) GoString() string { return s.String() } +type PutWarmPoolInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + // + // AutoScalingGroupName is a required field + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // Specifies the total maximum number of instances that are allowed to be in + // the warm pool or in any state except Terminated for the Auto Scaling group. + // This is an optional property. Specify it only if the warm pool size should + // not be determined by the difference between the group's maximum capacity + // and its desired capacity. + // + // Amazon EC2 Auto Scaling will launch and maintain either the difference between + // the group's maximum capacity and its desired capacity, if a value for MaxGroupPreparedCapacity + // is not specified, or the difference between the MaxGroupPreparedCapacity + // and the desired capacity, if a value for MaxGroupPreparedCapacity is specified. + // + // The size of the warm pool is dynamic. Only when MaxGroupPreparedCapacity + // and MinSize are set to the same value does the warm pool have an absolute + // size. + // + // If the desired capacity of the Auto Scaling group is higher than the MaxGroupPreparedCapacity, + // the capacity of the warm pool is 0. To remove a value that you previously + // set, include the property but specify -1 for the value. + MaxGroupPreparedCapacity *int64 `type:"integer"` + + // Specifies the minimum number of instances to maintain in the warm pool. This + // helps you to ensure that there is always a certain number of warmed instances + // available to handle traffic spikes. Defaults to 0 if not specified. + MinSize *int64 `type:"integer"` + + // Sets the instance state to transition to after the lifecycle hooks finish. + // Valid values are: Stopped (default) or Running. 
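// Usage sketch (hypothetical values) tying the sizing fields above together;
// per the documentation, -1 clears a previously set MaxGroupPreparedCapacity.
//
//    _, err := svc.PutWarmPool(&autoscaling.PutWarmPoolInput{
//        AutoScalingGroupName:     aws.String("my-auto-scaling-group"),
//        MinSize:                  aws.Int64(4),
//        MaxGroupPreparedCapacity: aws.Int64(10), // or aws.Int64(-1) to remove a prior value
//        PoolState:                aws.String("Stopped"),
//    })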
+ PoolState *string `type:"string" enum:"WarmPoolState"` +} + +// String returns the string representation +func (s PutWarmPoolInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutWarmPoolInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutWarmPoolInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutWarmPoolInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.MaxGroupPreparedCapacity != nil && *s.MaxGroupPreparedCapacity < -1 { + invalidParams.Add(request.NewErrParamMinValue("MaxGroupPreparedCapacity", -1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutoScalingGroupName sets the AutoScalingGroupName field's value. +func (s *PutWarmPoolInput) SetAutoScalingGroupName(v string) *PutWarmPoolInput { + s.AutoScalingGroupName = &v + return s +} + +// SetMaxGroupPreparedCapacity sets the MaxGroupPreparedCapacity field's value. +func (s *PutWarmPoolInput) SetMaxGroupPreparedCapacity(v int64) *PutWarmPoolInput { + s.MaxGroupPreparedCapacity = &v + return s +} + +// SetMinSize sets the MinSize field's value. +func (s *PutWarmPoolInput) SetMinSize(v int64) *PutWarmPoolInput { + s.MinSize = &v + return s +} + +// SetPoolState sets the PoolState field's value. +func (s *PutWarmPoolInput) SetPoolState(v string) *PutWarmPoolInput { + s.PoolState = &v + return s +} + +type PutWarmPoolOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutWarmPoolOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutWarmPoolOutput) GoString() string { + return s.String() +} + type RecordLifecycleActionHeartbeatInput struct { _ struct{} `type:"structure"` @@ -14946,6 +15707,59 @@ func (s UpdateAutoScalingGroupOutput) GoString() string { return s.String() } +// Describes a warm pool configuration. +type WarmPoolConfiguration struct { + _ struct{} `type:"structure"` + + // The total maximum number of instances that are allowed to be in the warm + // pool or in any state except Terminated for the Auto Scaling group. + MaxGroupPreparedCapacity *int64 `type:"integer"` + + // The minimum number of instances to maintain in the warm pool. + MinSize *int64 `type:"integer"` + + // The instance state to transition to after the lifecycle actions are complete: + // Stopped or Running. + PoolState *string `type:"string" enum:"WarmPoolState"` + + // The status of a warm pool that is marked for deletion. + Status *string `type:"string" enum:"WarmPoolStatus"` +} + +// String returns the string representation +func (s WarmPoolConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WarmPoolConfiguration) GoString() string { + return s.String() +} + +// SetMaxGroupPreparedCapacity sets the MaxGroupPreparedCapacity field's value. +func (s *WarmPoolConfiguration) SetMaxGroupPreparedCapacity(v int64) *WarmPoolConfiguration { + s.MaxGroupPreparedCapacity = &v + return s +} + +// SetMinSize sets the MinSize field's value. 
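// Inspection sketch (illustrative): WarmPoolConfiguration is returned both by
// DescribeWarmPool and on the Group itself.
//
//    out, err := svc.DescribeWarmPool(&autoscaling.DescribeWarmPoolInput{
//        AutoScalingGroupName: aws.String("my-auto-scaling-group"),
//    })
//    if err == nil && out.WarmPoolConfiguration != nil {
//        cfg := out.WarmPoolConfiguration
//        fmt.Println(aws.Int64Value(cfg.MinSize),
//            aws.StringValue(cfg.PoolState),
//            aws.StringValue(cfg.Status)) // reports PendingDelete when marked for deletion
//    }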
+func (s *WarmPoolConfiguration) SetMinSize(v int64) *WarmPoolConfiguration { + s.MinSize = &v + return s +} + +// SetPoolState sets the PoolState field's value. +func (s *WarmPoolConfiguration) SetPoolState(v string) *WarmPoolConfiguration { + s.PoolState = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *WarmPoolConfiguration) SetStatus(v string) *WarmPoolConfiguration { + s.Status = &v + return s +} + const ( // InstanceMetadataEndpointStateDisabled is a InstanceMetadataEndpointState enum value InstanceMetadataEndpointStateDisabled = "disabled" @@ -15049,6 +15863,33 @@ const ( // LifecycleStateStandby is a LifecycleState enum value LifecycleStateStandby = "Standby" + + // LifecycleStateWarmedPending is a LifecycleState enum value + LifecycleStateWarmedPending = "Warmed:Pending" + + // LifecycleStateWarmedPendingWait is a LifecycleState enum value + LifecycleStateWarmedPendingWait = "Warmed:Pending:Wait" + + // LifecycleStateWarmedPendingProceed is a LifecycleState enum value + LifecycleStateWarmedPendingProceed = "Warmed:Pending:Proceed" + + // LifecycleStateWarmedTerminating is a LifecycleState enum value + LifecycleStateWarmedTerminating = "Warmed:Terminating" + + // LifecycleStateWarmedTerminatingWait is a LifecycleState enum value + LifecycleStateWarmedTerminatingWait = "Warmed:Terminating:Wait" + + // LifecycleStateWarmedTerminatingProceed is a LifecycleState enum value + LifecycleStateWarmedTerminatingProceed = "Warmed:Terminating:Proceed" + + // LifecycleStateWarmedTerminated is a LifecycleState enum value + LifecycleStateWarmedTerminated = "Warmed:Terminated" + + // LifecycleStateWarmedStopped is a LifecycleState enum value + LifecycleStateWarmedStopped = "Warmed:Stopped" + + // LifecycleStateWarmedRunning is a LifecycleState enum value + LifecycleStateWarmedRunning = "Warmed:Running" ) // LifecycleState_Values returns all elements of the LifecycleState enum @@ -15067,6 +15908,15 @@ func LifecycleState_Values() []string { LifecycleStateDetached, LifecycleStateEnteringStandby, LifecycleStateStandby, + LifecycleStateWarmedPending, + LifecycleStateWarmedPendingWait, + LifecycleStateWarmedPendingProceed, + LifecycleStateWarmedTerminating, + LifecycleStateWarmedTerminatingWait, + LifecycleStateWarmedTerminatingProceed, + LifecycleStateWarmedTerminated, + LifecycleStateWarmedStopped, + LifecycleStateWarmedRunning, } } @@ -15189,3 +16039,31 @@ func ScalingActivityStatusCode_Values() []string { ScalingActivityStatusCodeCancelled, } } + +const ( + // WarmPoolStateStopped is a WarmPoolState enum value + WarmPoolStateStopped = "Stopped" + + // WarmPoolStateRunning is a WarmPoolState enum value + WarmPoolStateRunning = "Running" +) + +// WarmPoolState_Values returns all elements of the WarmPoolState enum +func WarmPoolState_Values() []string { + return []string{ + WarmPoolStateStopped, + WarmPoolStateRunning, + } +} + +const ( + // WarmPoolStatusPendingDelete is a WarmPoolStatus enum value + WarmPoolStatusPendingDelete = "PendingDelete" +) + +// WarmPoolStatus_Values returns all elements of the WarmPoolStatus enum +func WarmPoolStatus_Values() []string { + return []string{ + WarmPoolStatusPendingDelete, + } +} diff --git a/service/autoscaling/autoscalingiface/interface.go b/service/autoscaling/autoscalingiface/interface.go index d936ac137f3..5b9e92f1979 100644 --- a/service/autoscaling/autoscalingiface/interface.go +++ b/service/autoscaling/autoscalingiface/interface.go @@ -128,6 +128,10 @@ type AutoScalingAPI interface { DeleteTagsWithContext(aws.Context, 
*autoscaling.DeleteTagsInput, ...request.Option) (*autoscaling.DeleteTagsOutput, error) DeleteTagsRequest(*autoscaling.DeleteTagsInput) (*request.Request, *autoscaling.DeleteTagsOutput) + DeleteWarmPool(*autoscaling.DeleteWarmPoolInput) (*autoscaling.DeleteWarmPoolOutput, error) + DeleteWarmPoolWithContext(aws.Context, *autoscaling.DeleteWarmPoolInput, ...request.Option) (*autoscaling.DeleteWarmPoolOutput, error) + DeleteWarmPoolRequest(*autoscaling.DeleteWarmPoolInput) (*request.Request, *autoscaling.DeleteWarmPoolOutput) + DescribeAccountLimits(*autoscaling.DescribeAccountLimitsInput) (*autoscaling.DescribeAccountLimitsOutput, error) DescribeAccountLimitsWithContext(aws.Context, *autoscaling.DescribeAccountLimitsInput, ...request.Option) (*autoscaling.DescribeAccountLimitsOutput, error) DescribeAccountLimitsRequest(*autoscaling.DescribeAccountLimitsInput) (*request.Request, *autoscaling.DescribeAccountLimitsOutput) @@ -228,6 +232,10 @@ type AutoScalingAPI interface { DescribeTerminationPolicyTypesWithContext(aws.Context, *autoscaling.DescribeTerminationPolicyTypesInput, ...request.Option) (*autoscaling.DescribeTerminationPolicyTypesOutput, error) DescribeTerminationPolicyTypesRequest(*autoscaling.DescribeTerminationPolicyTypesInput) (*request.Request, *autoscaling.DescribeTerminationPolicyTypesOutput) + DescribeWarmPool(*autoscaling.DescribeWarmPoolInput) (*autoscaling.DescribeWarmPoolOutput, error) + DescribeWarmPoolWithContext(aws.Context, *autoscaling.DescribeWarmPoolInput, ...request.Option) (*autoscaling.DescribeWarmPoolOutput, error) + DescribeWarmPoolRequest(*autoscaling.DescribeWarmPoolInput) (*request.Request, *autoscaling.DescribeWarmPoolOutput) + DetachInstances(*autoscaling.DetachInstancesInput) (*autoscaling.DetachInstancesOutput, error) DetachInstancesWithContext(aws.Context, *autoscaling.DetachInstancesInput, ...request.Option) (*autoscaling.DetachInstancesOutput, error) DetachInstancesRequest(*autoscaling.DetachInstancesInput) (*request.Request, *autoscaling.DetachInstancesOutput) @@ -276,6 +284,10 @@ type AutoScalingAPI interface { PutScheduledUpdateGroupActionWithContext(aws.Context, *autoscaling.PutScheduledUpdateGroupActionInput, ...request.Option) (*autoscaling.PutScheduledUpdateGroupActionOutput, error) PutScheduledUpdateGroupActionRequest(*autoscaling.PutScheduledUpdateGroupActionInput) (*request.Request, *autoscaling.PutScheduledUpdateGroupActionOutput) + PutWarmPool(*autoscaling.PutWarmPoolInput) (*autoscaling.PutWarmPoolOutput, error) + PutWarmPoolWithContext(aws.Context, *autoscaling.PutWarmPoolInput, ...request.Option) (*autoscaling.PutWarmPoolOutput, error) + PutWarmPoolRequest(*autoscaling.PutWarmPoolInput) (*request.Request, *autoscaling.PutWarmPoolOutput) + RecordLifecycleActionHeartbeat(*autoscaling.RecordLifecycleActionHeartbeatInput) (*autoscaling.RecordLifecycleActionHeartbeatOutput, error) RecordLifecycleActionHeartbeatWithContext(aws.Context, *autoscaling.RecordLifecycleActionHeartbeatInput, ...request.Option) (*autoscaling.RecordLifecycleActionHeartbeatOutput, error) RecordLifecycleActionHeartbeatRequest(*autoscaling.RecordLifecycleActionHeartbeatInput) (*request.Request, *autoscaling.RecordLifecycleActionHeartbeatOutput) diff --git a/service/autoscaling/examples_test.go b/service/autoscaling/examples_test.go index 3aa2642dc83..7e58a1b2950 100644 --- a/service/autoscaling/examples_test.go +++ b/service/autoscaling/examples_test.go @@ -242,12 +242,15 @@ func ExampleAutoScaling_CreateAutoScalingGroup_shared00() { func 
ExampleAutoScaling_CreateAutoScalingGroup_shared01() { svc := autoscaling.New(session.New()) input := &autoscaling.CreateAutoScalingGroupInput{ - AutoScalingGroupName: aws.String("my-auto-scaling-group"), - HealthCheckGracePeriod: aws.Int64(120), - HealthCheckType: aws.String("ELB"), - LaunchConfigurationName: aws.String("my-launch-config"), - MaxSize: aws.Int64(3), - MinSize: aws.Int64(1), + AutoScalingGroupName: aws.String("my-auto-scaling-group"), + HealthCheckGracePeriod: aws.Int64(300), + HealthCheckType: aws.String("ELB"), + LaunchTemplate: &autoscaling.LaunchTemplateSpecification{ + LaunchTemplateId: aws.String("lt-0a20c965061f64abc"), + Version: aws.String("$Default"), + }, + MaxSize: aws.Int64(3), + MinSize: aws.Int64(1), TargetGroupARNs: []*string{ aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067"), }, @@ -280,25 +283,47 @@ func ExampleAutoScaling_CreateAutoScalingGroup_shared01() { fmt.Println(result) } -// To create an Auto Scaling group with an attached load balancer +// To create an Auto Scaling group with a mixed instances policy // -// This example creates an Auto Scaling group and attaches the specified Classic Load -// Balancer. +// This example creates an Auto Scaling group with a mixed instances policy. It specifies +// the c5.large, c5a.large, and c6g.large instance types and defines a different launch +// template for the c6g.large instance type. func ExampleAutoScaling_CreateAutoScalingGroup_shared02() { svc := autoscaling.New(session.New()) input := &autoscaling.CreateAutoScalingGroupInput{ - AutoScalingGroupName: aws.String("my-auto-scaling-group"), - AvailabilityZones: []*string{ - aws.String("us-west-2c"), - }, - HealthCheckGracePeriod: aws.Int64(120), - HealthCheckType: aws.String("ELB"), - LaunchConfigurationName: aws.String("my-launch-config"), - LoadBalancerNames: []*string{ - aws.String("my-load-balancer"), + AutoScalingGroupName: aws.String("my-asg"), + DesiredCapacity: aws.Int64(3), + MaxSize: aws.Int64(5), + MinSize: aws.Int64(1), + MixedInstancesPolicy: &autoscaling.MixedInstancesPolicy{ + InstancesDistribution: &autoscaling.InstancesDistribution{ + OnDemandBaseCapacity: aws.Int64(1), + OnDemandPercentageAboveBaseCapacity: aws.Int64(50), + SpotAllocationStrategy: aws.String("capacity-optimized"), + }, + LaunchTemplate: &autoscaling.LaunchTemplate{ + LaunchTemplateSpecification: &autoscaling.LaunchTemplateSpecification{ + LaunchTemplateName: aws.String("my-launch-template-for-x86"), + Version: aws.String("$Latest"), + }, + Overrides: []*autoscaling.LaunchTemplateOverrides{ + { + InstanceType: aws.String("c6g.large"), + LaunchTemplateSpecification: &autoscaling.LaunchTemplateSpecification{ + LaunchTemplateName: aws.String("my-launch-template-for-arm"), + Version: aws.String("$Latest"), + }, + }, + { + InstanceType: aws.String("c5.large"), + }, + { + InstanceType: aws.String("c5a.large"), + }, + }, + }, }, - MaxSize: aws.Int64(3), - MinSize: aws.Int64(1), + VPCZoneIdentifier: aws.String("subnet-057fa0918fEXAMPLE, subnet-610acd08EXAMPLE"), } result, err := svc.CreateAutoScalingGroup(input) @@ -1594,7 +1619,7 @@ func ExampleAutoScaling_PutScalingPolicy_shared00() { TargetTrackingConfiguration: &autoscaling.TargetTrackingConfiguration{ PredefinedMetricSpecification: &autoscaling.PredefinedMetricSpecification{ PredefinedMetricType: aws.String("ALBRequestCountPerTarget"), - ResourceLabel: aws.String("app/EC2Co-EcsEl-1TKLTMITMM0EO/f37c06a68c1748aa/targetgroup/EC2Co-Defau-LDNM7Q3ZH1ZN/6d4ea56ca2d6a18d"), + 
ResourceLabel: aws.String("app/my-alb/778d41231b141a0f/targetgroup/my-alb-target-group/943f017f100becff"), }, TargetValue: aws.Float64(1000.000000), }, @@ -1663,6 +1688,39 @@ func ExampleAutoScaling_PutScheduledUpdateGroupAction_shared00() { fmt.Println(result) } +// To add a warm pool to an Auto Scaling group +// +// This example adds a warm pool to the specified Auto Scaling group. +func ExampleAutoScaling_PutWarmPool_shared00() { + svc := autoscaling.New(session.New()) + input := &autoscaling.PutWarmPoolInput{ + AutoScalingGroupName: aws.String("my-auto-scaling-group"), + MinSize: aws.Int64(30), + PoolState: aws.String("Stopped"), + } + + result, err := svc.PutWarmPool(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case autoscaling.ErrCodeLimitExceededFault: + fmt.Println(autoscaling.ErrCodeLimitExceededFault, aerr.Error()) + case autoscaling.ErrCodeResourceContentionFault: + fmt.Println(autoscaling.ErrCodeResourceContentionFault, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} + // To record a lifecycle action heartbeat // // This example records a lifecycle action heartbeat to keep the instance in a pending diff --git a/service/customerprofiles/api.go b/service/customerprofiles/api.go index 03c6396b55b..d98f76785a0 100644 --- a/service/customerprofiles/api.go +++ b/service/customerprofiles/api.go @@ -2987,17 +2987,23 @@ func (s *BadRequestException) RequestID() string { return s.RespMetadata.RequestID } +// The operation to be performed on the provided source fields. type ConnectorOperator struct { _ struct{} `type:"structure"` + // The operation to be performed on the provided Marketo source fields. Marketo *string `type:"string" enum:"MarketoConnectorOperator"` + // The operation to be performed on the provided Amazon S3 source fields. S3 *string `type:"string" enum:"S3ConnectorOperator"` + // The operation to be performed on the provided Salesforce source fields. Salesforce *string `type:"string" enum:"SalesforceConnectorOperator"` + // The operation to be performed on the provided ServiceNow source fields. ServiceNow *string `type:"string" enum:"ServiceNowConnectorOperator"` + // The operation to be performed on the provided Zendesk source fields. Zendesk *string `type:"string" enum:"ZendeskConnectorOperator"` } @@ -4127,23 +4133,41 @@ func (s *DomainStats) SetTotalSize(v int64) *DomainStats { return s } +// The configurations that control how Customer Profiles retrieves data from +// the source, Amazon AppFlow. Customer Profiles uses this information to create +// an AppFlow flow on behalf of customers. type FlowDefinition struct { _ struct{} `type:"structure"` + // A description of the flow you want to create. Description *string `type:"string"` + // The specified name of the flow. Use underscores (_) or hyphens (-) only. + // Spaces are not allowed. + // // FlowName is a required field FlowName *string `type:"string" required:"true"` + // The Amazon Resource Name of the AWS Key Management Service (KMS) key you + // provide for encryption. + // // KmsArn is a required field KmsArn *string `min:"20" type:"string" required:"true"` + // The configuration that controls how Customer Profiles retrieves data from + // the source. 
+ // // SourceFlowConfig is a required field SourceFlowConfig *SourceFlowConfig `type:"structure" required:"true"` + // A list of tasks that Customer Profiles performs while transferring the data + // in the flow run. + // // Tasks is a required field Tasks []*Task `type:"list" required:"true"` + // The trigger settings that determine how and when the flow runs. + // // TriggerConfig is a required field TriggerConfig *TriggerConfig `type:"structure" required:"true"` } @@ -4809,9 +4833,13 @@ func (s *GetProfileObjectTypeTemplateOutput) SetTemplateId(v string) *GetProfile return s } +// Specifies the configuration used when importing incremental records from +// the source. type IncrementalPullConfig struct { _ struct{} `type:"structure"` + // A field that specifies the date time or timestamp field as the criteria to + // use when importing incremental records from the source. DatetimeTypeFieldName *string `type:"string"` } @@ -5814,9 +5842,12 @@ func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForRe return s } +// The properties that are applied when Marketo is being used as a source. type MarketoSourceProperties struct { _ struct{} `type:"structure"` + // The object specified in the Marketo flow source. + // // Object is a required field Object *string `type:"string" required:"true"` } @@ -6177,6 +6208,8 @@ type PutIntegrationInput struct { // DomainName is a required field DomainName *string `location:"uri" locationName:"DomainName" min:"1" type:"string" required:"true"` + // The configuration that controls how Customer Profiles retrieves data from + // the source. FlowDefinition *FlowDefinition `type:"structure"` // The name of the profile object type. @@ -6782,12 +6815,17 @@ func (s *ResourceNotFoundException) RequestID() string { return s.RespMetadata.RequestID } +// The properties that are applied when Amazon S3 is being used as the flow +// source. type S3SourceProperties struct { _ struct{} `type:"structure"` + // The Amazon S3 bucket name where the source files are stored. + // // BucketName is a required field BucketName *string `min:"3" type:"string" required:"true"` + // The object key for the Amazon S3 bucket in which the source files are stored. BucketPrefix *string `type:"string"` } @@ -6829,13 +6867,19 @@ func (s *S3SourceProperties) SetBucketPrefix(v string) *S3SourceProperties { return s } +// The properties that are applied when Salesforce is being used as a source. type SalesforceSourceProperties struct { _ struct{} `type:"structure"` + // The flag that enables dynamic fetching of new (recently added) fields in + // the Salesforce objects while running a flow. EnableDynamicFieldUpdate *bool `type:"boolean"` + // Indicates whether Amazon AppFlow includes deleted files in the flow run. IncludeDeletedRecords *bool `type:"boolean"` + // The object specified in the Salesforce flow source. + // // Object is a required field Object *string `type:"string" required:"true"` } @@ -6881,22 +6925,37 @@ func (s *SalesforceSourceProperties) SetObject(v string) *SalesforceSourceProper return s } +// Specifies the configuration details of a scheduled-trigger flow that you +// define. Currently, these settings only apply to the scheduled-trigger type. type ScheduledTriggerProperties struct { _ struct{} `type:"structure"` + // Specifies whether a scheduled flow has an incremental data transfer or a + // complete data transfer for each flow run. 
DataPullMode *string `type:"string" enum:"DataPullMode"` + // Specifies the date range for the records to import from the connector in + // the first flow run. FirstExecutionFrom *time.Time `type:"timestamp"` + // Specifies the scheduled end time for a scheduled-trigger flow. ScheduleEndTime *time.Time `type:"timestamp"` + // The scheduling expression that determines the rate at which the schedule + // will run, for example rate (5 minutes). + // // ScheduleExpression is a required field ScheduleExpression *string `type:"string" required:"true"` + // Specifies the optional offset that is added to the time interval for a schedule-triggered + // flow. ScheduleOffset *int64 `type:"long"` + // Specifies the scheduled start time for a scheduled-trigger flow. ScheduleStartTime *time.Time `type:"timestamp"` + // Specifies the time zone used when referring to the date and time of a scheduled-triggered + // flow, such as America/New_York. Timezone *string `type:"string"` } @@ -7096,9 +7155,12 @@ func (s *SearchProfilesOutput) SetNextToken(v string) *SearchProfilesOutput { return s } +// The properties that are applied when ServiceNow is being used as a source. type ServiceNowSourceProperties struct { _ struct{} `type:"structure"` + // The object specified in the ServiceNow flow source. + // // Object is a required field Object *string `type:"string" required:"true"` } @@ -7132,17 +7194,26 @@ func (s *ServiceNowSourceProperties) SetObject(v string) *ServiceNowSourceProper return s } +// Specifies the information that is required to query a particular Amazon AppFlow +// connector. Customer Profiles supports Salesforce, Zendesk, Marketo, ServiceNow +// and Amazon S3. type SourceConnectorProperties struct { _ struct{} `type:"structure"` + // The properties that are applied when Marketo is being used as a source. Marketo *MarketoSourceProperties `type:"structure"` + // The properties that are applied when Amazon S3 is being used as the flow + // source. S3 *S3SourceProperties `type:"structure"` + // The properties that are applied when Salesforce is being used as a source. Salesforce *SalesforceSourceProperties `type:"structure"` + // The properties that are applied when ServiceNow is being used as a source. ServiceNow *ServiceNowSourceProperties `type:"structure"` + // The properties that are applied when using Zendesk as a flow source. Zendesk *ZendeskSourceProperties `type:"structure"` } @@ -7221,16 +7292,27 @@ func (s *SourceConnectorProperties) SetZendesk(v *ZendeskSourceProperties) *Sour return s } +// Contains information about the configuration of the source connector used +// in the flow. type SourceFlowConfig struct { _ struct{} `type:"structure"` + // The name of the AppFlow connector profile. This name must be unique for each + // connector profile in the AWS account. ConnectorProfileName *string `type:"string"` + // The type of connector, such as Salesforce, Marketo, and so on. + // // ConnectorType is a required field ConnectorType *string `type:"string" required:"true" enum:"SourceConnectorType"` + // Defines the configuration for a scheduled incremental data pull. If a valid + // configuration is provided, the fields specified in the configuration are + // used when querying for the incremental data pull. IncrementalPullConfig *IncrementalPullConfig `type:"structure"` + // Specifies the information that is required to query a particular source connector. 
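+	//
+	// For example, a Salesforce-backed flow could set (illustrative value):
+	//
+	//    SourceConnectorProperties: &customerprofiles.SourceConnectorProperties{
+	//        Salesforce: &customerprofiles.SalesforceSourceProperties{
+	//            Object: aws.String("Contact"),
+	//        },
+	//    },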
+ // // SourceConnectorProperties is a required field SourceConnectorProperties *SourceConnectorProperties `type:"structure" required:"true"` } @@ -7362,18 +7444,29 @@ func (s TagResourceOutput) GoString() string { return s.String() } +// A class for modeling different type of tasks. Task implementation varies +// based on the TaskType. type Task struct { _ struct{} `type:"structure"` + // The operation to be performed on the provided source fields. ConnectorOperator *ConnectorOperator `type:"structure"` + // A field in a destination connector, or a field value against which Amazon + // AppFlow validates a source field. DestinationField *string `type:"string"` + // The source fields to which a particular task is applied. + // // SourceFields is a required field SourceFields []*string `type:"list" required:"true"` + // A map used to store task-related information. The service looks for particular + // information based on the TaskType. TaskProperties map[string]*string `type:"map"` + // Specifies the particular task implementation that Amazon AppFlow performs. + // // TaskType is a required field TaskType *string `type:"string" required:"true" enum:"TaskType"` } @@ -7490,11 +7583,17 @@ func (s *ThrottlingException) RequestID() string { return s.RespMetadata.RequestID } +// The trigger settings that determine how and when Amazon AppFlow runs the +// specified flow. type TriggerConfig struct { _ struct{} `type:"structure"` + // Specifies the configuration details of a schedule-triggered flow that you + // define. Currently, these settings only apply to the Scheduled trigger type. TriggerProperties *TriggerProperties `type:"structure"` + // Specifies the type of flow trigger. It can be OnDemand, Scheduled, or Event. + // // TriggerType is a required field TriggerType *string `type:"string" required:"true" enum:"TriggerType"` } @@ -7539,9 +7638,13 @@ func (s *TriggerConfig) SetTriggerType(v string) *TriggerConfig { return s } +// Specifies the configuration details that control the trigger for a flow. +// Currently, these settings only apply to the Scheduled trigger type. type TriggerProperties struct { _ struct{} `type:"structure"` + // Specifies the configuration details of a schedule-triggered flow that you + // define. Scheduled *ScheduledTriggerProperties `type:"structure"` } @@ -8201,9 +8304,12 @@ func (s *UpdateProfileOutput) SetProfileId(v string) *UpdateProfileOutput { return s } +// The properties that are applied when using Zendesk as a flow source. type ZendeskSourceProperties struct { _ struct{} `type:"structure"` + // The object specified in the Zendesk flow source. + // // Object is a required field Object *string `type:"string" required:"true"` } diff --git a/service/customerprofiles/doc.go b/service/customerprofiles/doc.go index c1d48f9be80..7cd7f4f36d5 100644 --- a/service/customerprofiles/doc.go +++ b/service/customerprofiles/doc.go @@ -13,7 +13,7 @@ // (CRM), ServiceNow (ITSM), and your enterprise resource planning (ERP), with // contact history from your Amazon Connect contact center. // -// If you're new to Amazon Connect, you might find it helpful to also review +// If you're new to Amazon Connect , you might find it helpful to also review // the Amazon Connect Administrator Guide (https://docs.aws.amazon.com/connect/latest/adminguide/what-is-amazon-connect.html). // // See https://docs.aws.amazon.com/goto/WebAPI/customer-profiles-2020-08-15 for more information on this service. 
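The FlowDefinition pieces documented above (source, tasks, trigger) compose into a single PutIntegration call. The following is a minimal sketch under stated assumptions: the domain name, bucket, KMS key ARN, object type, and schedule are placeholder values, and the ObjectTypeName field name is taken from the service API rather than shown in this diff.

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/customerprofiles"
    )

    func main() {
    	svc := customerprofiles.New(session.New())

    	flow := &customerprofiles.FlowDefinition{
    		// Underscores or hyphens only; spaces are not allowed (see docs above).
    		FlowName: aws.String("my_profile_flow"),
    		KmsArn:   aws.String("arn:aws:kms:us-west-2:123456789012:key/EXAMPLE"),
    		SourceFlowConfig: &customerprofiles.SourceFlowConfig{
    			ConnectorType: aws.String("S3"),
    			SourceConnectorProperties: &customerprofiles.SourceConnectorProperties{
    				S3: &customerprofiles.S3SourceProperties{
    					BucketName:   aws.String("my-source-bucket"),
    					BucketPrefix: aws.String("profiles/"),
    				},
    			},
    		},
    		// A single Map task that copies one source field to a destination field.
    		Tasks: []*customerprofiles.Task{{
    			TaskType:         aws.String("Map"),
    			SourceFields:     []*string{aws.String("email")},
    			DestinationField: aws.String("EmailAddress"),
    		}},
    		// Run on a schedule; the expression format follows the
    		// ScheduleExpression documentation above.
    		TriggerConfig: &customerprofiles.TriggerConfig{
    			TriggerType: aws.String("Scheduled"),
    			TriggerProperties: &customerprofiles.TriggerProperties{
    				Scheduled: &customerprofiles.ScheduledTriggerProperties{
    					ScheduleExpression: aws.String("rate(5minutes)"),
    				},
    			},
    		},
    	}

    	out, err := svc.PutIntegration(&customerprofiles.PutIntegrationInput{
    		DomainName:     aws.String("my-domain"),
    		ObjectTypeName: aws.String("CTR"), // assumed field name; placeholder value
    		FlowDefinition: flow,
    	})
    	if err != nil {
    		fmt.Println(err)
    		return
    	}
    	fmt.Println(out)
    }

Per the FlowDefinition documentation above, Customer Profiles uses this definition to create and run the corresponding AppFlow flow on the caller's behalf.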
diff --git a/service/kinesisvideoarchivedmedia/api.go b/service/kinesisvideoarchivedmedia/api.go index 398b35baebb..df32b9add37 100644 --- a/service/kinesisvideoarchivedmedia/api.go +++ b/service/kinesisvideoarchivedmedia/api.go @@ -1776,7 +1776,7 @@ type GetHLSStreamingSessionURLInput struct { // to use a value of NEVER to ensure the media player timeline most accurately // maps to the producer timestamps. // - // * ON_DISCONTIUNITY: a discontinuity marker is placed between fragments + // * ON_DISCONTINUITY: a discontinuity marker is placed between fragments // that have a gap or overlap of more than 50 milliseconds. For most playback // scenarios, it is recommended to use a value of ON_DISCONTINUITY so that // the media player timeline is only reset when there is a significant issue diff --git a/service/lookoutequipment/api.go b/service/lookoutequipment/api.go new file mode 100644 index 00000000000..d2c393aeabb --- /dev/null +++ b/service/lookoutequipment/api.go @@ -0,0 +1,6744 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package lookoutequipment + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opCreateDataset = "CreateDataset" + +// CreateDatasetRequest generates a "aws/request.Request" representing the +// client's request for the CreateDataset operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDataset for more information on using the CreateDataset +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateDatasetRequest method. +// req, resp := client.CreateDatasetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/CreateDataset +func (c *LookoutEquipment) CreateDatasetRequest(input *CreateDatasetInput) (req *request.Request, output *CreateDatasetOutput) { + op := &request.Operation{ + Name: opCreateDataset, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDatasetInput{} + } + + output = &CreateDatasetOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDataset API operation for Amazon Lookout for Equipment. +// +// Creates a container for a collection of data being ingested for analysis. +// The dataset contains the metadata describing where the data is and what the +// data actually looks like. In other words, it contains the location of the +// data source, the data schema, and other information. A dataset also contains +// any tags associated with the ingested data. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation CreateDataset for usage and error information. 
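+//
+// As a minimal, illustrative sketch (the DatasetName field and its value are
+// assumptions, not shown in this excerpt):
+//
+//    out, err := svc.CreateDataset(&lookoutequipment.CreateDatasetInput{
+//        DatasetName: aws.String("my-dataset"),
+//    })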
+// +// Returned Error Types: +// * ValidationException +// The input fails to satisfy constraints specified by Amazon Lookout for Equipment +// or a related AWS service that's being utilized. +// +// * ConflictException +// The request could not be completed due to a conflict with the current state +// of the target resource. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * ServiceQuotaExceededException +// Resource limitations have been exceeded. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/CreateDataset +func (c *LookoutEquipment) CreateDataset(input *CreateDatasetInput) (*CreateDatasetOutput, error) { + req, out := c.CreateDatasetRequest(input) + return out, req.Send() +} + +// CreateDatasetWithContext is the same as CreateDataset with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDataset for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) CreateDatasetWithContext(ctx aws.Context, input *CreateDatasetInput, opts ...request.Option) (*CreateDatasetOutput, error) { + req, out := c.CreateDatasetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateInferenceScheduler = "CreateInferenceScheduler" + +// CreateInferenceSchedulerRequest generates a "aws/request.Request" representing the +// client's request for the CreateInferenceScheduler operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateInferenceScheduler for more information on using the CreateInferenceScheduler +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateInferenceSchedulerRequest method. +// req, resp := client.CreateInferenceSchedulerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/CreateInferenceScheduler +func (c *LookoutEquipment) CreateInferenceSchedulerRequest(input *CreateInferenceSchedulerInput) (req *request.Request, output *CreateInferenceSchedulerOutput) { + op := &request.Operation{ + Name: opCreateInferenceScheduler, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateInferenceSchedulerInput{} + } + + output = &CreateInferenceSchedulerOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateInferenceScheduler API operation for Amazon Lookout for Equipment. +// +// Creates a scheduled inference. 
Scheduling an inference is setting up a continuous +// real-time inference plan to analyze new measurement data. When setting up +// the schedule, you provide an S3 bucket location for the input data, assign +// it a delimiter between separate entries in the data, set an offset delay +// if desired, and set the frequency of inferencing. You must also provide an +// S3 bucket location for the output data. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation CreateInferenceScheduler for usage and error information. +// +// Returned Error Types: +// * ConflictException +// The request could not be completed due to a conflict with the current state +// of the target resource. +// +// * ResourceNotFoundException +// The resource requested could not be found. Verify the resource ID and retry +// your request. +// +// * ValidationException +// The input fails to satisfy constraints specified by Amazon Lookout for Equipment +// or a related AWS service that's being utilized. +// +// * ServiceQuotaExceededException +// Resource limitations have been exceeded. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/CreateInferenceScheduler +func (c *LookoutEquipment) CreateInferenceScheduler(input *CreateInferenceSchedulerInput) (*CreateInferenceSchedulerOutput, error) { + req, out := c.CreateInferenceSchedulerRequest(input) + return out, req.Send() +} + +// CreateInferenceSchedulerWithContext is the same as CreateInferenceScheduler with the addition of +// the ability to pass a context and additional request options. +// +// See CreateInferenceScheduler for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) CreateInferenceSchedulerWithContext(ctx aws.Context, input *CreateInferenceSchedulerInput, opts ...request.Option) (*CreateInferenceSchedulerOutput, error) { + req, out := c.CreateInferenceSchedulerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateModel = "CreateModel" + +// CreateModelRequest generates a "aws/request.Request" representing the +// client's request for the CreateModel operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateModel for more information on using the CreateModel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CreateModelRequest method. +// req, resp := client.CreateModelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/CreateModel +func (c *LookoutEquipment) CreateModelRequest(input *CreateModelInput) (req *request.Request, output *CreateModelOutput) { + op := &request.Operation{ + Name: opCreateModel, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateModelInput{} + } + + output = &CreateModelOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateModel API operation for Amazon Lookout for Equipment. +// +// Creates an ML model for data inference. +// +// A machine-learning (ML) model is a mathematical model that finds patterns +// in your data. In Amazon Lookout for Equipment, the model learns the patterns +// of normal behavior and detects abnormal behavior that could be potential +// equipment failure (or maintenance events). The models are made by analyzing +// normal data and abnormalities in machine behavior that have already occurred. +// +// Your model is trained using a portion of the data from your dataset and uses +// that data to learn patterns of normal behavior and abnormal patterns that +// lead to equipment failure. Another portion of the data is used to evaluate +// the model's accuracy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation CreateModel for usage and error information. +// +// Returned Error Types: +// * ValidationException +// The input fails to satisfy constraints specified by Amazon Lookout for Equipment +// or a related AWS service that's being utilized. +// +// * ConflictException +// The request could not be completed due to a conflict with the current state +// of the target resource. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * ServiceQuotaExceededException +// Resource limitations have been exceeded. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. +// +// * ResourceNotFoundException +// The resource requested could not be found. Verify the resource ID and retry +// your request. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/CreateModel +func (c *LookoutEquipment) CreateModel(input *CreateModelInput) (*CreateModelOutput, error) { + req, out := c.CreateModelRequest(input) + return out, req.Send() +} + +// CreateModelWithContext is the same as CreateModel with the addition of +// the ability to pass a context and additional request options. +// +// See CreateModel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
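+//
+// A minimal sketch of bounding the call with a deadline (illustrative):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    out, err := svc.CreateModelWithContext(ctx, input)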
+func (c *LookoutEquipment) CreateModelWithContext(ctx aws.Context, input *CreateModelInput, opts ...request.Option) (*CreateModelOutput, error) { + req, out := c.CreateModelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteDataset = "DeleteDataset" + +// DeleteDatasetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDataset operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDataset for more information on using the DeleteDataset +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteDatasetRequest method. +// req, resp := client.DeleteDatasetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/DeleteDataset +func (c *LookoutEquipment) DeleteDatasetRequest(input *DeleteDatasetInput) (req *request.Request, output *DeleteDatasetOutput) { + op := &request.Operation{ + Name: opDeleteDataset, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDatasetInput{} + } + + output = &DeleteDatasetOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDataset API operation for Amazon Lookout for Equipment. +// +// Deletes a dataset and associated artifacts. The operation will check to see +// if any inference scheduler or data ingestion job is currently using the dataset, +// and if there isn't, the dataset, its metadata, and any associated data stored +// in S3 will be deleted. This does not affect any models that used this dataset +// for training and evaluation, but does prevent it from being used in the future. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation DeleteDataset for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The resource requested could not be found. Verify the resource ID and retry +// your request. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// * ConflictException +// The request could not be completed due to a conflict with the current state +// of the target resource. 
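+//
+// Distinguishing these error types follows the usual awserr pattern, for example
+// (sketch):
+//
+//    if _, err := svc.DeleteDataset(input); err != nil {
+//        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == lookoutequipment.ErrCodeConflictException {
+//            // The dataset is still in use by a scheduler or ingestion job.
+//        }
+//    }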
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/DeleteDataset +func (c *LookoutEquipment) DeleteDataset(input *DeleteDatasetInput) (*DeleteDatasetOutput, error) { + req, out := c.DeleteDatasetRequest(input) + return out, req.Send() +} + +// DeleteDatasetWithContext is the same as DeleteDataset with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDataset for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) DeleteDatasetWithContext(ctx aws.Context, input *DeleteDatasetInput, opts ...request.Option) (*DeleteDatasetOutput, error) { + req, out := c.DeleteDatasetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteInferenceScheduler = "DeleteInferenceScheduler" + +// DeleteInferenceSchedulerRequest generates a "aws/request.Request" representing the +// client's request for the DeleteInferenceScheduler operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteInferenceScheduler for more information on using the DeleteInferenceScheduler +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteInferenceSchedulerRequest method. +// req, resp := client.DeleteInferenceSchedulerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/DeleteInferenceScheduler +func (c *LookoutEquipment) DeleteInferenceSchedulerRequest(input *DeleteInferenceSchedulerInput) (req *request.Request, output *DeleteInferenceSchedulerOutput) { + op := &request.Operation{ + Name: opDeleteInferenceScheduler, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteInferenceSchedulerInput{} + } + + output = &DeleteInferenceSchedulerOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteInferenceScheduler API operation for Amazon Lookout for Equipment. +// +// Deletes an inference scheduler that has been set up. Already processed output +// results are not affected. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation DeleteInferenceScheduler for usage and error information. +// +// Returned Error Types: +// * ValidationException +// The input fails to satisfy constraints specified by Amazon Lookout for Equipment +// or a related AWS service that's being utilized. +// +// * ResourceNotFoundException +// The resource requested could not be found. 
Verify the resource ID and retry +// your request. +// +// * ConflictException +// The request could not be completed due to a conflict with the current state +// of the target resource. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/DeleteInferenceScheduler +func (c *LookoutEquipment) DeleteInferenceScheduler(input *DeleteInferenceSchedulerInput) (*DeleteInferenceSchedulerOutput, error) { + req, out := c.DeleteInferenceSchedulerRequest(input) + return out, req.Send() +} + +// DeleteInferenceSchedulerWithContext is the same as DeleteInferenceScheduler with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteInferenceScheduler for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) DeleteInferenceSchedulerWithContext(ctx aws.Context, input *DeleteInferenceSchedulerInput, opts ...request.Option) (*DeleteInferenceSchedulerOutput, error) { + req, out := c.DeleteInferenceSchedulerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteModel = "DeleteModel" + +// DeleteModelRequest generates a "aws/request.Request" representing the +// client's request for the DeleteModel operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteModel for more information on using the DeleteModel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteModelRequest method. +// req, resp := client.DeleteModelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/DeleteModel +func (c *LookoutEquipment) DeleteModelRequest(input *DeleteModelInput) (req *request.Request, output *DeleteModelOutput) { + op := &request.Operation{ + Name: opDeleteModel, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteModelInput{} + } + + output = &DeleteModelOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteModel API operation for Amazon Lookout for Equipment. +// +// Deletes an ML model currently available for Amazon Lookout for Equipment. +// This will prevent it from being used with an inference scheduler, even one +// that is already set up. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation DeleteModel for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The resource requested could not be found. Verify the resource ID and retry +// your request. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. +// +// * ConflictException +// The request could not be completed due to a conflict with the current state +// of the target resource. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/DeleteModel +func (c *LookoutEquipment) DeleteModel(input *DeleteModelInput) (*DeleteModelOutput, error) { + req, out := c.DeleteModelRequest(input) + return out, req.Send() +} + +// DeleteModelWithContext is the same as DeleteModel with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteModel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) DeleteModelWithContext(ctx aws.Context, input *DeleteModelInput, opts ...request.Option) (*DeleteModelOutput, error) { + req, out := c.DeleteModelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeDataIngestionJob = "DescribeDataIngestionJob" + +// DescribeDataIngestionJobRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDataIngestionJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDataIngestionJob for more information on using the DescribeDataIngestionJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeDataIngestionJobRequest method. 
+// req, resp := client.DescribeDataIngestionJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/DescribeDataIngestionJob +func (c *LookoutEquipment) DescribeDataIngestionJobRequest(input *DescribeDataIngestionJobInput) (req *request.Request, output *DescribeDataIngestionJobOutput) { + op := &request.Operation{ + Name: opDescribeDataIngestionJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDataIngestionJobInput{} + } + + output = &DescribeDataIngestionJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDataIngestionJob API operation for Amazon Lookout for Equipment. +// +// Provides information on a specific data ingestion job such as creation time, +// dataset ARN, status, and so on. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation DescribeDataIngestionJob for usage and error information. +// +// Returned Error Types: +// * ValidationException +// The input fails to satisfy constraints specified by Amazon Lookout for Equipment +// or a related AWS service that's being utilized. +// +// * ResourceNotFoundException +// The resource requested could not be found. Verify the resource ID and retry +// your request. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/DescribeDataIngestionJob +func (c *LookoutEquipment) DescribeDataIngestionJob(input *DescribeDataIngestionJobInput) (*DescribeDataIngestionJobOutput, error) { + req, out := c.DescribeDataIngestionJobRequest(input) + return out, req.Send() +} + +// DescribeDataIngestionJobWithContext is the same as DescribeDataIngestionJob with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDataIngestionJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) DescribeDataIngestionJobWithContext(ctx aws.Context, input *DescribeDataIngestionJobInput, opts ...request.Option) (*DescribeDataIngestionJobOutput, error) { + req, out := c.DescribeDataIngestionJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeDataset = "DescribeDataset" + +// DescribeDatasetRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDataset operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDataset for more information on using the DescribeDataset +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeDatasetRequest method. +// req, resp := client.DescribeDatasetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/DescribeDataset +func (c *LookoutEquipment) DescribeDatasetRequest(input *DescribeDatasetInput) (req *request.Request, output *DescribeDatasetOutput) { + op := &request.Operation{ + Name: opDescribeDataset, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDatasetInput{} + } + + output = &DescribeDatasetOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDataset API operation for Amazon Lookout for Equipment. +// +// Provides information on a specified dataset such as the schema location, +// status, and so on. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation DescribeDataset for usage and error information. +// +// Returned Error Types: +// * ValidationException +// The input fails to satisfy constraints specified by Amazon Lookout for Equipment +// or a related AWS service that's being utilized. +// +// * ResourceNotFoundException +// The resource requested could not be found. Verify the resource ID and retry +// your request. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/DescribeDataset +func (c *LookoutEquipment) DescribeDataset(input *DescribeDatasetInput) (*DescribeDatasetOutput, error) { + req, out := c.DescribeDatasetRequest(input) + return out, req.Send() +} + +// DescribeDatasetWithContext is the same as DescribeDataset with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDataset for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) DescribeDatasetWithContext(ctx aws.Context, input *DescribeDatasetInput, opts ...request.Option) (*DescribeDatasetOutput, error) { + req, out := c.DescribeDatasetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeInferenceScheduler = "DescribeInferenceScheduler" + +// DescribeInferenceSchedulerRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInferenceScheduler operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInferenceScheduler for more information on using the DescribeInferenceScheduler +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInferenceSchedulerRequest method. +// req, resp := client.DescribeInferenceSchedulerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/DescribeInferenceScheduler +func (c *LookoutEquipment) DescribeInferenceSchedulerRequest(input *DescribeInferenceSchedulerInput) (req *request.Request, output *DescribeInferenceSchedulerOutput) { + op := &request.Operation{ + Name: opDescribeInferenceScheduler, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInferenceSchedulerInput{} + } + + output = &DescribeInferenceSchedulerOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInferenceScheduler API operation for Amazon Lookout for Equipment. +// +// Specifies information about the inference scheduler being used, including +// name, model, status, and associated metadata +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation DescribeInferenceScheduler for usage and error information. +// +// Returned Error Types: +// * ValidationException +// The input fails to satisfy constraints specified by Amazon Lookout for Equipment +// or a related AWS service that's being utilized. +// +// * ResourceNotFoundException +// The resource requested could not be found. Verify the resource ID and retry +// your request. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/DescribeInferenceScheduler +func (c *LookoutEquipment) DescribeInferenceScheduler(input *DescribeInferenceSchedulerInput) (*DescribeInferenceSchedulerOutput, error) { + req, out := c.DescribeInferenceSchedulerRequest(input) + return out, req.Send() +} + +// DescribeInferenceSchedulerWithContext is the same as DescribeInferenceScheduler with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInferenceScheduler for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *LookoutEquipment) DescribeInferenceSchedulerWithContext(ctx aws.Context, input *DescribeInferenceSchedulerInput, opts ...request.Option) (*DescribeInferenceSchedulerOutput, error) { + req, out := c.DescribeInferenceSchedulerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeModel = "DescribeModel" + +// DescribeModelRequest generates a "aws/request.Request" representing the +// client's request for the DescribeModel operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeModel for more information on using the DescribeModel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeModelRequest method. +// req, resp := client.DescribeModelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/DescribeModel +func (c *LookoutEquipment) DescribeModelRequest(input *DescribeModelInput) (req *request.Request, output *DescribeModelOutput) { + op := &request.Operation{ + Name: opDescribeModel, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeModelInput{} + } + + output = &DescribeModelOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeModel API operation for Amazon Lookout for Equipment. +// +// Provides overall information about a specific ML model, including model name +// and ARN, dataset, training and evaluation information, status, and so on. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation DescribeModel for usage and error information. +// +// Returned Error Types: +// * ValidationException +// The input fails to satisfy constraints specified by Amazon Lookout for Equipment +// or a related AWS service that's being utilized. +// +// * ResourceNotFoundException +// The resource requested could not be found. Verify the resource ID and retry +// your request. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/DescribeModel +func (c *LookoutEquipment) DescribeModel(input *DescribeModelInput) (*DescribeModelOutput, error) { + req, out := c.DescribeModelRequest(input) + return out, req.Send() +} + +// DescribeModelWithContext is the same as DescribeModel with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeModel for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) DescribeModelWithContext(ctx aws.Context, input *DescribeModelInput, opts ...request.Option) (*DescribeModelOutput, error) { + req, out := c.DescribeModelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListDataIngestionJobs = "ListDataIngestionJobs" + +// ListDataIngestionJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListDataIngestionJobs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDataIngestionJobs for more information on using the ListDataIngestionJobs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDataIngestionJobsRequest method. +// req, resp := client.ListDataIngestionJobsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/ListDataIngestionJobs +func (c *LookoutEquipment) ListDataIngestionJobsRequest(input *ListDataIngestionJobsInput) (req *request.Request, output *ListDataIngestionJobsOutput) { + op := &request.Operation{ + Name: opListDataIngestionJobs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDataIngestionJobsInput{} + } + + output = &ListDataIngestionJobsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDataIngestionJobs API operation for Amazon Lookout for Equipment. +// +// Provides a list of all data ingestion jobs, including dataset name and ARN, +// S3 location of the input data, status, and so on. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation ListDataIngestionJobs for usage and error information. +// +// Returned Error Types: +// * ValidationException +// The input fails to satisfy constraints specified by Amazon Lookout for Equipment +// or a related AWS service that's being utilized. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/ListDataIngestionJobs +func (c *LookoutEquipment) ListDataIngestionJobs(input *ListDataIngestionJobsInput) (*ListDataIngestionJobsOutput, error) { + req, out := c.ListDataIngestionJobsRequest(input) + return out, req.Send() +} + +// ListDataIngestionJobsWithContext is the same as ListDataIngestionJobs with the addition of +// the ability to pass a context and additional request options. +// +// See ListDataIngestionJobs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) ListDataIngestionJobsWithContext(ctx aws.Context, input *ListDataIngestionJobsInput, opts ...request.Option) (*ListDataIngestionJobsOutput, error) { + req, out := c.ListDataIngestionJobsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDataIngestionJobsPages iterates over the pages of a ListDataIngestionJobs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDataIngestionJobs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDataIngestionJobs operation. +// pageNum := 0 +// err := client.ListDataIngestionJobsPages(params, +// func(page *lookoutequipment.ListDataIngestionJobsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *LookoutEquipment) ListDataIngestionJobsPages(input *ListDataIngestionJobsInput, fn func(*ListDataIngestionJobsOutput, bool) bool) error { + return c.ListDataIngestionJobsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDataIngestionJobsPagesWithContext same as ListDataIngestionJobsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) ListDataIngestionJobsPagesWithContext(ctx aws.Context, input *ListDataIngestionJobsInput, fn func(*ListDataIngestionJobsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDataIngestionJobsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDataIngestionJobsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDataIngestionJobsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListDatasets = "ListDatasets" + +// ListDatasetsRequest generates a "aws/request.Request" representing the +// client's request for the ListDatasets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ListDatasets for more information on using the ListDatasets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDatasetsRequest method. +// req, resp := client.ListDatasetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/ListDatasets +func (c *LookoutEquipment) ListDatasetsRequest(input *ListDatasetsInput) (req *request.Request, output *ListDatasetsOutput) { + op := &request.Operation{ + Name: opListDatasets, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDatasetsInput{} + } + + output = &ListDatasetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDatasets API operation for Amazon Lookout for Equipment. +// +// Lists all datasets currently available in your account, filtering on the +// dataset name. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation ListDatasets for usage and error information. +// +// Returned Error Types: +// * ValidationException +// The input fails to satisfy constraints specified by Amazon Lookout for Equipment +// or a related AWS service that's being utilized. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/ListDatasets +func (c *LookoutEquipment) ListDatasets(input *ListDatasetsInput) (*ListDatasetsOutput, error) { + req, out := c.ListDatasetsRequest(input) + return out, req.Send() +} + +// ListDatasetsWithContext is the same as ListDatasets with the addition of +// the ability to pass a context and additional request options. +// +// See ListDatasets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) ListDatasetsWithContext(ctx aws.Context, input *ListDatasetsInput, opts ...request.Option) (*ListDatasetsOutput, error) { + req, out := c.ListDatasetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDatasetsPages iterates over the pages of a ListDatasets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDatasets method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDatasets operation. +// pageNum := 0 +// err := client.ListDatasetsPages(params, +// func(page *lookoutequipment.ListDatasetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *LookoutEquipment) ListDatasetsPages(input *ListDatasetsInput, fn func(*ListDatasetsOutput, bool) bool) error { + return c.ListDatasetsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDatasetsPagesWithContext same as ListDatasetsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) ListDatasetsPagesWithContext(ctx aws.Context, input *ListDatasetsInput, fn func(*ListDatasetsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDatasetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDatasetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDatasetsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListInferenceExecutions = "ListInferenceExecutions" + +// ListInferenceExecutionsRequest generates a "aws/request.Request" representing the +// client's request for the ListInferenceExecutions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListInferenceExecutions for more information on using the ListInferenceExecutions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListInferenceExecutionsRequest method. +// req, resp := client.ListInferenceExecutionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/ListInferenceExecutions +func (c *LookoutEquipment) ListInferenceExecutionsRequest(input *ListInferenceExecutionsInput) (req *request.Request, output *ListInferenceExecutionsOutput) { + op := &request.Operation{ + Name: opListInferenceExecutions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListInferenceExecutionsInput{} + } + + output = &ListInferenceExecutionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListInferenceExecutions API operation for Amazon Lookout for Equipment. +// +// Lists all inference executions that have been performed by the specified +// inference scheduler. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation ListInferenceExecutions for usage and error information. +// +// Returned Error Types: +// * ValidationException +// The input fails to satisfy constraints specified by Amazon Lookout for Equipment +// or a related AWS service that's being utilized. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * ResourceNotFoundException +// The resource requested could not be found. Verify the resource ID and retry +// your request. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/ListInferenceExecutions +func (c *LookoutEquipment) ListInferenceExecutions(input *ListInferenceExecutionsInput) (*ListInferenceExecutionsOutput, error) { + req, out := c.ListInferenceExecutionsRequest(input) + return out, req.Send() +} + +// ListInferenceExecutionsWithContext is the same as ListInferenceExecutions with the addition of +// the ability to pass a context and additional request options. +// +// See ListInferenceExecutions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) ListInferenceExecutionsWithContext(ctx aws.Context, input *ListInferenceExecutionsInput, opts ...request.Option) (*ListInferenceExecutionsOutput, error) { + req, out := c.ListInferenceExecutionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListInferenceExecutionsPages iterates over the pages of a ListInferenceExecutions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListInferenceExecutions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListInferenceExecutions operation. +// pageNum := 0 +// err := client.ListInferenceExecutionsPages(params, +// func(page *lookoutequipment.ListInferenceExecutionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *LookoutEquipment) ListInferenceExecutionsPages(input *ListInferenceExecutionsInput, fn func(*ListInferenceExecutionsOutput, bool) bool) error { + return c.ListInferenceExecutionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListInferenceExecutionsPagesWithContext same as ListInferenceExecutionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
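+//
+// A minimal sketch (the scheduler name is illustrative) that bounds the
+// whole pagination loop with one deadline:
+//
+//    ctx, cancel := context.WithTimeout(aws.BackgroundContext(), time.Minute)
+//    defer cancel()
+//    err := client.ListInferenceExecutionsPagesWithContext(ctx,
+//        &lookoutequipment.ListInferenceExecutionsInput{
+//            InferenceSchedulerName: aws.String("my-scheduler"),
+//        },
+//        func(page *lookoutequipment.ListInferenceExecutionsOutput, lastPage bool) bool {
+//            fmt.Println(page)
+//            return true
+//        })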
+func (c *LookoutEquipment) ListInferenceExecutionsPagesWithContext(ctx aws.Context, input *ListInferenceExecutionsInput, fn func(*ListInferenceExecutionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListInferenceExecutionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListInferenceExecutionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListInferenceExecutionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListInferenceSchedulers = "ListInferenceSchedulers" + +// ListInferenceSchedulersRequest generates a "aws/request.Request" representing the +// client's request for the ListInferenceSchedulers operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListInferenceSchedulers for more information on using the ListInferenceSchedulers +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListInferenceSchedulersRequest method. +// req, resp := client.ListInferenceSchedulersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/ListInferenceSchedulers +func (c *LookoutEquipment) ListInferenceSchedulersRequest(input *ListInferenceSchedulersInput) (req *request.Request, output *ListInferenceSchedulersOutput) { + op := &request.Operation{ + Name: opListInferenceSchedulers, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListInferenceSchedulersInput{} + } + + output = &ListInferenceSchedulersOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListInferenceSchedulers API operation for Amazon Lookout for Equipment. +// +// Retrieves a list of all inference schedulers currently available for your +// account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation ListInferenceSchedulers for usage and error information. +// +// Returned Error Types: +// * ValidationException +// The input fails to satisfy constraints specified by Amazon Lookout for Equipment +// or a related AWS service that's being utilized. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. 
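+//
+// A hedged sketch of inspecting these typed errors through the awserr.Error
+// interface after a failed call:
+//
+//    if _, err := client.ListInferenceSchedulers(&lookoutequipment.ListInferenceSchedulersInput{}); err != nil {
+//        if aerr, ok := err.(awserr.Error); ok {
+//            fmt.Println(aerr.Code(), aerr.Message())
+//        }
+//    }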
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/ListInferenceSchedulers +func (c *LookoutEquipment) ListInferenceSchedulers(input *ListInferenceSchedulersInput) (*ListInferenceSchedulersOutput, error) { + req, out := c.ListInferenceSchedulersRequest(input) + return out, req.Send() +} + +// ListInferenceSchedulersWithContext is the same as ListInferenceSchedulers with the addition of +// the ability to pass a context and additional request options. +// +// See ListInferenceSchedulers for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) ListInferenceSchedulersWithContext(ctx aws.Context, input *ListInferenceSchedulersInput, opts ...request.Option) (*ListInferenceSchedulersOutput, error) { + req, out := c.ListInferenceSchedulersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListInferenceSchedulersPages iterates over the pages of a ListInferenceSchedulers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListInferenceSchedulers method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListInferenceSchedulers operation. +// pageNum := 0 +// err := client.ListInferenceSchedulersPages(params, +// func(page *lookoutequipment.ListInferenceSchedulersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *LookoutEquipment) ListInferenceSchedulersPages(input *ListInferenceSchedulersInput, fn func(*ListInferenceSchedulersOutput, bool) bool) error { + return c.ListInferenceSchedulersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListInferenceSchedulersPagesWithContext same as ListInferenceSchedulersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) ListInferenceSchedulersPagesWithContext(ctx aws.Context, input *ListInferenceSchedulersInput, fn func(*ListInferenceSchedulersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListInferenceSchedulersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListInferenceSchedulersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListInferenceSchedulersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListModels = "ListModels" + +// ListModelsRequest generates a "aws/request.Request" representing the +// client's request for the ListModels operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListModels for more information on using the ListModels +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListModelsRequest method. +// req, resp := client.ListModelsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/ListModels +func (c *LookoutEquipment) ListModelsRequest(input *ListModelsInput) (req *request.Request, output *ListModelsOutput) { + op := &request.Operation{ + Name: opListModels, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListModelsInput{} + } + + output = &ListModelsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListModels API operation for Amazon Lookout for Equipment. +// +// Generates a list of all models in the account, including model name and ARN, +// dataset, and status. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation ListModels for usage and error information. +// +// Returned Error Types: +// * ValidationException +// The input fails to satisfy constraints specified by Amazon Lookout for Equipment +// or a related AWS service that's being utilized. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/ListModels +func (c *LookoutEquipment) ListModels(input *ListModelsInput) (*ListModelsOutput, error) { + req, out := c.ListModelsRequest(input) + return out, req.Send() +} + +// ListModelsWithContext is the same as ListModels with the addition of +// the ability to pass a context and additional request options. +// +// See ListModels for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) ListModelsWithContext(ctx aws.Context, input *ListModelsInput, opts ...request.Option) (*ListModelsOutput, error) { + req, out := c.ListModelsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListModelsPages iterates over the pages of a ListModels operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
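+//
+// For example (a sketch; the ModelSummaries field is taken from this
+// service's model, and the early stop is illustrative):
+//
+//    total := 0
+//    err := client.ListModelsPages(&lookoutequipment.ListModelsInput{},
+//        func(page *lookoutequipment.ListModelsOutput, lastPage bool) bool {
+//            total += len(page.ModelSummaries)
+//            return total < 100 // stop once 100 models have been seen
+//        })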
+// +// See ListModels method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListModels operation. +// pageNum := 0 +// err := client.ListModelsPages(params, +// func(page *lookoutequipment.ListModelsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *LookoutEquipment) ListModelsPages(input *ListModelsInput, fn func(*ListModelsOutput, bool) bool) error { + return c.ListModelsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListModelsPagesWithContext same as ListModelsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) ListModelsPagesWithContext(ctx aws.Context, input *ListModelsInput, fn func(*ListModelsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListModelsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListModelsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListModelsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/ListTagsForResource +func (c *LookoutEquipment) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Amazon Lookout for Equipment. +// +// Lists all the tags for a specified resource, including key and value. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
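+//
+// A minimal sketch (the ARN is a placeholder, not a real resource):
+//
+//    out, err := client.ListTagsForResource(&lookoutequipment.ListTagsForResourceInput{
+//        ResourceArn: aws.String("arn:aws:lookoutequipment:us-east-1:123456789012:dataset/my-dataset"),
+//    })
+//    if err == nil {
+//        fmt.Println(out.Tags)
+//    }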
+// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Types: +// * ValidationException +// The input fails to satisfy constraints specified by Amazon Lookout for Equipment +// or a related AWS service that's being utilized. +// +// * ResourceNotFoundException +// The resource requested could not be found. Verify the resource ID and retry +// your request. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/ListTagsForResource +func (c *LookoutEquipment) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartDataIngestionJob = "StartDataIngestionJob" + +// StartDataIngestionJobRequest generates a "aws/request.Request" representing the +// client's request for the StartDataIngestionJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartDataIngestionJob for more information on using the StartDataIngestionJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartDataIngestionJobRequest method. 
+// req, resp := client.StartDataIngestionJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/StartDataIngestionJob +func (c *LookoutEquipment) StartDataIngestionJobRequest(input *StartDataIngestionJobInput) (req *request.Request, output *StartDataIngestionJobOutput) { + op := &request.Operation{ + Name: opStartDataIngestionJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartDataIngestionJobInput{} + } + + output = &StartDataIngestionJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartDataIngestionJob API operation for Amazon Lookout for Equipment. +// +// Starts a data ingestion job. Amazon Lookout for Equipment returns the job +// status. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation StartDataIngestionJob for usage and error information. +// +// Returned Error Types: +// * ValidationException +// The input fails to satisfy constraints specified by Amazon Lookout for Equipment +// or a related AWS service that's being utilized. +// +// * ResourceNotFoundException +// The resource requested could not be found. Verify the resource ID and retry +// your request. +// +// * ConflictException +// The request could not be completed due to a conflict with the current state +// of the target resource. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * ServiceQuotaExceededException +// Resource limitations have been exceeded. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/StartDataIngestionJob +func (c *LookoutEquipment) StartDataIngestionJob(input *StartDataIngestionJobInput) (*StartDataIngestionJobOutput, error) { + req, out := c.StartDataIngestionJobRequest(input) + return out, req.Send() +} + +// StartDataIngestionJobWithContext is the same as StartDataIngestionJob with the addition of +// the ability to pass a context and additional request options. +// +// See StartDataIngestionJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) StartDataIngestionJobWithContext(ctx aws.Context, input *StartDataIngestionJobInput, opts ...request.Option) (*StartDataIngestionJobOutput, error) { + req, out := c.StartDataIngestionJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartInferenceScheduler = "StartInferenceScheduler" + +// StartInferenceSchedulerRequest generates a "aws/request.Request" representing the +// client's request for the StartInferenceScheduler operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartInferenceScheduler for more information on using the StartInferenceScheduler +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartInferenceSchedulerRequest method. +// req, resp := client.StartInferenceSchedulerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/StartInferenceScheduler +func (c *LookoutEquipment) StartInferenceSchedulerRequest(input *StartInferenceSchedulerInput) (req *request.Request, output *StartInferenceSchedulerOutput) { + op := &request.Operation{ + Name: opStartInferenceScheduler, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartInferenceSchedulerInput{} + } + + output = &StartInferenceSchedulerOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartInferenceScheduler API operation for Amazon Lookout for Equipment. +// +// Starts an inference scheduler. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation StartInferenceScheduler for usage and error information. +// +// Returned Error Types: +// * ValidationException +// The input fails to satisfy constraints specified by Amazon Lookout for Equipment +// or a related AWS service that's being utilized. +// +// * ConflictException +// The request could not be completed due to a conflict with the current state +// of the target resource. +// +// * ResourceNotFoundException +// The resource requested could not be found. Verify the resource ID and retry +// your request. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/StartInferenceScheduler +func (c *LookoutEquipment) StartInferenceScheduler(input *StartInferenceSchedulerInput) (*StartInferenceSchedulerOutput, error) { + req, out := c.StartInferenceSchedulerRequest(input) + return out, req.Send() +} + +// StartInferenceSchedulerWithContext is the same as StartInferenceScheduler with the addition of +// the ability to pass a context and additional request options. +// +// See StartInferenceScheduler for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
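+//
+// A minimal sketch (scheduler name illustrative) of starting a scheduler
+// under a cancellable context:
+//
+//    ctx, cancel := context.WithCancel(aws.BackgroundContext())
+//    defer cancel()
+//    out, err := client.StartInferenceSchedulerWithContext(ctx,
+//        &lookoutequipment.StartInferenceSchedulerInput{
+//            InferenceSchedulerName: aws.String("my-scheduler"),
+//        })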
+func (c *LookoutEquipment) StartInferenceSchedulerWithContext(ctx aws.Context, input *StartInferenceSchedulerInput, opts ...request.Option) (*StartInferenceSchedulerOutput, error) { + req, out := c.StartInferenceSchedulerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopInferenceScheduler = "StopInferenceScheduler" + +// StopInferenceSchedulerRequest generates a "aws/request.Request" representing the +// client's request for the StopInferenceScheduler operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopInferenceScheduler for more information on using the StopInferenceScheduler +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopInferenceSchedulerRequest method. +// req, resp := client.StopInferenceSchedulerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/StopInferenceScheduler +func (c *LookoutEquipment) StopInferenceSchedulerRequest(input *StopInferenceSchedulerInput) (req *request.Request, output *StopInferenceSchedulerOutput) { + op := &request.Operation{ + Name: opStopInferenceScheduler, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopInferenceSchedulerInput{} + } + + output = &StopInferenceSchedulerOutput{} + req = c.newRequest(op, input, output) + return +} + +// StopInferenceScheduler API operation for Amazon Lookout for Equipment. +// +// Stops an inference scheduler. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation StopInferenceScheduler for usage and error information. +// +// Returned Error Types: +// * ValidationException +// The input fails to satisfy constraints specified by Amazon Lookout for Equipment +// or a related AWS service that's being utilized. +// +// * ConflictException +// The request could not be completed due to a conflict with the current state +// of the target resource. +// +// * ResourceNotFoundException +// The resource requested could not be found. Verify the resource ID and retry +// your request. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. 
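+//
+// A hedged sketch of distinguishing the conflict case (for example, a
+// scheduler that is not currently running) by error code; the scheduler
+// name is illustrative:
+//
+//    _, err := client.StopInferenceScheduler(&lookoutequipment.StopInferenceSchedulerInput{
+//        InferenceSchedulerName: aws.String("my-scheduler"),
+//    })
+//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == lookoutequipment.ErrCodeConflictException {
+//        // already stopped or still starting; decide whether to retry
+//    }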
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/StopInferenceScheduler +func (c *LookoutEquipment) StopInferenceScheduler(input *StopInferenceSchedulerInput) (*StopInferenceSchedulerOutput, error) { + req, out := c.StopInferenceSchedulerRequest(input) + return out, req.Send() +} + +// StopInferenceSchedulerWithContext is the same as StopInferenceScheduler with the addition of +// the ability to pass a context and additional request options. +// +// See StopInferenceScheduler for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) StopInferenceSchedulerWithContext(ctx aws.Context, input *StopInferenceSchedulerInput, opts ...request.Option) (*StopInferenceSchedulerOutput, error) { + req, out := c.StopInferenceSchedulerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/TagResource +func (c *LookoutEquipment) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Amazon Lookout for Equipment. +// +// Associates a given tag to a resource in your account. A tag is a key-value +// pair which can be added to an Amazon Lookout for Equipment resource as metadata. +// Tags can be used for organizing your resources as well as helping you to +// search and filter by tag. Multiple tags can be added to a resource, either +// when you create it, or later. Up to 50 tags can be associated with each resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation TagResource for usage and error information. 
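+//
+// A minimal sketch (the ARN and tag values are placeholders):
+//
+//    _, err := client.TagResource(&lookoutequipment.TagResourceInput{
+//        ResourceArn: aws.String("arn:aws:lookoutequipment:us-east-1:123456789012:dataset/my-dataset"),
+//        Tags: []*lookoutequipment.Tag{
+//            {Key: aws.String("team"), Value: aws.String("reliability")},
+//        },
+//    })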
+// +// Returned Error Types: +// * ValidationException +// The input fails to satisfy constraints specified by Amazon Lookout for Equipment +// or a related AWS service that's being utilized. +// +// * ResourceNotFoundException +// The resource requested could not be found. Verify the resource ID and retry +// your request. +// +// * ServiceQuotaExceededException +// Resource limitations have been exceeded. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/TagResource +func (c *LookoutEquipment) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/UntagResource +func (c *LookoutEquipment) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon Lookout for Equipment. +// +// Removes a specific tag from a given resource. The tag is specified by its +// key. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation UntagResource for usage and error information. +// +// Returned Error Types: +// * ValidationException +// The input fails to satisfy constraints specified by Amazon Lookout for Equipment +// or a related AWS service that's being utilized. +// +// * ResourceNotFoundException +// The resource requested could not be found. Verify the resource ID and retry +// your request. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/UntagResource +func (c *LookoutEquipment) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateInferenceScheduler = "UpdateInferenceScheduler" + +// UpdateInferenceSchedulerRequest generates a "aws/request.Request" representing the +// client's request for the UpdateInferenceScheduler operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateInferenceScheduler for more information on using the UpdateInferenceScheduler +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateInferenceSchedulerRequest method. 
+// req, resp := client.UpdateInferenceSchedulerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/UpdateInferenceScheduler +func (c *LookoutEquipment) UpdateInferenceSchedulerRequest(input *UpdateInferenceSchedulerInput) (req *request.Request, output *UpdateInferenceSchedulerOutput) { + op := &request.Operation{ + Name: opUpdateInferenceScheduler, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateInferenceSchedulerInput{} + } + + output = &UpdateInferenceSchedulerOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateInferenceScheduler API operation for Amazon Lookout for Equipment. +// +// Updates an inference scheduler. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lookout for Equipment's +// API operation UpdateInferenceScheduler for usage and error information. +// +// Returned Error Types: +// * ConflictException +// The request could not be completed due to a conflict with the current state +// of the target resource. +// +// * ResourceNotFoundException +// The resource requested could not be found. Verify the resource ID and retry +// your request. +// +// * ValidationException +// The input fails to satisfy constraints specified by Amazon Lookout for Equipment +// or a related AWS service that's being utilized. +// +// * ThrottlingException +// The request was denied due to request throttling. +// +// * AccessDeniedException +// The request could not be completed because you do not have access to the +// resource. +// +// * InternalServerException +// Processing of the request has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/UpdateInferenceScheduler +func (c *LookoutEquipment) UpdateInferenceScheduler(input *UpdateInferenceSchedulerInput) (*UpdateInferenceSchedulerOutput, error) { + req, out := c.UpdateInferenceSchedulerRequest(input) + return out, req.Send() +} + +// UpdateInferenceSchedulerWithContext is the same as UpdateInferenceScheduler with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateInferenceScheduler for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *LookoutEquipment) UpdateInferenceSchedulerWithContext(ctx aws.Context, input *UpdateInferenceSchedulerInput, opts ...request.Option) (*UpdateInferenceSchedulerOutput, error) { + req, out := c.UpdateInferenceSchedulerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// The request could not be completed because you do not have access to the +// resource. 
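+//
+// A sketch (variable names illustrative) of detecting this error with the
+// standard library's errors.As:
+//
+//    var ade *lookoutequipment.AccessDeniedException
+//    if errors.As(err, &ade) {
+//        fmt.Println(ade.Code(), ade.Message())
+//    }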
+type AccessDeniedException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s AccessDeniedException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccessDeniedException) GoString() string {
+	return s.String()
+}
+
+func newErrorAccessDeniedException(v protocol.ResponseMetadata) error {
+	return &AccessDeniedException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *AccessDeniedException) Code() string {
+	return "AccessDeniedException"
+}
+
+// Message returns the exception's message.
+func (s *AccessDeniedException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *AccessDeniedException) OrigErr() error {
+	return nil
+}
+
+func (s *AccessDeniedException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *AccessDeniedException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *AccessDeniedException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The request could not be completed due to a conflict with the current state
+// of the target resource.
+type ConflictException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ConflictException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ConflictException) GoString() string {
+	return s.String()
+}
+
+func newErrorConflictException(v protocol.ResponseMetadata) error {
+	return &ConflictException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *ConflictException) Code() string {
+	return "ConflictException"
+}
+
+// Message returns the exception's message.
+func (s *ConflictException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ConflictException) OrigErr() error {
+	return nil
+}
+
+func (s *ConflictException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *ConflictException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *ConflictException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+type CreateDatasetInput struct {
+	_ struct{} `type:"structure"`
+
+	// A unique identifier for the request. If you do not set the client request
+	// token, Amazon Lookout for Equipment generates one.
+	ClientToken *string `min:"1" type:"string" idempotencyToken:"true"`
+
+	// The name of the dataset being created.
+	//
+	// DatasetName is a required field
+	DatasetName *string `min:"1" type:"string" required:"true"`
+
+	// A JSON description of the data that is in each time series dataset, including
+	// names, column names, and data types.
+ // + // DatasetSchema is a required field + DatasetSchema *DatasetSchema `type:"structure" required:"true"` + + // Provides the identifier of the AWS KMS customer master key (CMK) used to + // encrypt dataset data by Amazon Lookout for Equipment. + ServerSideKmsKeyId *string `min:"1" type:"string"` + + // Any tags associated with the ingested data described in the dataset. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s CreateDatasetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatasetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDatasetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDatasetInput"} + if s.ClientToken != nil && len(*s.ClientToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientToken", 1)) + } + if s.DatasetName == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetName")) + } + if s.DatasetName != nil && len(*s.DatasetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) + } + if s.DatasetSchema == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetSchema")) + } + if s.ServerSideKmsKeyId != nil && len(*s.ServerSideKmsKeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServerSideKmsKeyId", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateDatasetInput) SetClientToken(v string) *CreateDatasetInput { + s.ClientToken = &v + return s +} + +// SetDatasetName sets the DatasetName field's value. +func (s *CreateDatasetInput) SetDatasetName(v string) *CreateDatasetInput { + s.DatasetName = &v + return s +} + +// SetDatasetSchema sets the DatasetSchema field's value. +func (s *CreateDatasetInput) SetDatasetSchema(v *DatasetSchema) *CreateDatasetInput { + s.DatasetSchema = v + return s +} + +// SetServerSideKmsKeyId sets the ServerSideKmsKeyId field's value. +func (s *CreateDatasetInput) SetServerSideKmsKeyId(v string) *CreateDatasetInput { + s.ServerSideKmsKeyId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateDatasetInput) SetTags(v []*Tag) *CreateDatasetInput { + s.Tags = v + return s +} + +type CreateDatasetOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset being created. + DatasetArn *string `min:"20" type:"string"` + + // The name of the dataset being created. + DatasetName *string `min:"1" type:"string"` + + // Indicates the status of the CreateDataset operation. + Status *string `type:"string" enum:"DatasetStatus"` +} + +// String returns the string representation +func (s CreateDatasetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDatasetOutput) GoString() string { + return s.String() +} + +// SetDatasetArn sets the DatasetArn field's value. +func (s *CreateDatasetOutput) SetDatasetArn(v string) *CreateDatasetOutput { + s.DatasetArn = &v + return s +} + +// SetDatasetName sets the DatasetName field's value. 
+func (s *CreateDatasetOutput) SetDatasetName(v string) *CreateDatasetOutput { + s.DatasetName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CreateDatasetOutput) SetStatus(v string) *CreateDatasetOutput { + s.Status = &v + return s +} + +type CreateInferenceSchedulerInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the request. If you do not set the client request + // token, Amazon Lookout for Equipment generates one. + ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // A period of time (in minutes) by which inference on the data is delayed after + // the data starts. For instance, if you select an offset delay time of five + // minutes, inference will not begin on the data until the first data measurement + // after the five minute mark. For example, if five minutes is selected, the + // inference scheduler will wake up at the configured frequency with the additional + // five minute delay time to check the customer S3 bucket. The customer can + // upload data at the same frequency and they don't need to stop and restart + // the scheduler when uploading new data. + DataDelayOffsetInMinutes *int64 `type:"long"` + + // Specifies configuration information for the input data for the inference + // scheduler, including delimiter, format, and dataset location. + // + // DataInputConfiguration is a required field + DataInputConfiguration *InferenceInputConfiguration `type:"structure" required:"true"` + + // Specifies configuration information for the output results for the inference + // scheduler, including the S3 location for the output. + // + // DataOutputConfiguration is a required field + DataOutputConfiguration *InferenceOutputConfiguration `type:"structure" required:"true"` + + // How often data is uploaded to the source S3 bucket for the input data. The + // value chosen is the length of time between data uploads. For instance, if + // you select 5 minutes, Amazon Lookout for Equipment will upload the real-time + // data to the source bucket once every 5 minutes. This frequency also determines + // how often Amazon Lookout for Equipment starts a scheduled inference on your + // data. In this example, it starts once every 5 minutes. + // + // DataUploadFrequency is a required field + DataUploadFrequency *string `type:"string" required:"true" enum:"DataUploadFrequency"` + + // The name of the inference scheduler being created. + // + // InferenceSchedulerName is a required field + InferenceSchedulerName *string `min:"1" type:"string" required:"true"` + + // The name of the previously trained ML model being used to create the inference + // scheduler. + // + // ModelName is a required field + ModelName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of a role with permission to access the data + // source being used for the inference. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // Provides the identifier of the AWS KMS customer master key (CMK) used to + // encrypt inference scheduler data by Amazon Lookout for Equipment. + ServerSideKmsKeyId *string `min:"1" type:"string"` + + // Any tags associated with the inference scheduler. 
+ Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s CreateInferenceSchedulerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInferenceSchedulerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateInferenceSchedulerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateInferenceSchedulerInput"} + if s.ClientToken != nil && len(*s.ClientToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientToken", 1)) + } + if s.DataInputConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("DataInputConfiguration")) + } + if s.DataOutputConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("DataOutputConfiguration")) + } + if s.DataUploadFrequency == nil { + invalidParams.Add(request.NewErrParamRequired("DataUploadFrequency")) + } + if s.InferenceSchedulerName == nil { + invalidParams.Add(request.NewErrParamRequired("InferenceSchedulerName")) + } + if s.InferenceSchedulerName != nil && len(*s.InferenceSchedulerName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InferenceSchedulerName", 1)) + } + if s.ModelName == nil { + invalidParams.Add(request.NewErrParamRequired("ModelName")) + } + if s.ModelName != nil && len(*s.ModelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ModelName", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.ServerSideKmsKeyId != nil && len(*s.ServerSideKmsKeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServerSideKmsKeyId", 1)) + } + if s.DataInputConfiguration != nil { + if err := s.DataInputConfiguration.Validate(); err != nil { + invalidParams.AddNested("DataInputConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.DataOutputConfiguration != nil { + if err := s.DataOutputConfiguration.Validate(); err != nil { + invalidParams.AddNested("DataOutputConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateInferenceSchedulerInput) SetClientToken(v string) *CreateInferenceSchedulerInput { + s.ClientToken = &v + return s +} + +// SetDataDelayOffsetInMinutes sets the DataDelayOffsetInMinutes field's value. +func (s *CreateInferenceSchedulerInput) SetDataDelayOffsetInMinutes(v int64) *CreateInferenceSchedulerInput { + s.DataDelayOffsetInMinutes = &v + return s +} + +// SetDataInputConfiguration sets the DataInputConfiguration field's value. +func (s *CreateInferenceSchedulerInput) SetDataInputConfiguration(v *InferenceInputConfiguration) *CreateInferenceSchedulerInput { + s.DataInputConfiguration = v + return s +} + +// SetDataOutputConfiguration sets the DataOutputConfiguration field's value. +func (s *CreateInferenceSchedulerInput) SetDataOutputConfiguration(v *InferenceOutputConfiguration) *CreateInferenceSchedulerInput { + s.DataOutputConfiguration = v + return s +} + +// SetDataUploadFrequency sets the DataUploadFrequency field's value. 
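+//
+// A hedged sketch of chaining the generated fluent setters when building the
+// input; the "PT5M" frequency value and the role ARN below are assumptions
+// for illustration:
+//
+//    input := (&lookoutequipment.CreateInferenceSchedulerInput{}).
+//        SetInferenceSchedulerName("my-scheduler").
+//        SetModelName("my-model").
+//        SetDataUploadFrequency("PT5M").
+//        SetRoleArn("arn:aws:iam::123456789012:role/example-role")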
+func (s *CreateInferenceSchedulerInput) SetDataUploadFrequency(v string) *CreateInferenceSchedulerInput {
+	s.DataUploadFrequency = &v
+	return s
+}
+
+// SetInferenceSchedulerName sets the InferenceSchedulerName field's value.
+func (s *CreateInferenceSchedulerInput) SetInferenceSchedulerName(v string) *CreateInferenceSchedulerInput {
+	s.InferenceSchedulerName = &v
+	return s
+}
+
+// SetModelName sets the ModelName field's value.
+func (s *CreateInferenceSchedulerInput) SetModelName(v string) *CreateInferenceSchedulerInput {
+	s.ModelName = &v
+	return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *CreateInferenceSchedulerInput) SetRoleArn(v string) *CreateInferenceSchedulerInput {
+	s.RoleArn = &v
+	return s
+}
+
+// SetServerSideKmsKeyId sets the ServerSideKmsKeyId field's value.
+func (s *CreateInferenceSchedulerInput) SetServerSideKmsKeyId(v string) *CreateInferenceSchedulerInput {
+	s.ServerSideKmsKeyId = &v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateInferenceSchedulerInput) SetTags(v []*Tag) *CreateInferenceSchedulerInput {
+	s.Tags = v
+	return s
+}
+
+type CreateInferenceSchedulerOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the inference scheduler being created.
+	InferenceSchedulerArn *string `min:"20" type:"string"`
+
+	// The name of the inference scheduler being created.
+	InferenceSchedulerName *string `min:"1" type:"string"`
+
+	// Indicates the status of the CreateInferenceScheduler operation.
+	Status *string `type:"string" enum:"InferenceSchedulerStatus"`
+}
+
+// String returns the string representation
+func (s CreateInferenceSchedulerOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateInferenceSchedulerOutput) GoString() string {
+	return s.String()
+}
+
+// SetInferenceSchedulerArn sets the InferenceSchedulerArn field's value.
+func (s *CreateInferenceSchedulerOutput) SetInferenceSchedulerArn(v string) *CreateInferenceSchedulerOutput {
+	s.InferenceSchedulerArn = &v
+	return s
+}
+
+// SetInferenceSchedulerName sets the InferenceSchedulerName field's value.
+func (s *CreateInferenceSchedulerOutput) SetInferenceSchedulerName(v string) *CreateInferenceSchedulerOutput {
+	s.InferenceSchedulerName = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *CreateInferenceSchedulerOutput) SetStatus(v string) *CreateInferenceSchedulerOutput {
+	s.Status = &v
+	return s
+}
+
+type CreateModelInput struct {
+	_ struct{} `type:"structure"`
+
+	// A unique identifier for the request. If you do not set the client request
+	// token, Amazon Lookout for Equipment generates one.
+	ClientToken *string `min:"1" type:"string" idempotencyToken:"true"`
+
+	// The configuration is the TargetSamplingRate, which is the sampling rate of
+	// the data after post processing by Amazon Lookout for Equipment. For example,
+	// if you provide data that has been collected at a 1 second level and you want
+	// the system to resample the data at a 1 minute rate before training, the TargetSamplingRate
+	// is 1 minute.
+	//
+	// When providing a value for the TargetSamplingRate, you must attach the prefix
+	// "PT" to the rate you want. The value for a 1 second rate is therefore PT1S,
+	// the value for a 15 minute rate is PT15M, and the value for a 1 hour rate
+	// is PT1H.
+	DataPreProcessingConfiguration *DataPreProcessingConfiguration `type:"structure"`
+
+	// The name of the dataset for the ML model being created.
+ // + // DatasetName is a required field + DatasetName *string `min:"1" type:"string" required:"true"` + + // The data schema for the ML model being created. + DatasetSchema *DatasetSchema `type:"structure"` + + // Indicates the time reference in the dataset that should be used to end the + // subset of evaluation data for the ML model. + EvaluationDataEndTime *time.Time `type:"timestamp"` + + // Indicates the time reference in the dataset that should be used to begin + // the subset of evaluation data for the ML model. + EvaluationDataStartTime *time.Time `type:"timestamp"` + + // The input configuration for the labels being used for the ML model that's + // being created. + LabelsInputConfiguration *LabelsInputConfiguration `type:"structure"` + + // The name for the ML model to be created. + // + // ModelName is a required field + ModelName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of a role with permission to access the data + // source being used to create the ML model. + RoleArn *string `min:"20" type:"string"` + + // Provides the identifier of the AWS KMS customer master key (CMK) used to + // encrypt model data by Amazon Lookout for Equipment. + ServerSideKmsKeyId *string `min:"1" type:"string"` + + // Any tags associated with the ML model being created. + Tags []*Tag `type:"list"` + + // Indicates the time reference in the dataset that should be used to end the + // subset of training data for the ML model. + TrainingDataEndTime *time.Time `type:"timestamp"` + + // Indicates the time reference in the dataset that should be used to begin + // the subset of training data for the ML model. + TrainingDataStartTime *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s CreateModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateModelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateModelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateModelInput"} + if s.ClientToken != nil && len(*s.ClientToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientToken", 1)) + } + if s.DatasetName == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetName")) + } + if s.DatasetName != nil && len(*s.DatasetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) + } + if s.ModelName == nil { + invalidParams.Add(request.NewErrParamRequired("ModelName")) + } + if s.ModelName != nil && len(*s.ModelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ModelName", 1)) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.ServerSideKmsKeyId != nil && len(*s.ServerSideKmsKeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServerSideKmsKeyId", 1)) + } + if s.LabelsInputConfiguration != nil { + if err := s.LabelsInputConfiguration.Validate(); err != nil { + invalidParams.AddNested("LabelsInputConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. 
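+//
+// A hedged note: ClientToken is an idempotency token (idempotencyToken:"true"),
+// so one is generated automatically when unset; setting it yourself, as
+// sketched below with assumed names, makes retries of the same create safe:
+//
+//    in := &lookoutequipment.CreateModelInput{
+//        ModelName:   aws.String("my-model"),
+//        DatasetName: aws.String("my-dataset"),
+//        ClientToken: aws.String("create-my-model-2021-04-08"),
+//    }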
+func (s *CreateModelInput) SetClientToken(v string) *CreateModelInput { + s.ClientToken = &v + return s +} + +// SetDataPreProcessingConfiguration sets the DataPreProcessingConfiguration field's value. +func (s *CreateModelInput) SetDataPreProcessingConfiguration(v *DataPreProcessingConfiguration) *CreateModelInput { + s.DataPreProcessingConfiguration = v + return s +} + +// SetDatasetName sets the DatasetName field's value. +func (s *CreateModelInput) SetDatasetName(v string) *CreateModelInput { + s.DatasetName = &v + return s +} + +// SetDatasetSchema sets the DatasetSchema field's value. +func (s *CreateModelInput) SetDatasetSchema(v *DatasetSchema) *CreateModelInput { + s.DatasetSchema = v + return s +} + +// SetEvaluationDataEndTime sets the EvaluationDataEndTime field's value. +func (s *CreateModelInput) SetEvaluationDataEndTime(v time.Time) *CreateModelInput { + s.EvaluationDataEndTime = &v + return s +} + +// SetEvaluationDataStartTime sets the EvaluationDataStartTime field's value. +func (s *CreateModelInput) SetEvaluationDataStartTime(v time.Time) *CreateModelInput { + s.EvaluationDataStartTime = &v + return s +} + +// SetLabelsInputConfiguration sets the LabelsInputConfiguration field's value. +func (s *CreateModelInput) SetLabelsInputConfiguration(v *LabelsInputConfiguration) *CreateModelInput { + s.LabelsInputConfiguration = v + return s +} + +// SetModelName sets the ModelName field's value. +func (s *CreateModelInput) SetModelName(v string) *CreateModelInput { + s.ModelName = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *CreateModelInput) SetRoleArn(v string) *CreateModelInput { + s.RoleArn = &v + return s +} + +// SetServerSideKmsKeyId sets the ServerSideKmsKeyId field's value. +func (s *CreateModelInput) SetServerSideKmsKeyId(v string) *CreateModelInput { + s.ServerSideKmsKeyId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateModelInput) SetTags(v []*Tag) *CreateModelInput { + s.Tags = v + return s +} + +// SetTrainingDataEndTime sets the TrainingDataEndTime field's value. +func (s *CreateModelInput) SetTrainingDataEndTime(v time.Time) *CreateModelInput { + s.TrainingDataEndTime = &v + return s +} + +// SetTrainingDataStartTime sets the TrainingDataStartTime field's value. +func (s *CreateModelInput) SetTrainingDataStartTime(v time.Time) *CreateModelInput { + s.TrainingDataStartTime = &v + return s +} + +type CreateModelOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the model being created. + ModelArn *string `min:"20" type:"string"` + + // Indicates the status of the CreateModel operation. + Status *string `type:"string" enum:"ModelStatus"` +} + +// String returns the string representation +func (s CreateModelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateModelOutput) GoString() string { + return s.String() +} + +// SetModelArn sets the ModelArn field's value. +func (s *CreateModelOutput) SetModelArn(v string) *CreateModelOutput { + s.ModelArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CreateModelOutput) SetStatus(v string) *CreateModelOutput { + s.Status = &v + return s +} + +// Provides information about a specified data ingestion job, including dataset +// information, data ingestion configuration, and status. 
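+//
+// A hedged sketch of reading these summaries, assuming they were returned by
+// a list call such as ListDataIngestionJobs (output field name assumed):
+//
+//    for _, job := range out.DataIngestionJobSummaries {
+//        fmt.Println(aws.StringValue(job.JobId), aws.StringValue(job.Status))
+//    }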
+type DataIngestionJobSummary struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the dataset used in the data ingestion
+	// job.
+	DatasetArn *string `min:"20" type:"string"`
+
+	// The name of the dataset used for the data ingestion job.
+	DatasetName *string `min:"1" type:"string"`
+
+	// Specifies information for the input data for the data ingestion job, including
+	// data S3 location parameters.
+	IngestionInputConfiguration *IngestionInputConfiguration `type:"structure"`
+
+	// Indicates the job ID of the data ingestion job.
+	JobId *string `type:"string"`
+
+	// Indicates the status of the data ingestion job.
+	Status *string `type:"string" enum:"IngestionJobStatus"`
+}
+
+// String returns the string representation
+func (s DataIngestionJobSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DataIngestionJobSummary) GoString() string {
+	return s.String()
+}
+
+// SetDatasetArn sets the DatasetArn field's value.
+func (s *DataIngestionJobSummary) SetDatasetArn(v string) *DataIngestionJobSummary {
+	s.DatasetArn = &v
+	return s
+}
+
+// SetDatasetName sets the DatasetName field's value.
+func (s *DataIngestionJobSummary) SetDatasetName(v string) *DataIngestionJobSummary {
+	s.DatasetName = &v
+	return s
+}
+
+// SetIngestionInputConfiguration sets the IngestionInputConfiguration field's value.
+func (s *DataIngestionJobSummary) SetIngestionInputConfiguration(v *IngestionInputConfiguration) *DataIngestionJobSummary {
+	s.IngestionInputConfiguration = v
+	return s
+}
+
+// SetJobId sets the JobId field's value.
+func (s *DataIngestionJobSummary) SetJobId(v string) *DataIngestionJobSummary {
+	s.JobId = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *DataIngestionJobSummary) SetStatus(v string) *DataIngestionJobSummary {
+	s.Status = &v
+	return s
+}
+
+// The configuration is the TargetSamplingRate, which is the sampling rate of
+// the data after post processing by Amazon Lookout for Equipment. For example,
+// if you provide data that has been collected at a 1 second level and you want
+// the system to resample the data at a 1 minute rate before training, the TargetSamplingRate
+// is 1 minute.
+//
+// When providing a value for the TargetSamplingRate, you must attach the prefix
+// "PT" to the rate you want. The value for a 1 second rate is therefore PT1S,
+// the value for a 15 minute rate is PT15M, and the value for a 1 hour rate
+// is PT1H.
+type DataPreProcessingConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// The sampling rate of the data after post processing by Amazon Lookout for
+	// Equipment. For example, if you provide data that has been collected at a
+	// 1 second level and you want the system to resample the data at a 1 minute
+	// rate before training, the TargetSamplingRate is 1 minute.
+	//
+	// When providing a value for the TargetSamplingRate, you must attach the prefix
+	// "PT" to the rate you want. The value for a 1 second rate is therefore PT1S,
+	// the value for a 15 minute rate is PT15M, and the value for a 1 hour rate
+	// is PT1H.
+	TargetSamplingRate *string `type:"string" enum:"TargetSamplingRate"`
+}
+
+// String returns the string representation
+func (s DataPreProcessingConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DataPreProcessingConfiguration) GoString() string {
+	return s.String()
+}
+
+// SetTargetSamplingRate sets the TargetSamplingRate field's value.
+func (s *DataPreProcessingConfiguration) SetTargetSamplingRate(v string) *DataPreProcessingConfiguration {
+	s.TargetSamplingRate = &v
+	return s
+}
+
+// Provides information about the data schema used with the given dataset.
+type DatasetSchema struct {
+	_ struct{} `type:"structure"`
+
+	InlineDataSchema aws.JSONValue `type:"jsonvalue"`
+}
+
+// String returns the string representation
+func (s DatasetSchema) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DatasetSchema) GoString() string {
+	return s.String()
+}
+
+// SetInlineDataSchema sets the InlineDataSchema field's value.
+func (s *DatasetSchema) SetInlineDataSchema(v aws.JSONValue) *DatasetSchema {
+	s.InlineDataSchema = v
+	return s
+}
+
+// Contains information about the specific data set, including name, ARN, and
+// status.
+type DatasetSummary struct {
+	_ struct{} `type:"structure"`
+
+	// The time at which the dataset was created in Amazon Lookout for Equipment.
+	CreatedAt *time.Time `type:"timestamp"`
+
+	// The Amazon Resource Name (ARN) of the specified dataset.
+	DatasetArn *string `min:"20" type:"string"`
+
+	// The name of the dataset.
+	DatasetName *string `min:"1" type:"string"`
+
+	// Indicates the status of the dataset.
+	Status *string `type:"string" enum:"DatasetStatus"`
+}
+
+// String returns the string representation
+func (s DatasetSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DatasetSummary) GoString() string {
+	return s.String()
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *DatasetSummary) SetCreatedAt(v time.Time) *DatasetSummary {
+	s.CreatedAt = &v
+	return s
+}
+
+// SetDatasetArn sets the DatasetArn field's value.
+func (s *DatasetSummary) SetDatasetArn(v string) *DatasetSummary {
+	s.DatasetArn = &v
+	return s
+}
+
+// SetDatasetName sets the DatasetName field's value.
+func (s *DatasetSummary) SetDatasetName(v string) *DatasetSummary {
+	s.DatasetName = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *DatasetSummary) SetStatus(v string) *DatasetSummary {
+	s.Status = &v
+	return s
+}
+
+type DeleteDatasetInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the dataset to be deleted.
+	//
+	// DatasetName is a required field
+	DatasetName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteDatasetInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteDatasetInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
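+//
+// A hedged sketch: the SDK runs this validation automatically before sending
+// the request, but it can also be invoked directly:
+//
+//    in := &lookoutequipment.DeleteDatasetInput{DatasetName: aws.String("my-dataset")}
+//    if err := in.Validate(); err != nil {
+//        fmt.Println(err) // e.g. a missing required field
+//    }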
+func (s *DeleteDatasetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDatasetInput"} + if s.DatasetName == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetName")) + } + if s.DatasetName != nil && len(*s.DatasetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetName sets the DatasetName field's value. +func (s *DeleteDatasetInput) SetDatasetName(v string) *DeleteDatasetInput { + s.DatasetName = &v + return s +} + +type DeleteDatasetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDatasetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDatasetOutput) GoString() string { + return s.String() +} + +type DeleteInferenceSchedulerInput struct { + _ struct{} `type:"structure"` + + // The name of the inference scheduler to be deleted. + // + // InferenceSchedulerName is a required field + InferenceSchedulerName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteInferenceSchedulerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInferenceSchedulerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteInferenceSchedulerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteInferenceSchedulerInput"} + if s.InferenceSchedulerName == nil { + invalidParams.Add(request.NewErrParamRequired("InferenceSchedulerName")) + } + if s.InferenceSchedulerName != nil && len(*s.InferenceSchedulerName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InferenceSchedulerName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInferenceSchedulerName sets the InferenceSchedulerName field's value. +func (s *DeleteInferenceSchedulerInput) SetInferenceSchedulerName(v string) *DeleteInferenceSchedulerInput { + s.InferenceSchedulerName = &v + return s +} + +type DeleteInferenceSchedulerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteInferenceSchedulerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInferenceSchedulerOutput) GoString() string { + return s.String() +} + +type DeleteModelInput struct { + _ struct{} `type:"structure"` + + // The name of the ML model to be deleted. + // + // ModelName is a required field + ModelName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteModelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteModelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteModelInput"} + if s.ModelName == nil { + invalidParams.Add(request.NewErrParamRequired("ModelName")) + } + if s.ModelName != nil && len(*s.ModelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ModelName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetModelName sets the ModelName field's value. +func (s *DeleteModelInput) SetModelName(v string) *DeleteModelInput { + s.ModelName = &v + return s +} + +type DeleteModelOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteModelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteModelOutput) GoString() string { + return s.String() +} + +type DescribeDataIngestionJobInput struct { + _ struct{} `type:"structure"` + + // The job ID of the data ingestion job. + // + // JobId is a required field + JobId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDataIngestionJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDataIngestionJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDataIngestionJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDataIngestionJobInput"} + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetJobId sets the JobId field's value. +func (s *DescribeDataIngestionJobInput) SetJobId(v string) *DescribeDataIngestionJobInput { + s.JobId = &v + return s +} + +type DescribeDataIngestionJobOutput struct { + _ struct{} `type:"structure"` + + // The time at which the data ingestion job was created. + CreatedAt *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of the dataset being used in the data ingestion + // job. + DatasetArn *string `min:"20" type:"string"` + + // Specifies the reason for failure when a data ingestion job has failed. + FailedReason *string `min:"1" type:"string"` + + // Specifies the S3 location configuration for the data input for the data ingestion + // job. + IngestionInputConfiguration *IngestionInputConfiguration `type:"structure"` + + // Indicates the job ID of the data ingestion job. + JobId *string `type:"string"` + + // The Amazon Resource Name (ARN) of an IAM role with permission to access the + // data source being ingested. + RoleArn *string `min:"20" type:"string"` + + // Indicates the status of the DataIngestionJob operation. + Status *string `type:"string" enum:"IngestionJobStatus"` +} + +// String returns the string representation +func (s DescribeDataIngestionJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDataIngestionJobOutput) GoString() string { + return s.String() +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *DescribeDataIngestionJobOutput) SetCreatedAt(v time.Time) *DescribeDataIngestionJobOutput { + s.CreatedAt = &v + return s +} + +// SetDatasetArn sets the DatasetArn field's value. 
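+//
+// A hedged sketch of using the describe output these setters belong to, for
+// example to surface FailedReason after a failed ingestion (job ID assumed):
+//
+//    out, err := client.DescribeDataIngestionJob(&lookoutequipment.DescribeDataIngestionJobInput{
+//        JobId: aws.String("abc0123456789"),
+//    })
+//    if err == nil && out.FailedReason != nil {
+//        fmt.Println(aws.StringValue(out.Status), aws.StringValue(out.FailedReason))
+//    }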
+func (s *DescribeDataIngestionJobOutput) SetDatasetArn(v string) *DescribeDataIngestionJobOutput { + s.DatasetArn = &v + return s +} + +// SetFailedReason sets the FailedReason field's value. +func (s *DescribeDataIngestionJobOutput) SetFailedReason(v string) *DescribeDataIngestionJobOutput { + s.FailedReason = &v + return s +} + +// SetIngestionInputConfiguration sets the IngestionInputConfiguration field's value. +func (s *DescribeDataIngestionJobOutput) SetIngestionInputConfiguration(v *IngestionInputConfiguration) *DescribeDataIngestionJobOutput { + s.IngestionInputConfiguration = v + return s +} + +// SetJobId sets the JobId field's value. +func (s *DescribeDataIngestionJobOutput) SetJobId(v string) *DescribeDataIngestionJobOutput { + s.JobId = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *DescribeDataIngestionJobOutput) SetRoleArn(v string) *DescribeDataIngestionJobOutput { + s.RoleArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DescribeDataIngestionJobOutput) SetStatus(v string) *DescribeDataIngestionJobOutput { + s.Status = &v + return s +} + +type DescribeDatasetInput struct { + _ struct{} `type:"structure"` + + // The name of the dataset to be described. + // + // DatasetName is a required field + DatasetName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDatasetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatasetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDatasetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDatasetInput"} + if s.DatasetName == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetName")) + } + if s.DatasetName != nil && len(*s.DatasetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetName sets the DatasetName field's value. +func (s *DescribeDatasetInput) SetDatasetName(v string) *DescribeDatasetInput { + s.DatasetName = &v + return s +} + +type DescribeDatasetOutput struct { + _ struct{} `type:"structure"` + + // Specifies the time the dataset was created in Amazon Lookout for Equipment. + CreatedAt *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of the dataset being described. + DatasetArn *string `min:"20" type:"string"` + + // The name of the dataset being described. + DatasetName *string `min:"1" type:"string"` + + // Specifies the S3 location configuration for the data input for the data ingestion + // job. + IngestionInputConfiguration *IngestionInputConfiguration `type:"structure"` + + // Specifies the time the dataset was last updated, if it was. + LastUpdatedAt *time.Time `type:"timestamp"` + + // A JSON description of the data that is in each time series dataset, including + // names, column names, and data types. + Schema aws.JSONValue `type:"jsonvalue"` + + // Provides the identifier of the AWS KMS customer master key (CMK) used to + // encrypt dataset data by Amazon Lookout for Equipment. + ServerSideKmsKeyId *string `min:"1" type:"string"` + + // Indicates the status of the dataset. 
+ Status *string `type:"string" enum:"DatasetStatus"` +} + +// String returns the string representation +func (s DescribeDatasetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDatasetOutput) GoString() string { + return s.String() +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *DescribeDatasetOutput) SetCreatedAt(v time.Time) *DescribeDatasetOutput { + s.CreatedAt = &v + return s +} + +// SetDatasetArn sets the DatasetArn field's value. +func (s *DescribeDatasetOutput) SetDatasetArn(v string) *DescribeDatasetOutput { + s.DatasetArn = &v + return s +} + +// SetDatasetName sets the DatasetName field's value. +func (s *DescribeDatasetOutput) SetDatasetName(v string) *DescribeDatasetOutput { + s.DatasetName = &v + return s +} + +// SetIngestionInputConfiguration sets the IngestionInputConfiguration field's value. +func (s *DescribeDatasetOutput) SetIngestionInputConfiguration(v *IngestionInputConfiguration) *DescribeDatasetOutput { + s.IngestionInputConfiguration = v + return s +} + +// SetLastUpdatedAt sets the LastUpdatedAt field's value. +func (s *DescribeDatasetOutput) SetLastUpdatedAt(v time.Time) *DescribeDatasetOutput { + s.LastUpdatedAt = &v + return s +} + +// SetSchema sets the Schema field's value. +func (s *DescribeDatasetOutput) SetSchema(v aws.JSONValue) *DescribeDatasetOutput { + s.Schema = v + return s +} + +// SetServerSideKmsKeyId sets the ServerSideKmsKeyId field's value. +func (s *DescribeDatasetOutput) SetServerSideKmsKeyId(v string) *DescribeDatasetOutput { + s.ServerSideKmsKeyId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DescribeDatasetOutput) SetStatus(v string) *DescribeDatasetOutput { + s.Status = &v + return s +} + +type DescribeInferenceSchedulerInput struct { + _ struct{} `type:"structure"` + + // The name of the inference scheduler being described. + // + // InferenceSchedulerName is a required field + InferenceSchedulerName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeInferenceSchedulerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInferenceSchedulerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeInferenceSchedulerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInferenceSchedulerInput"} + if s.InferenceSchedulerName == nil { + invalidParams.Add(request.NewErrParamRequired("InferenceSchedulerName")) + } + if s.InferenceSchedulerName != nil && len(*s.InferenceSchedulerName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InferenceSchedulerName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInferenceSchedulerName sets the InferenceSchedulerName field's value. +func (s *DescribeInferenceSchedulerInput) SetInferenceSchedulerName(v string) *DescribeInferenceSchedulerInput { + s.InferenceSchedulerName = &v + return s +} + +type DescribeInferenceSchedulerOutput struct { + _ struct{} `type:"structure"` + + // Specifies the time at which the inference scheduler was created. + CreatedAt *time.Time `type:"timestamp"` + + // A period of time (in minutes) by which inference on the data is delayed after + // the data starts. 
For instance, if you select an offset delay time of five + // minutes, inference will not begin on the data until the first data measurement + // after the five minute mark. For example, if five minutes is selected, the + // inference scheduler will wake up at the configured frequency with the additional + // five minute delay time to check the customer S3 bucket. The customer can + // upload data at the same frequency and they don't need to stop and restart + // the scheduler when uploading new data. + DataDelayOffsetInMinutes *int64 `type:"long"` + + // Specifies configuration information for the input data for the inference + // scheduler, including delimiter, format, and dataset location. + DataInputConfiguration *InferenceInputConfiguration `type:"structure"` + + // Specifies information for the output results for the inference scheduler, + // including the output S3 location. + DataOutputConfiguration *InferenceOutputConfiguration `type:"structure"` + + // Specifies how often data is uploaded to the source S3 bucket for the input + // data. This value is the length of time between data uploads. For instance, + // if you select 5 minutes, Amazon Lookout for Equipment will upload the real-time + // data to the source bucket once every 5 minutes. This frequency also determines + // how often Amazon Lookout for Equipment starts a scheduled inference on your + // data. In this example, it starts once every 5 minutes. + DataUploadFrequency *string `type:"string" enum:"DataUploadFrequency"` + + // The Amazon Resource Name (ARN) of the inference scheduler being described. + InferenceSchedulerArn *string `min:"20" type:"string"` + + // The name of the inference scheduler being described. + InferenceSchedulerName *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the ML model of the inference scheduler + // being described. + ModelArn *string `min:"20" type:"string"` + + // The name of the ML model of the inference scheduler being described. + ModelName *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of a role with permission to access the data + // source for the inference scheduler being described. + RoleArn *string `min:"20" type:"string"` + + // Provides the identifier of the AWS KMS customer master key (CMK) used to + // encrypt inference scheduler data by Amazon Lookout for Equipment. + ServerSideKmsKeyId *string `min:"1" type:"string"` + + // Indicates the status of the inference scheduler. + Status *string `type:"string" enum:"InferenceSchedulerStatus"` + + // Specifies the time at which the inference scheduler was last updated, if + // it was. + UpdatedAt *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s DescribeInferenceSchedulerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInferenceSchedulerOutput) GoString() string { + return s.String() +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *DescribeInferenceSchedulerOutput) SetCreatedAt(v time.Time) *DescribeInferenceSchedulerOutput { + s.CreatedAt = &v + return s +} + +// SetDataDelayOffsetInMinutes sets the DataDelayOffsetInMinutes field's value. +func (s *DescribeInferenceSchedulerOutput) SetDataDelayOffsetInMinutes(v int64) *DescribeInferenceSchedulerOutput { + s.DataDelayOffsetInMinutes = &v + return s +} + +// SetDataInputConfiguration sets the DataInputConfiguration field's value. 
+func (s *DescribeInferenceSchedulerOutput) SetDataInputConfiguration(v *InferenceInputConfiguration) *DescribeInferenceSchedulerOutput { + s.DataInputConfiguration = v + return s +} + +// SetDataOutputConfiguration sets the DataOutputConfiguration field's value. +func (s *DescribeInferenceSchedulerOutput) SetDataOutputConfiguration(v *InferenceOutputConfiguration) *DescribeInferenceSchedulerOutput { + s.DataOutputConfiguration = v + return s +} + +// SetDataUploadFrequency sets the DataUploadFrequency field's value. +func (s *DescribeInferenceSchedulerOutput) SetDataUploadFrequency(v string) *DescribeInferenceSchedulerOutput { + s.DataUploadFrequency = &v + return s +} + +// SetInferenceSchedulerArn sets the InferenceSchedulerArn field's value. +func (s *DescribeInferenceSchedulerOutput) SetInferenceSchedulerArn(v string) *DescribeInferenceSchedulerOutput { + s.InferenceSchedulerArn = &v + return s +} + +// SetInferenceSchedulerName sets the InferenceSchedulerName field's value. +func (s *DescribeInferenceSchedulerOutput) SetInferenceSchedulerName(v string) *DescribeInferenceSchedulerOutput { + s.InferenceSchedulerName = &v + return s +} + +// SetModelArn sets the ModelArn field's value. +func (s *DescribeInferenceSchedulerOutput) SetModelArn(v string) *DescribeInferenceSchedulerOutput { + s.ModelArn = &v + return s +} + +// SetModelName sets the ModelName field's value. +func (s *DescribeInferenceSchedulerOutput) SetModelName(v string) *DescribeInferenceSchedulerOutput { + s.ModelName = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *DescribeInferenceSchedulerOutput) SetRoleArn(v string) *DescribeInferenceSchedulerOutput { + s.RoleArn = &v + return s +} + +// SetServerSideKmsKeyId sets the ServerSideKmsKeyId field's value. +func (s *DescribeInferenceSchedulerOutput) SetServerSideKmsKeyId(v string) *DescribeInferenceSchedulerOutput { + s.ServerSideKmsKeyId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DescribeInferenceSchedulerOutput) SetStatus(v string) *DescribeInferenceSchedulerOutput { + s.Status = &v + return s +} + +// SetUpdatedAt sets the UpdatedAt field's value. +func (s *DescribeInferenceSchedulerOutput) SetUpdatedAt(v time.Time) *DescribeInferenceSchedulerOutput { + s.UpdatedAt = &v + return s +} + +type DescribeModelInput struct { + _ struct{} `type:"structure"` + + // The name of the ML model to be described. + // + // ModelName is a required field + ModelName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeModelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeModelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeModelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeModelInput"} + if s.ModelName == nil { + invalidParams.Add(request.NewErrParamRequired("ModelName")) + } + if s.ModelName != nil && len(*s.ModelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ModelName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetModelName sets the ModelName field's value. 
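+//
+// A hedged usage sketch for the describe call this input feeds (the model
+// name is an assumption):
+//
+//    out, err := client.DescribeModel((&lookoutequipment.DescribeModelInput{}).
+//        SetModelName("my-model"))
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Status))
+//    }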
+func (s *DescribeModelInput) SetModelName(v string) *DescribeModelInput {
+	s.ModelName = &v
+	return s
+}
+
+type DescribeModelOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates the time and date at which the ML model was created.
+	CreatedAt *time.Time `type:"timestamp"`
+
+	// The configuration is the TargetSamplingRate, which is the sampling rate of
+	// the data after post processing by Amazon Lookout for Equipment. For example,
+	// if you provide data that has been collected at a 1 second level and you want
+	// the system to resample the data at a 1 minute rate before training, the TargetSamplingRate
+	// is 1 minute.
+	//
+	// When providing a value for the TargetSamplingRate, you must attach the prefix
+	// "PT" to the rate you want. The value for a 1 second rate is therefore PT1S,
+	// the value for a 15 minute rate is PT15M, and the value for a 1 hour rate
+	// is PT1H.
+	DataPreProcessingConfiguration *DataPreProcessingConfiguration `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the dataset used to create the ML model
+	// being described.
+	DatasetArn *string `min:"20" type:"string"`
+
+	// The name of the dataset being used by the ML model being described.
+	DatasetName *string `min:"1" type:"string"`
+
+	// Indicates the time reference in the dataset that was used to end the subset
+	// of evaluation data for the ML model.
+	EvaluationDataEndTime *time.Time `type:"timestamp"`
+
+	// Indicates the time reference in the dataset that was used to begin the subset
+	// of evaluation data for the ML model.
+	EvaluationDataStartTime *time.Time `type:"timestamp"`
+
+	// If the training of the ML model failed, this indicates the reason for that
+	// failure.
+	FailedReason *string `min:"1" type:"string"`
+
+	// Specifies configuration information about the labels input, including its
+	// S3 location.
+	LabelsInputConfiguration *LabelsInputConfiguration `type:"structure"`
+
+	// Indicates the last time the ML model was updated. The type of update is not
+	// specified.
+	LastUpdatedTime *time.Time `type:"timestamp"`
+
+	// The Amazon Resource Name (ARN) of the ML model being described.
+	ModelArn *string `min:"20" type:"string"`
+
+	// The Model Metrics show an aggregated summary of the model's performance within
+	// the evaluation time range. This is the JSON content of the metrics created
+	// when evaluating the model.
+	ModelMetrics aws.JSONValue `type:"jsonvalue"`
+
+	// The name of the ML model being described.
+	ModelName *string `min:"1" type:"string"`
+
+	// The Amazon Resource Name (ARN) of a role with permission to access the data
+	// source for the ML model being described.
+	RoleArn *string `min:"20" type:"string"`
+
+	// A JSON description of the data that is in each time series dataset, including
+	// names, column names, and data types.
+	Schema aws.JSONValue `type:"jsonvalue"`
+
+	// Provides the identifier of the AWS KMS customer master key (CMK) used to
+	// encrypt model data by Amazon Lookout for Equipment.
+	ServerSideKmsKeyId *string `min:"1" type:"string"`
+
+	// Specifies the current status of the model being described. Status describes
+	// the status of the most recent action of the model.
+	Status *string `type:"string" enum:"ModelStatus"`
+
+	// Indicates the time reference in the dataset that was used to end the subset
+	// of training data for the ML model.
+	TrainingDataEndTime *time.Time `type:"timestamp"`
+
+	// Indicates the time reference in the dataset that was used to begin the subset
+	// of training data for the ML model.
+ TrainingDataStartTime *time.Time `type:"timestamp"` + + // Indicates the time at which the training of the ML model was completed. + TrainingExecutionEndTime *time.Time `type:"timestamp"` + + // Indicates the time at which the training of the ML model began. + TrainingExecutionStartTime *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s DescribeModelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeModelOutput) GoString() string { + return s.String() +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *DescribeModelOutput) SetCreatedAt(v time.Time) *DescribeModelOutput { + s.CreatedAt = &v + return s +} + +// SetDataPreProcessingConfiguration sets the DataPreProcessingConfiguration field's value. +func (s *DescribeModelOutput) SetDataPreProcessingConfiguration(v *DataPreProcessingConfiguration) *DescribeModelOutput { + s.DataPreProcessingConfiguration = v + return s +} + +// SetDatasetArn sets the DatasetArn field's value. +func (s *DescribeModelOutput) SetDatasetArn(v string) *DescribeModelOutput { + s.DatasetArn = &v + return s +} + +// SetDatasetName sets the DatasetName field's value. +func (s *DescribeModelOutput) SetDatasetName(v string) *DescribeModelOutput { + s.DatasetName = &v + return s +} + +// SetEvaluationDataEndTime sets the EvaluationDataEndTime field's value. +func (s *DescribeModelOutput) SetEvaluationDataEndTime(v time.Time) *DescribeModelOutput { + s.EvaluationDataEndTime = &v + return s +} + +// SetEvaluationDataStartTime sets the EvaluationDataStartTime field's value. +func (s *DescribeModelOutput) SetEvaluationDataStartTime(v time.Time) *DescribeModelOutput { + s.EvaluationDataStartTime = &v + return s +} + +// SetFailedReason sets the FailedReason field's value. +func (s *DescribeModelOutput) SetFailedReason(v string) *DescribeModelOutput { + s.FailedReason = &v + return s +} + +// SetLabelsInputConfiguration sets the LabelsInputConfiguration field's value. +func (s *DescribeModelOutput) SetLabelsInputConfiguration(v *LabelsInputConfiguration) *DescribeModelOutput { + s.LabelsInputConfiguration = v + return s +} + +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *DescribeModelOutput) SetLastUpdatedTime(v time.Time) *DescribeModelOutput { + s.LastUpdatedTime = &v + return s +} + +// SetModelArn sets the ModelArn field's value. +func (s *DescribeModelOutput) SetModelArn(v string) *DescribeModelOutput { + s.ModelArn = &v + return s +} + +// SetModelMetrics sets the ModelMetrics field's value. +func (s *DescribeModelOutput) SetModelMetrics(v aws.JSONValue) *DescribeModelOutput { + s.ModelMetrics = v + return s +} + +// SetModelName sets the ModelName field's value. +func (s *DescribeModelOutput) SetModelName(v string) *DescribeModelOutput { + s.ModelName = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *DescribeModelOutput) SetRoleArn(v string) *DescribeModelOutput { + s.RoleArn = &v + return s +} + +// SetSchema sets the Schema field's value. +func (s *DescribeModelOutput) SetSchema(v aws.JSONValue) *DescribeModelOutput { + s.Schema = v + return s +} + +// SetServerSideKmsKeyId sets the ServerSideKmsKeyId field's value. +func (s *DescribeModelOutput) SetServerSideKmsKeyId(v string) *DescribeModelOutput { + s.ServerSideKmsKeyId = &v + return s +} + +// SetStatus sets the Status field's value. 
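+//
+// A hedged polling sketch; the "IN_PROGRESS" value is an assumption about
+// the ModelStatus enum:
+//
+//    for {
+//        out, err := client.DescribeModel(in)
+//        if err != nil || aws.StringValue(out.Status) != "IN_PROGRESS" {
+//            break
+//        }
+//        time.Sleep(30 * time.Second)
+//    }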
+func (s *DescribeModelOutput) SetStatus(v string) *DescribeModelOutput {
+	s.Status = &v
+	return s
+}
+
+// SetTrainingDataEndTime sets the TrainingDataEndTime field's value.
+func (s *DescribeModelOutput) SetTrainingDataEndTime(v time.Time) *DescribeModelOutput {
+	s.TrainingDataEndTime = &v
+	return s
+}
+
+// SetTrainingDataStartTime sets the TrainingDataStartTime field's value.
+func (s *DescribeModelOutput) SetTrainingDataStartTime(v time.Time) *DescribeModelOutput {
+	s.TrainingDataStartTime = &v
+	return s
+}
+
+// SetTrainingExecutionEndTime sets the TrainingExecutionEndTime field's value.
+func (s *DescribeModelOutput) SetTrainingExecutionEndTime(v time.Time) *DescribeModelOutput {
+	s.TrainingExecutionEndTime = &v
+	return s
+}
+
+// SetTrainingExecutionStartTime sets the TrainingExecutionStartTime field's value.
+func (s *DescribeModelOutput) SetTrainingExecutionStartTime(v time.Time) *DescribeModelOutput {
+	s.TrainingExecutionStartTime = &v
+	return s
+}
+
+// Contains information about the specific inference execution, including input
+// and output data configuration, inference scheduling information, status,
+// and so on.
+type InferenceExecutionSummary struct {
+	_ struct{} `type:"structure"`
+
+	// Contains information about an S3 bucket.
+	CustomerResultObject *S3Object `type:"structure"`
+
+	// Indicates the time reference in the dataset at which the inference execution
+	// stopped.
+	DataEndTime *time.Time `type:"timestamp"`
+
+	// Specifies configuration information for the input data for the inference
+	// scheduler, including delimiter, format, and dataset location.
+	DataInputConfiguration *InferenceInputConfiguration `type:"structure"`
+
+	// Specifies configuration information for the output results from the inference
+	// execution, including the output S3 location.
+	DataOutputConfiguration *InferenceOutputConfiguration `type:"structure"`
+
+	// Indicates the time reference in the dataset at which the inference execution
+	// began.
+	DataStartTime *time.Time `type:"timestamp"`
+
+	// Specifies the reason for failure when an inference execution has failed.
+	FailedReason *string `min:"1" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the inference scheduler being used for
+	// the inference execution.
+	InferenceSchedulerArn *string `min:"20" type:"string"`
+
+	// The name of the inference scheduler being used for the inference execution.
+	InferenceSchedulerName *string `min:"1" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the ML model used for the inference execution.
+	ModelArn *string `min:"20" type:"string"`
+
+	// The name of the ML model being used for the inference execution.
+	ModelName *string `min:"1" type:"string"`
+
+	// Indicates the start time at which the inference scheduler began the specific
+	// inference execution.
+	ScheduledStartTime *time.Time `type:"timestamp"`
+
+	// Indicates the status of the inference execution.
+	Status *string `type:"string" enum:"InferenceExecutionStatus"`
+}
+
+// String returns the string representation
+func (s InferenceExecutionSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InferenceExecutionSummary) GoString() string {
+	return s.String()
+}
+
+// SetCustomerResultObject sets the CustomerResultObject field's value.
+// SetCustomerResultObject sets the CustomerResultObject field's value.
+func (s *InferenceExecutionSummary) SetCustomerResultObject(v *S3Object) *InferenceExecutionSummary {
+	s.CustomerResultObject = v
+	return s
+}
+
+// SetDataEndTime sets the DataEndTime field's value.
+func (s *InferenceExecutionSummary) SetDataEndTime(v time.Time) *InferenceExecutionSummary {
+	s.DataEndTime = &v
+	return s
+}
+
+// SetDataInputConfiguration sets the DataInputConfiguration field's value.
+func (s *InferenceExecutionSummary) SetDataInputConfiguration(v *InferenceInputConfiguration) *InferenceExecutionSummary {
+	s.DataInputConfiguration = v
+	return s
+}
+
+// SetDataOutputConfiguration sets the DataOutputConfiguration field's value.
+func (s *InferenceExecutionSummary) SetDataOutputConfiguration(v *InferenceOutputConfiguration) *InferenceExecutionSummary {
+	s.DataOutputConfiguration = v
+	return s
+}
+
+// SetDataStartTime sets the DataStartTime field's value.
+func (s *InferenceExecutionSummary) SetDataStartTime(v time.Time) *InferenceExecutionSummary {
+	s.DataStartTime = &v
+	return s
+}
+
+// SetFailedReason sets the FailedReason field's value.
+func (s *InferenceExecutionSummary) SetFailedReason(v string) *InferenceExecutionSummary {
+	s.FailedReason = &v
+	return s
+}
+
+// SetInferenceSchedulerArn sets the InferenceSchedulerArn field's value.
+func (s *InferenceExecutionSummary) SetInferenceSchedulerArn(v string) *InferenceExecutionSummary {
+	s.InferenceSchedulerArn = &v
+	return s
+}
+
+// SetInferenceSchedulerName sets the InferenceSchedulerName field's value.
+func (s *InferenceExecutionSummary) SetInferenceSchedulerName(v string) *InferenceExecutionSummary {
+	s.InferenceSchedulerName = &v
+	return s
+}
+
+// SetModelArn sets the ModelArn field's value.
+func (s *InferenceExecutionSummary) SetModelArn(v string) *InferenceExecutionSummary {
+	s.ModelArn = &v
+	return s
+}
+
+// SetModelName sets the ModelName field's value.
+func (s *InferenceExecutionSummary) SetModelName(v string) *InferenceExecutionSummary {
+	s.ModelName = &v
+	return s
+}
+
+// SetScheduledStartTime sets the ScheduledStartTime field's value.
+func (s *InferenceExecutionSummary) SetScheduledStartTime(v time.Time) *InferenceExecutionSummary {
+	s.ScheduledStartTime = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *InferenceExecutionSummary) SetStatus(v string) *InferenceExecutionSummary {
+	s.Status = &v
+	return s
+}
+
+// Specifies configuration information for the input data for the inference,
+// including the S3 location of the input data.
+type InferenceInputConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies configuration information for the input data for the inference,
+	// including timestamp format and delimiter.
+	InferenceInputNameConfiguration *InferenceInputNameConfiguration `type:"structure"`
+
+	// Indicates the difference between your time zone and Greenwich Mean Time (GMT).
+	InputTimeZoneOffset *string `type:"string"`
+
+	// Specifies configuration information for the input data for the inference,
+	// including the S3 location of the input data.
+	S3InputConfiguration *InferenceS3InputConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s InferenceInputConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InferenceInputConfiguration) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InferenceInputConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "InferenceInputConfiguration"}
+	if s.S3InputConfiguration != nil {
+		if err := s.S3InputConfiguration.Validate(); err != nil {
+			invalidParams.AddNested("S3InputConfiguration", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetInferenceInputNameConfiguration sets the InferenceInputNameConfiguration field's value.
+func (s *InferenceInputConfiguration) SetInferenceInputNameConfiguration(v *InferenceInputNameConfiguration) *InferenceInputConfiguration {
+	s.InferenceInputNameConfiguration = v
+	return s
+}
+
+// SetInputTimeZoneOffset sets the InputTimeZoneOffset field's value.
+func (s *InferenceInputConfiguration) SetInputTimeZoneOffset(v string) *InferenceInputConfiguration {
+	s.InputTimeZoneOffset = &v
+	return s
+}
+
+// SetS3InputConfiguration sets the S3InputConfiguration field's value.
+func (s *InferenceInputConfiguration) SetS3InputConfiguration(v *InferenceS3InputConfiguration) *InferenceInputConfiguration {
+	s.S3InputConfiguration = v
+	return s
+}
+
+// Specifies configuration information for the input data for the inference,
+// including timestamp format and delimiter.
+type InferenceInputNameConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates the delimiter character used between items in the data.
+	ComponentTimestampDelimiter *string `type:"string"`
+
+	// The format of the timestamp, whether Epoch time or standard, with or without
+	// hyphens (-).
+	TimestampFormat *string `type:"string"`
+}
+
+// String returns the string representation
+func (s InferenceInputNameConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InferenceInputNameConfiguration) GoString() string {
+	return s.String()
+}
+
+// SetComponentTimestampDelimiter sets the ComponentTimestampDelimiter field's value.
+func (s *InferenceInputNameConfiguration) SetComponentTimestampDelimiter(v string) *InferenceInputNameConfiguration {
+	s.ComponentTimestampDelimiter = &v
+	return s
+}
+
+// SetTimestampFormat sets the TimestampFormat field's value.
+func (s *InferenceInputNameConfiguration) SetTimestampFormat(v string) *InferenceInputNameConfiguration {
+	s.TimestampFormat = &v
+	return s
+}
+
+// Specifies configuration information for the output results from the inference,
+// including KMS key ID and output S3 location.
+type InferenceOutputConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// The ID number for the AWS KMS key used to encrypt the inference output.
+	KmsKeyId *string `min:"1" type:"string"`
+
+	// Specifies configuration information for the output results from the inference,
+	// including the output S3 location.
+	//
+	// S3OutputConfiguration is a required field
+	S3OutputConfiguration *InferenceS3OutputConfiguration `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s InferenceOutputConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InferenceOutputConfiguration) GoString() string {
+	return s.String()
+}
+
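+// Editorial sketch, not generated code: assembling an inference input
+// configuration with the fluent setters defined above. The function name and
+// all bucket, prefix, delimiter, and timestamp-format values are illustrative
+// placeholders, not values mandated by the service.
+func exampleInferenceInputConfig() *InferenceInputConfiguration {
+	return (&InferenceInputConfiguration{}).
+		// Where the inference input data lives in S3.
+		SetS3InputConfiguration((&InferenceS3InputConfiguration{}).
+			SetBucket("example-input-bucket").
+			SetPrefix("sensor-data/")).
+		// How timestamps and delimiters are encoded in the input files.
+		SetInferenceInputNameConfiguration((&InferenceInputNameConfiguration{}).
+			SetComponentTimestampDelimiter("_").
+			SetTimestampFormat("yyyyMMddHHmmss")).
+		SetInputTimeZoneOffset("+00:00")
+}
+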
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InferenceOutputConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "InferenceOutputConfiguration"}
+	if s.KmsKeyId != nil && len(*s.KmsKeyId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("KmsKeyId", 1))
+	}
+	if s.S3OutputConfiguration == nil {
+		invalidParams.Add(request.NewErrParamRequired("S3OutputConfiguration"))
+	}
+	if s.S3OutputConfiguration != nil {
+		if err := s.S3OutputConfiguration.Validate(); err != nil {
+			invalidParams.AddNested("S3OutputConfiguration", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetKmsKeyId sets the KmsKeyId field's value.
+func (s *InferenceOutputConfiguration) SetKmsKeyId(v string) *InferenceOutputConfiguration {
+	s.KmsKeyId = &v
+	return s
+}
+
+// SetS3OutputConfiguration sets the S3OutputConfiguration field's value.
+func (s *InferenceOutputConfiguration) SetS3OutputConfiguration(v *InferenceS3OutputConfiguration) *InferenceOutputConfiguration {
+	s.S3OutputConfiguration = v
+	return s
+}
+
+// Specifies configuration information for the input data for the inference,
+// including input data S3 location.
+type InferenceS3InputConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// The bucket containing the input dataset for the inference.
+	//
+	// Bucket is a required field
+	Bucket *string `min:"3" type:"string" required:"true"`
+
+	// The prefix for the S3 bucket used for the input data for the inference.
+	Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s InferenceS3InputConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InferenceS3InputConfiguration) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InferenceS3InputConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "InferenceS3InputConfiguration"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 3))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *InferenceS3InputConfiguration) SetBucket(v string) *InferenceS3InputConfiguration {
+	s.Bucket = &v
+	return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *InferenceS3InputConfiguration) SetPrefix(v string) *InferenceS3InputConfiguration {
+	s.Prefix = &v
+	return s
+}
+
+// Specifies configuration information for the output results from the inference,
+// including output S3 location.
+type InferenceS3OutputConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// The bucket containing the output results from the inference.
+	//
+	// Bucket is a required field
+	Bucket *string `min:"3" type:"string" required:"true"`
+
+	// The prefix for the S3 bucket used for the output results from the inference.
+	Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s InferenceS3OutputConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InferenceS3OutputConfiguration) GoString() string {
+	return s.String()
+}
+
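+// Editorial sketch, not generated code: building an output configuration and
+// running the generated Validate method, which enforces the required
+// S3OutputConfiguration and the three-character bucket minimum. The function
+// name, bucket, prefix, and KMS key ID are placeholders.
+func exampleInferenceOutputConfig() (*InferenceOutputConfiguration, error) {
+	cfg := (&InferenceOutputConfiguration{}).
+		SetS3OutputConfiguration((&InferenceS3OutputConfiguration{}).
+			SetBucket("example-output-bucket").
+			SetPrefix("inference-results/")).
+		SetKmsKeyId("1234abcd-12ab-34cd-56ef-1234567890ab")
+	// Client-side validation catches missing or too-short fields before any
+	// request is made.
+	if err := cfg.Validate(); err != nil {
+		return nil, err
+	}
+	return cfg, nil
+}
+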
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InferenceS3OutputConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "InferenceS3OutputConfiguration"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 3))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *InferenceS3OutputConfiguration) SetBucket(v string) *InferenceS3OutputConfiguration {
+	s.Bucket = &v
+	return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *InferenceS3OutputConfiguration) SetPrefix(v string) *InferenceS3OutputConfiguration {
+	s.Prefix = &v
+	return s
+}
+
+// Contains information about the specific inference scheduler, including data
+// delay offset, model name and ARN, status, and so on.
+type InferenceSchedulerSummary struct {
+	_ struct{} `type:"structure"`
+
+	// A period of time (in minutes) by which inference on the data is delayed
+	// after the data starts. For instance, if an offset delay time of five minutes
+	// was selected, inference will not begin on the data until the first data
+	// measurement after the five-minute mark. For example, if five minutes is
+	// selected, the inference scheduler will wake up at the configured frequency
+	// with the additional five-minute delay time to check the customer S3 bucket.
+	// Customers can upload data at the same frequency without needing to stop
+	// and restart the scheduler when uploading new data.
+	DataDelayOffsetInMinutes *int64 `type:"long"`
+
+	// How often data is uploaded to the source S3 bucket for the input data. This
+	// value is the length of time between data uploads. For instance, if you select
+	// 5 minutes, Amazon Lookout for Equipment will upload the real-time data to
+	// the source bucket once every 5 minutes. This frequency also determines how
+	// often Amazon Lookout for Equipment starts a scheduled inference on your data.
+	// In this example, it starts once every 5 minutes.
+	DataUploadFrequency *string `type:"string" enum:"DataUploadFrequency"`
+
+	// The Amazon Resource Name (ARN) of the inference scheduler.
+	InferenceSchedulerArn *string `min:"20" type:"string"`
+
+	// The name of the inference scheduler.
+	InferenceSchedulerName *string `min:"1" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the ML model used by the inference scheduler.
+	ModelArn *string `min:"20" type:"string"`
+
+	// The name of the ML model used for the inference scheduler.
+	ModelName *string `min:"1" type:"string"`
+
+	// Indicates the status of the inference scheduler.
+	Status *string `type:"string" enum:"InferenceSchedulerStatus"`
+}
+
+// String returns the string representation
+func (s InferenceSchedulerSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InferenceSchedulerSummary) GoString() string {
+	return s.String()
+}
+
+// SetDataDelayOffsetInMinutes sets the DataDelayOffsetInMinutes field's value.
+func (s *InferenceSchedulerSummary) SetDataDelayOffsetInMinutes(v int64) *InferenceSchedulerSummary {
+	s.DataDelayOffsetInMinutes = &v
+	return s
+}
+
+// SetDataUploadFrequency sets the DataUploadFrequency field's value.
+func (s *InferenceSchedulerSummary) SetDataUploadFrequency(v string) *InferenceSchedulerSummary {
+	s.DataUploadFrequency = &v
+	return s
+}
+
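+// Editorial sketch, not generated code: per the DataDelayOffsetInMinutes
+// documentation above, the scheduler wakes at its configured frequency plus
+// the delay offset. With a five-minute frequency and a five-minute offset,
+// data for the 10:00-10:05 window is read at about 10:10. The helper name is
+// hypothetical.
+func exampleNextBucketCheck(cycleEnd time.Time, delayOffsetMinutes int64) time.Time {
+	// The scheduler checks the customer bucket delayOffsetMinutes after the
+	// end of each upload cycle.
+	return cycleEnd.Add(time.Duration(delayOffsetMinutes) * time.Minute)
+}
+
+// SetInferenceSchedulerArn sets the InferenceSchedulerArn field's value.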
+func (s *InferenceSchedulerSummary) SetInferenceSchedulerArn(v string) *InferenceSchedulerSummary { + s.InferenceSchedulerArn = &v + return s +} + +// SetInferenceSchedulerName sets the InferenceSchedulerName field's value. +func (s *InferenceSchedulerSummary) SetInferenceSchedulerName(v string) *InferenceSchedulerSummary { + s.InferenceSchedulerName = &v + return s +} + +// SetModelArn sets the ModelArn field's value. +func (s *InferenceSchedulerSummary) SetModelArn(v string) *InferenceSchedulerSummary { + s.ModelArn = &v + return s +} + +// SetModelName sets the ModelName field's value. +func (s *InferenceSchedulerSummary) SetModelName(v string) *InferenceSchedulerSummary { + s.ModelName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *InferenceSchedulerSummary) SetStatus(v string) *InferenceSchedulerSummary { + s.Status = &v + return s +} + +// Specifies configuration information for the input data for the data ingestion +// job, including input data S3 location. +type IngestionInputConfiguration struct { + _ struct{} `type:"structure"` + + // The location information for the S3 bucket used for input data for the data + // ingestion. + // + // S3InputConfiguration is a required field + S3InputConfiguration *IngestionS3InputConfiguration `type:"structure" required:"true"` +} + +// String returns the string representation +func (s IngestionInputConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IngestionInputConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IngestionInputConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IngestionInputConfiguration"} + if s.S3InputConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("S3InputConfiguration")) + } + if s.S3InputConfiguration != nil { + if err := s.S3InputConfiguration.Validate(); err != nil { + invalidParams.AddNested("S3InputConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3InputConfiguration sets the S3InputConfiguration field's value. +func (s *IngestionInputConfiguration) SetS3InputConfiguration(v *IngestionS3InputConfiguration) *IngestionInputConfiguration { + s.S3InputConfiguration = v + return s +} + +// Specifies S3 configuration information for the input data for the data ingestion +// job. +type IngestionS3InputConfiguration struct { + _ struct{} `type:"structure"` + + // The name of the S3 bucket used for the input data for the data ingestion. + // + // Bucket is a required field + Bucket *string `min:"3" type:"string" required:"true"` + + // The prefix for the S3 location being used for the input data for the data + // ingestion. + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s IngestionS3InputConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IngestionS3InputConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *IngestionS3InputConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IngestionS3InputConfiguration"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *IngestionS3InputConfiguration) SetBucket(v string) *IngestionS3InputConfiguration { + s.Bucket = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *IngestionS3InputConfiguration) SetPrefix(v string) *IngestionS3InputConfiguration { + s.Prefix = &v + return s +} + +// Processing of the request has failed because of an unknown error, exception +// or failure. +type InternalServerException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s InternalServerException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InternalServerException) GoString() string { + return s.String() +} + +func newErrorInternalServerException(v protocol.ResponseMetadata) error { + return &InternalServerException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InternalServerException) Code() string { + return "InternalServerException" +} + +// Message returns the exception's message. +func (s *InternalServerException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InternalServerException) OrigErr() error { + return nil +} + +func (s *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InternalServerException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InternalServerException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Contains the configuration information for the S3 location being used to +// hold label data. +type LabelsInputConfiguration struct { + _ struct{} `type:"structure"` + + // Contains location information for the S3 location being used for label data. + // + // S3InputConfiguration is a required field + S3InputConfiguration *LabelsS3InputConfiguration `type:"structure" required:"true"` +} + +// String returns the string representation +func (s LabelsInputConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LabelsInputConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *LabelsInputConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "LabelsInputConfiguration"}
+	if s.S3InputConfiguration == nil {
+		invalidParams.Add(request.NewErrParamRequired("S3InputConfiguration"))
+	}
+	if s.S3InputConfiguration != nil {
+		if err := s.S3InputConfiguration.Validate(); err != nil {
+			invalidParams.AddNested("S3InputConfiguration", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetS3InputConfiguration sets the S3InputConfiguration field's value.
+func (s *LabelsInputConfiguration) SetS3InputConfiguration(v *LabelsS3InputConfiguration) *LabelsInputConfiguration {
+	s.S3InputConfiguration = v
+	return s
+}
+
+// The location information (prefix and bucket name) for the S3 location being
+// used for label data.
+type LabelsS3InputConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the S3 bucket holding the label data.
+	//
+	// Bucket is a required field
+	Bucket *string `min:"3" type:"string" required:"true"`
+
+	// The prefix for the S3 bucket used for the label data.
+	Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s LabelsS3InputConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LabelsS3InputConfiguration) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LabelsS3InputConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "LabelsS3InputConfiguration"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 3))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *LabelsS3InputConfiguration) SetBucket(v string) *LabelsS3InputConfiguration {
+	s.Bucket = &v
+	return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *LabelsS3InputConfiguration) SetPrefix(v string) *LabelsS3InputConfiguration {
+	s.Prefix = &v
+	return s
+}
+
+type ListDataIngestionJobsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the dataset being used for the data ingestion job.
+	DatasetName *string `min:"1" type:"string"`
+
+	// Specifies the maximum number of data ingestion jobs to list.
+	MaxResults *int64 `min:"1" type:"integer"`
+
+	// An opaque pagination token indicating where to continue the listing of data
+	// ingestion jobs.
+	NextToken *string `type:"string"`
+
+	// Indicates the status of the data ingestion job.
+	Status *string `type:"string" enum:"IngestionJobStatus"`
+}
+
+// String returns the string representation
+func (s ListDataIngestionJobsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDataIngestionJobsInput) GoString() string {
+	return s.String()
+}
+
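+// Editorial sketch, not generated code: preparing a paginated listing request
+// with the setters defined below and the generated Validate method, which
+// enforces the one-character DatasetName and MaxResults >= 1 minimums. The
+// function name and dataset name are placeholders.
+func exampleListIngestionJobsInput(nextToken *string) (*ListDataIngestionJobsInput, error) {
+	in := (&ListDataIngestionJobsInput{}).
+		SetDatasetName("example-dataset").
+		SetMaxResults(25)
+	if nextToken != nil {
+		// Continue a previous listing from the opaque pagination token.
+		in.SetNextToken(*nextToken)
+	}
+	if err := in.Validate(); err != nil {
+		return nil, err
+	}
+	return in, nil
+}
+
+// Validate inspects the fields of the type to determine if they are valid.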
+func (s *ListDataIngestionJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDataIngestionJobsInput"} + if s.DatasetName != nil && len(*s.DatasetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetName sets the DatasetName field's value. +func (s *ListDataIngestionJobsInput) SetDatasetName(v string) *ListDataIngestionJobsInput { + s.DatasetName = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListDataIngestionJobsInput) SetMaxResults(v int64) *ListDataIngestionJobsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDataIngestionJobsInput) SetNextToken(v string) *ListDataIngestionJobsInput { + s.NextToken = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ListDataIngestionJobsInput) SetStatus(v string) *ListDataIngestionJobsInput { + s.Status = &v + return s +} + +type ListDataIngestionJobsOutput struct { + _ struct{} `type:"structure"` + + // Specifies information about the specific data ingestion job, including dataset + // name and status. + DataIngestionJobSummaries []*DataIngestionJobSummary `type:"list"` + + // An opaque pagination token indicating where to continue the listing of data + // ingestion jobs. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDataIngestionJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDataIngestionJobsOutput) GoString() string { + return s.String() +} + +// SetDataIngestionJobSummaries sets the DataIngestionJobSummaries field's value. +func (s *ListDataIngestionJobsOutput) SetDataIngestionJobSummaries(v []*DataIngestionJobSummary) *ListDataIngestionJobsOutput { + s.DataIngestionJobSummaries = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDataIngestionJobsOutput) SetNextToken(v string) *ListDataIngestionJobsOutput { + s.NextToken = &v + return s +} + +type ListDatasetsInput struct { + _ struct{} `type:"structure"` + + // The beginning of the name of the datasets to be listed. + DatasetNameBeginsWith *string `min:"1" type:"string"` + + // Specifies the maximum number of datasets to list. + MaxResults *int64 `min:"1" type:"integer"` + + // An opaque pagination token indicating where to continue the listing of datasets. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDatasetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatasetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDatasetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDatasetsInput"} + if s.DatasetNameBeginsWith != nil && len(*s.DatasetNameBeginsWith) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetNameBeginsWith", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetNameBeginsWith sets the DatasetNameBeginsWith field's value. 
+func (s *ListDatasetsInput) SetDatasetNameBeginsWith(v string) *ListDatasetsInput { + s.DatasetNameBeginsWith = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListDatasetsInput) SetMaxResults(v int64) *ListDatasetsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDatasetsInput) SetNextToken(v string) *ListDatasetsInput { + s.NextToken = &v + return s +} + +type ListDatasetsOutput struct { + _ struct{} `type:"structure"` + + // Provides information about the specified dataset, including creation time, + // dataset ARN, and status. + DatasetSummaries []*DatasetSummary `type:"list"` + + // An opaque pagination token indicating where to continue the listing of datasets. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDatasetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatasetsOutput) GoString() string { + return s.String() +} + +// SetDatasetSummaries sets the DatasetSummaries field's value. +func (s *ListDatasetsOutput) SetDatasetSummaries(v []*DatasetSummary) *ListDatasetsOutput { + s.DatasetSummaries = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDatasetsOutput) SetNextToken(v string) *ListDatasetsOutput { + s.NextToken = &v + return s +} + +type ListInferenceExecutionsInput struct { + _ struct{} `type:"structure"` + + // The time reference in the inferenced dataset before which Amazon Lookout + // for Equipment stopped the inference execution. + DataEndTimeBefore *time.Time `type:"timestamp"` + + // The time reference in the inferenced dataset after which Amazon Lookout for + // Equipment started the inference execution. + DataStartTimeAfter *time.Time `type:"timestamp"` + + // The name of the inference scheduler for the inference execution listed. + // + // InferenceSchedulerName is a required field + InferenceSchedulerName *string `min:"1" type:"string" required:"true"` + + // Specifies the maximum number of inference executions to list. + MaxResults *int64 `min:"1" type:"integer"` + + // An opaque pagination token indicating where to continue the listing of inference + // executions. + NextToken *string `type:"string"` + + // The status of the inference execution. + Status *string `type:"string" enum:"InferenceExecutionStatus"` +} + +// String returns the string representation +func (s ListInferenceExecutionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInferenceExecutionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListInferenceExecutionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListInferenceExecutionsInput"} + if s.InferenceSchedulerName == nil { + invalidParams.Add(request.NewErrParamRequired("InferenceSchedulerName")) + } + if s.InferenceSchedulerName != nil && len(*s.InferenceSchedulerName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InferenceSchedulerName", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataEndTimeBefore sets the DataEndTimeBefore field's value. 
+func (s *ListInferenceExecutionsInput) SetDataEndTimeBefore(v time.Time) *ListInferenceExecutionsInput { + s.DataEndTimeBefore = &v + return s +} + +// SetDataStartTimeAfter sets the DataStartTimeAfter field's value. +func (s *ListInferenceExecutionsInput) SetDataStartTimeAfter(v time.Time) *ListInferenceExecutionsInput { + s.DataStartTimeAfter = &v + return s +} + +// SetInferenceSchedulerName sets the InferenceSchedulerName field's value. +func (s *ListInferenceExecutionsInput) SetInferenceSchedulerName(v string) *ListInferenceExecutionsInput { + s.InferenceSchedulerName = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListInferenceExecutionsInput) SetMaxResults(v int64) *ListInferenceExecutionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListInferenceExecutionsInput) SetNextToken(v string) *ListInferenceExecutionsInput { + s.NextToken = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ListInferenceExecutionsInput) SetStatus(v string) *ListInferenceExecutionsInput { + s.Status = &v + return s +} + +type ListInferenceExecutionsOutput struct { + _ struct{} `type:"structure"` + + // Provides an array of information about the individual inference executions + // returned from the ListInferenceExecutions operation, including model used, + // inference scheduler, data configuration, and so on. + InferenceExecutionSummaries []*InferenceExecutionSummary `type:"list"` + + // An opaque pagination token indicating where to continue the listing of inference + // executions. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListInferenceExecutionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInferenceExecutionsOutput) GoString() string { + return s.String() +} + +// SetInferenceExecutionSummaries sets the InferenceExecutionSummaries field's value. +func (s *ListInferenceExecutionsOutput) SetInferenceExecutionSummaries(v []*InferenceExecutionSummary) *ListInferenceExecutionsOutput { + s.InferenceExecutionSummaries = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListInferenceExecutionsOutput) SetNextToken(v string) *ListInferenceExecutionsOutput { + s.NextToken = &v + return s +} + +type ListInferenceSchedulersInput struct { + _ struct{} `type:"structure"` + + // The beginning of the name of the inference schedulers to be listed. + InferenceSchedulerNameBeginsWith *string `min:"1" type:"string"` + + // Specifies the maximum number of inference schedulers to list. + MaxResults *int64 `min:"1" type:"integer"` + + // The name of the ML model used by the inference scheduler to be listed. + ModelName *string `min:"1" type:"string"` + + // An opaque pagination token indicating where to continue the listing of inference + // schedulers. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListInferenceSchedulersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInferenceSchedulersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListInferenceSchedulersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListInferenceSchedulersInput"} + if s.InferenceSchedulerNameBeginsWith != nil && len(*s.InferenceSchedulerNameBeginsWith) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InferenceSchedulerNameBeginsWith", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.ModelName != nil && len(*s.ModelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ModelName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInferenceSchedulerNameBeginsWith sets the InferenceSchedulerNameBeginsWith field's value. +func (s *ListInferenceSchedulersInput) SetInferenceSchedulerNameBeginsWith(v string) *ListInferenceSchedulersInput { + s.InferenceSchedulerNameBeginsWith = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListInferenceSchedulersInput) SetMaxResults(v int64) *ListInferenceSchedulersInput { + s.MaxResults = &v + return s +} + +// SetModelName sets the ModelName field's value. +func (s *ListInferenceSchedulersInput) SetModelName(v string) *ListInferenceSchedulersInput { + s.ModelName = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListInferenceSchedulersInput) SetNextToken(v string) *ListInferenceSchedulersInput { + s.NextToken = &v + return s +} + +type ListInferenceSchedulersOutput struct { + _ struct{} `type:"structure"` + + // Provides information about the specified inference scheduler, including data + // upload frequency, model name and ARN, and status. + InferenceSchedulerSummaries []*InferenceSchedulerSummary `type:"list"` + + // An opaque pagination token indicating where to continue the listing of inference + // schedulers. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListInferenceSchedulersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInferenceSchedulersOutput) GoString() string { + return s.String() +} + +// SetInferenceSchedulerSummaries sets the InferenceSchedulerSummaries field's value. +func (s *ListInferenceSchedulersOutput) SetInferenceSchedulerSummaries(v []*InferenceSchedulerSummary) *ListInferenceSchedulersOutput { + s.InferenceSchedulerSummaries = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListInferenceSchedulersOutput) SetNextToken(v string) *ListInferenceSchedulersOutput { + s.NextToken = &v + return s +} + +type ListModelsInput struct { + _ struct{} `type:"structure"` + + // The beginning of the name of the dataset of the ML models to be listed. + DatasetNameBeginsWith *string `min:"1" type:"string"` + + // Specifies the maximum number of ML models to list. + MaxResults *int64 `min:"1" type:"integer"` + + // The beginning of the name of the ML models being listed. + ModelNameBeginsWith *string `min:"1" type:"string"` + + // An opaque pagination token indicating where to continue the listing of ML + // models. + NextToken *string `type:"string"` + + // The status of the ML model. 
+ Status *string `type:"string" enum:"ModelStatus"` +} + +// String returns the string representation +func (s ListModelsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListModelsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListModelsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListModelsInput"} + if s.DatasetNameBeginsWith != nil && len(*s.DatasetNameBeginsWith) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetNameBeginsWith", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.ModelNameBeginsWith != nil && len(*s.ModelNameBeginsWith) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ModelNameBeginsWith", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetNameBeginsWith sets the DatasetNameBeginsWith field's value. +func (s *ListModelsInput) SetDatasetNameBeginsWith(v string) *ListModelsInput { + s.DatasetNameBeginsWith = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListModelsInput) SetMaxResults(v int64) *ListModelsInput { + s.MaxResults = &v + return s +} + +// SetModelNameBeginsWith sets the ModelNameBeginsWith field's value. +func (s *ListModelsInput) SetModelNameBeginsWith(v string) *ListModelsInput { + s.ModelNameBeginsWith = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListModelsInput) SetNextToken(v string) *ListModelsInput { + s.NextToken = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ListModelsInput) SetStatus(v string) *ListModelsInput { + s.Status = &v + return s +} + +type ListModelsOutput struct { + _ struct{} `type:"structure"` + + // Provides information on the specified model, including created time, model + // and dataset ARNs, and status. + ModelSummaries []*ModelSummary `type:"list"` + + // An opaque pagination token indicating where to continue the listing of ML + // models. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListModelsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListModelsOutput) GoString() string { + return s.String() +} + +// SetModelSummaries sets the ModelSummaries field's value. +func (s *ListModelsOutput) SetModelSummaries(v []*ModelSummary) *ListModelsOutput { + s.ModelSummaries = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListModelsOutput) SetNextToken(v string) *ListModelsOutput { + s.NextToken = &v + return s +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource (such as the dataset or model) + // that is the focus of the ListTagsForResource operation. + // + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // Any tags associated with the resource. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v + return s +} + +// Provides information about the specified ML model, including dataset and +// model names and ARNs, as well as status. +type ModelSummary struct { + _ struct{} `type:"structure"` + + // The time at which the specific model was created. + CreatedAt *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of the dataset used to create the model. + DatasetArn *string `min:"20" type:"string"` + + // The name of the dataset being used for the ML model. + DatasetName *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the ML model. + ModelArn *string `min:"20" type:"string"` + + // The name of the ML model. + ModelName *string `min:"1" type:"string"` + + // Indicates the status of the ML model. + Status *string `type:"string" enum:"ModelStatus"` +} + +// String returns the string representation +func (s ModelSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModelSummary) GoString() string { + return s.String() +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *ModelSummary) SetCreatedAt(v time.Time) *ModelSummary { + s.CreatedAt = &v + return s +} + +// SetDatasetArn sets the DatasetArn field's value. +func (s *ModelSummary) SetDatasetArn(v string) *ModelSummary { + s.DatasetArn = &v + return s +} + +// SetDatasetName sets the DatasetName field's value. +func (s *ModelSummary) SetDatasetName(v string) *ModelSummary { + s.DatasetName = &v + return s +} + +// SetModelArn sets the ModelArn field's value. +func (s *ModelSummary) SetModelArn(v string) *ModelSummary { + s.ModelArn = &v + return s +} + +// SetModelName sets the ModelName field's value. +func (s *ModelSummary) SetModelName(v string) *ModelSummary { + s.ModelName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ModelSummary) SetStatus(v string) *ModelSummary { + s.Status = &v + return s +} + +// The resource requested could not be found. Verify the resource ID and retry +// your request. 
+type ResourceNotFoundException struct {
+	_ struct{} `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ResourceNotFoundException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResourceNotFoundException) GoString() string {
+	return s.String()
+}
+
+func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error {
+	return &ResourceNotFoundException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *ResourceNotFoundException) Code() string {
+	return "ResourceNotFoundException"
+}
+
+// Message returns the exception's message.
+func (s *ResourceNotFoundException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ResourceNotFoundException) OrigErr() error {
+	return nil
+}
+
+func (s *ResourceNotFoundException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *ResourceNotFoundException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *ResourceNotFoundException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// Contains information about a specific S3 object, including its bucket and
+// key.
+type S3Object struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the specific S3 bucket.
+	//
+	// Bucket is a required field
+	Bucket *string `min:"3" type:"string" required:"true"`
+
+	// The key (including any prefix) that identifies the specific S3 object within
+	// the bucket.
+	//
+	// Key is a required field
+	Key *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s S3Object) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s S3Object) GoString() string {
+	return s.String()
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *S3Object) SetBucket(v string) *S3Object {
+	s.Bucket = &v
+	return s
+}
+
+// SetKey sets the Key field's value.
+func (s *S3Object) SetKey(v string) *S3Object {
+	s.Key = &v
+	return s
+}
+
+// Resource limitations have been exceeded.
+type ServiceQuotaExceededException struct {
+	_ struct{} `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ServiceQuotaExceededException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ServiceQuotaExceededException) GoString() string {
+	return s.String()
+}
+
+func newErrorServiceQuotaExceededException(v protocol.ResponseMetadata) error {
+	return &ServiceQuotaExceededException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *ServiceQuotaExceededException) Code() string {
+	return "ServiceQuotaExceededException"
+}
+
+// Message returns the exception's message.
+func (s *ServiceQuotaExceededException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
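+// Editorial sketch, not generated code: one way to branch on the service's
+// typed errors after an API call. It assumes the SDK returns these generated
+// exception types unwrapped, as aws-sdk-go v1 clients do; the function name
+// and advice strings are placeholders.
+func exampleClassifyError(err error) string {
+	switch err.(type) {
+	case *ResourceNotFoundException:
+		return "verify the resource name or ARN and retry"
+	case *ServiceQuotaExceededException:
+		return "reduce resource usage or request a quota increase"
+	case *ThrottlingException:
+		return "back off and retry the request"
+	default:
+		return "unhandled error"
+	}
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.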
+func (s *ServiceQuotaExceededException) OrigErr() error { + return nil +} + +func (s *ServiceQuotaExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ServiceQuotaExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ServiceQuotaExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +type StartDataIngestionJobInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the request. If you do not set the client request + // token, Amazon Lookout for Equipment generates one. + ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // The name of the dataset being used by the data ingestion job. + // + // DatasetName is a required field + DatasetName *string `min:"1" type:"string" required:"true"` + + // Specifies information for the input data for the data ingestion job, including + // dataset S3 location. + // + // IngestionInputConfiguration is a required field + IngestionInputConfiguration *IngestionInputConfiguration `type:"structure" required:"true"` + + // The Amazon Resource Name (ARN) of a role with permission to access the data + // source for the data ingestion job. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartDataIngestionJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartDataIngestionJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartDataIngestionJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartDataIngestionJobInput"} + if s.ClientToken != nil && len(*s.ClientToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientToken", 1)) + } + if s.DatasetName == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetName")) + } + if s.DatasetName != nil && len(*s.DatasetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) + } + if s.IngestionInputConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("IngestionInputConfiguration")) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.IngestionInputConfiguration != nil { + if err := s.IngestionInputConfiguration.Validate(); err != nil { + invalidParams.AddNested("IngestionInputConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *StartDataIngestionJobInput) SetClientToken(v string) *StartDataIngestionJobInput { + s.ClientToken = &v + return s +} + +// SetDatasetName sets the DatasetName field's value. +func (s *StartDataIngestionJobInput) SetDatasetName(v string) *StartDataIngestionJobInput { + s.DatasetName = &v + return s +} + +// SetIngestionInputConfiguration sets the IngestionInputConfiguration field's value. 
+func (s *StartDataIngestionJobInput) SetIngestionInputConfiguration(v *IngestionInputConfiguration) *StartDataIngestionJobInput { + s.IngestionInputConfiguration = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *StartDataIngestionJobInput) SetRoleArn(v string) *StartDataIngestionJobInput { + s.RoleArn = &v + return s +} + +type StartDataIngestionJobOutput struct { + _ struct{} `type:"structure"` + + // Indicates the job ID of the data ingestion job. + JobId *string `type:"string"` + + // Indicates the status of the StartDataIngestionJob operation. + Status *string `type:"string" enum:"IngestionJobStatus"` +} + +// String returns the string representation +func (s StartDataIngestionJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartDataIngestionJobOutput) GoString() string { + return s.String() +} + +// SetJobId sets the JobId field's value. +func (s *StartDataIngestionJobOutput) SetJobId(v string) *StartDataIngestionJobOutput { + s.JobId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *StartDataIngestionJobOutput) SetStatus(v string) *StartDataIngestionJobOutput { + s.Status = &v + return s +} + +type StartInferenceSchedulerInput struct { + _ struct{} `type:"structure"` + + // The name of the inference scheduler to be started. + // + // InferenceSchedulerName is a required field + InferenceSchedulerName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartInferenceSchedulerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartInferenceSchedulerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartInferenceSchedulerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartInferenceSchedulerInput"} + if s.InferenceSchedulerName == nil { + invalidParams.Add(request.NewErrParamRequired("InferenceSchedulerName")) + } + if s.InferenceSchedulerName != nil && len(*s.InferenceSchedulerName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InferenceSchedulerName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInferenceSchedulerName sets the InferenceSchedulerName field's value. +func (s *StartInferenceSchedulerInput) SetInferenceSchedulerName(v string) *StartInferenceSchedulerInput { + s.InferenceSchedulerName = &v + return s +} + +type StartInferenceSchedulerOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the inference scheduler being started. + InferenceSchedulerArn *string `min:"20" type:"string"` + + // The name of the inference scheduler being started. + InferenceSchedulerName *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the ML model being used by the inference + // scheduler. + ModelArn *string `min:"20" type:"string"` + + // The name of the ML model being used by the inference scheduler. + ModelName *string `min:"1" type:"string"` + + // Indicates the status of the inference scheduler. 
+	Status *string `type:"string" enum:"InferenceSchedulerStatus"`
+}
+
+// String returns the string representation
+func (s StartInferenceSchedulerOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartInferenceSchedulerOutput) GoString() string {
+	return s.String()
+}
+
+// SetInferenceSchedulerArn sets the InferenceSchedulerArn field's value.
+func (s *StartInferenceSchedulerOutput) SetInferenceSchedulerArn(v string) *StartInferenceSchedulerOutput {
+	s.InferenceSchedulerArn = &v
+	return s
+}
+
+// SetInferenceSchedulerName sets the InferenceSchedulerName field's value.
+func (s *StartInferenceSchedulerOutput) SetInferenceSchedulerName(v string) *StartInferenceSchedulerOutput {
+	s.InferenceSchedulerName = &v
+	return s
+}
+
+// SetModelArn sets the ModelArn field's value.
+func (s *StartInferenceSchedulerOutput) SetModelArn(v string) *StartInferenceSchedulerOutput {
+	s.ModelArn = &v
+	return s
+}
+
+// SetModelName sets the ModelName field's value.
+func (s *StartInferenceSchedulerOutput) SetModelName(v string) *StartInferenceSchedulerOutput {
+	s.ModelName = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *StartInferenceSchedulerOutput) SetStatus(v string) *StartInferenceSchedulerOutput {
+	s.Status = &v
+	return s
+}
+
+type StopInferenceSchedulerInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the inference scheduler to be stopped.
+	//
+	// InferenceSchedulerName is a required field
+	InferenceSchedulerName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s StopInferenceSchedulerInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StopInferenceSchedulerInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StopInferenceSchedulerInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "StopInferenceSchedulerInput"}
+	if s.InferenceSchedulerName == nil {
+		invalidParams.Add(request.NewErrParamRequired("InferenceSchedulerName"))
+	}
+	if s.InferenceSchedulerName != nil && len(*s.InferenceSchedulerName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("InferenceSchedulerName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetInferenceSchedulerName sets the InferenceSchedulerName field's value.
+func (s *StopInferenceSchedulerInput) SetInferenceSchedulerName(v string) *StopInferenceSchedulerInput {
+	s.InferenceSchedulerName = &v
+	return s
+}
+
+type StopInferenceSchedulerOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the inference scheduler being stopped.
+	InferenceSchedulerArn *string `min:"20" type:"string"`
+
+	// The name of the inference scheduler being stopped.
+	InferenceSchedulerName *string `min:"1" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the ML model used by the inference scheduler
+	// being stopped.
+	ModelArn *string `min:"20" type:"string"`
+
+	// The name of the ML model used by the inference scheduler being stopped.
+	ModelName *string `min:"1" type:"string"`
+
+	// Indicates the status of the inference scheduler.
+ Status *string `type:"string" enum:"InferenceSchedulerStatus"` +} + +// String returns the string representation +func (s StopInferenceSchedulerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopInferenceSchedulerOutput) GoString() string { + return s.String() +} + +// SetInferenceSchedulerArn sets the InferenceSchedulerArn field's value. +func (s *StopInferenceSchedulerOutput) SetInferenceSchedulerArn(v string) *StopInferenceSchedulerOutput { + s.InferenceSchedulerArn = &v + return s +} + +// SetInferenceSchedulerName sets the InferenceSchedulerName field's value. +func (s *StopInferenceSchedulerOutput) SetInferenceSchedulerName(v string) *StopInferenceSchedulerOutput { + s.InferenceSchedulerName = &v + return s +} + +// SetModelArn sets the ModelArn field's value. +func (s *StopInferenceSchedulerOutput) SetModelArn(v string) *StopInferenceSchedulerOutput { + s.ModelArn = &v + return s +} + +// SetModelName sets the ModelName field's value. +func (s *StopInferenceSchedulerOutput) SetModelName(v string) *StopInferenceSchedulerOutput { + s.ModelName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *StopInferenceSchedulerOutput) SetStatus(v string) *StopInferenceSchedulerOutput { + s.Status = &v + return s +} + +// A tag is a key-value pair that can be added to a resource as metadata. +type Tag struct { + _ struct{} `type:"structure"` + + // The key for the specified tag. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value for the specified tag. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the specific resource to which the tag + // should be associated. + // + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` + + // The tag or tags to be associated with a specific resource. Both the tag key + // and value are specified. + // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
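+// Editorial sketch (not part of the generated documentation): the SDK also runs
+// this validation automatically before a request is sent, so a missing required
+// field is reported without a network call. The ARN is a hypothetical placeholder.
+//
+//	input := &lookoutequipment.TagResourceInput{
+//	    ResourceArn: aws.String("arn:aws:lookoutequipment:us-east-1:111122223333:dataset/example"), // hypothetical ARN
+//	    // Tags intentionally omitted
+//	}
+//	if err := input.Validate(); err != nil {
+//	    fmt.Println(err) // reports that Tags is a required field
+//	}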
+func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +// The request was denied due to request throttling. +type ThrottlingException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s ThrottlingException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ThrottlingException) GoString() string { + return s.String() +} + +func newErrorThrottlingException(v protocol.ResponseMetadata) error { + return &ThrottlingException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ThrottlingException) Code() string { + return "ThrottlingException" +} + +// Message returns the exception's message. +func (s *ThrottlingException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ThrottlingException) OrigErr() error { + return nil +} + +func (s *ThrottlingException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ThrottlingException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ThrottlingException) RequestID() string { + return s.RespMetadata.RequestID +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource to which the tag is currently + // associated. + // + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` + + // Specifies the key of the tag to be removed from a specified resource. 
+	//
+	// TagKeys is a required field
+	TagKeys []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s UntagResourceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UntagResourceInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UntagResourceInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"}
+	if s.ResourceArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
+	}
+	if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
+	}
+	if s.TagKeys == nil {
+		invalidParams.Add(request.NewErrParamRequired("TagKeys"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetResourceArn sets the ResourceArn field's value.
+func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput {
+	s.ResourceArn = &v
+	return s
+}
+
+// SetTagKeys sets the TagKeys field's value.
+func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput {
+	s.TagKeys = v
+	return s
+}
+
+type UntagResourceOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UntagResourceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UntagResourceOutput) GoString() string {
+	return s.String()
+}
+
+type UpdateInferenceSchedulerInput struct {
+	_ struct{} `type:"structure"`
+
+	// A period of time (in minutes) by which inference on the data is delayed
+	// after the data starts. For instance, if you select an offset delay time of
+	// five minutes, inference will not begin on the data until the first data measurement
+	// after the five minute mark. For example, if five minutes is selected, the
+	// inference scheduler will wake up at the configured frequency with the additional
+	// five minute delay time to check the customer S3 bucket. The customer can
+	// upload data at the same frequency and they don't need to stop and restart
+	// the scheduler when uploading new data.
+	DataDelayOffsetInMinutes *int64 `type:"long"`
+
+	// Specifies information for the input data for the inference scheduler, including
+	// delimiter, format, and dataset location.
+	DataInputConfiguration *InferenceInputConfiguration `type:"structure"`
+
+	// Specifies information for the output results from the inference scheduler,
+	// including the output S3 location.
+	DataOutputConfiguration *InferenceOutputConfiguration `type:"structure"`
+
+	// How often data is uploaded to the source S3 bucket for the input data. The
+	// value chosen is the length of time between data uploads. For instance, if
+	// you select 5 minutes, Amazon Lookout for Equipment will upload the real-time
+	// data to the source bucket once every 5 minutes. This frequency also determines
+	// how often Amazon Lookout for Equipment starts a scheduled inference on your
+	// data. In this example, it starts once every 5 minutes.
+	DataUploadFrequency *string `type:"string" enum:"DataUploadFrequency"`
+
+	// The name of the inference scheduler to be updated.
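+	//
+	// Editorial sketch (not part of the generated documentation): assuming a
+	// configured client, the upload frequency and delay offset might be updated
+	// like this; the scheduler name is a hypothetical placeholder.
+	//
+	//	_, err := svc.UpdateInferenceScheduler(&lookoutequipment.UpdateInferenceSchedulerInput{
+	//	    InferenceSchedulerName:   aws.String("my-scheduler"), // hypothetical name
+	//	    DataUploadFrequency:      aws.String(lookoutequipment.DataUploadFrequencyPt5m),
+	//	    DataDelayOffsetInMinutes: aws.Int64(5),
+	//	})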
+ // + // InferenceSchedulerName is a required field + InferenceSchedulerName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of a role with permission to access the data + // source for the inference scheduler. + RoleArn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s UpdateInferenceSchedulerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateInferenceSchedulerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateInferenceSchedulerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateInferenceSchedulerInput"} + if s.InferenceSchedulerName == nil { + invalidParams.Add(request.NewErrParamRequired("InferenceSchedulerName")) + } + if s.InferenceSchedulerName != nil && len(*s.InferenceSchedulerName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InferenceSchedulerName", 1)) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.DataInputConfiguration != nil { + if err := s.DataInputConfiguration.Validate(); err != nil { + invalidParams.AddNested("DataInputConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.DataOutputConfiguration != nil { + if err := s.DataOutputConfiguration.Validate(); err != nil { + invalidParams.AddNested("DataOutputConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataDelayOffsetInMinutes sets the DataDelayOffsetInMinutes field's value. +func (s *UpdateInferenceSchedulerInput) SetDataDelayOffsetInMinutes(v int64) *UpdateInferenceSchedulerInput { + s.DataDelayOffsetInMinutes = &v + return s +} + +// SetDataInputConfiguration sets the DataInputConfiguration field's value. +func (s *UpdateInferenceSchedulerInput) SetDataInputConfiguration(v *InferenceInputConfiguration) *UpdateInferenceSchedulerInput { + s.DataInputConfiguration = v + return s +} + +// SetDataOutputConfiguration sets the DataOutputConfiguration field's value. +func (s *UpdateInferenceSchedulerInput) SetDataOutputConfiguration(v *InferenceOutputConfiguration) *UpdateInferenceSchedulerInput { + s.DataOutputConfiguration = v + return s +} + +// SetDataUploadFrequency sets the DataUploadFrequency field's value. +func (s *UpdateInferenceSchedulerInput) SetDataUploadFrequency(v string) *UpdateInferenceSchedulerInput { + s.DataUploadFrequency = &v + return s +} + +// SetInferenceSchedulerName sets the InferenceSchedulerName field's value. +func (s *UpdateInferenceSchedulerInput) SetInferenceSchedulerName(v string) *UpdateInferenceSchedulerInput { + s.InferenceSchedulerName = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *UpdateInferenceSchedulerInput) SetRoleArn(v string) *UpdateInferenceSchedulerInput { + s.RoleArn = &v + return s +} + +type UpdateInferenceSchedulerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateInferenceSchedulerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateInferenceSchedulerOutput) GoString() string { + return s.String() +} + +// The input fails to satisfy constraints specified by Amazon Lookout for Equipment +// or a related AWS service that's being utilized. 
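+//
+// Editorial sketch (not part of the generated documentation): one way to detect
+// this error after a failed call, using the error code constants from this
+// package's errors.go:
+//
+//	if aerr, ok := err.(awserr.Error); ok {
+//	    switch aerr.Code() {
+//	    case lookoutequipment.ErrCodeValidationException:
+//	        // the request input was rejected; inspect aerr.Message()
+//	    case lookoutequipment.ErrCodeThrottlingException:
+//	        // back off and retry
+//	    }
+//	}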
+type ValidationException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s ValidationException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidationException) GoString() string { + return s.String() +} + +func newErrorValidationException(v protocol.ResponseMetadata) error { + return &ValidationException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ValidationException) Code() string { + return "ValidationException" +} + +// Message returns the exception's message. +func (s *ValidationException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ValidationException) OrigErr() error { + return nil +} + +func (s *ValidationException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ValidationException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ValidationException) RequestID() string { + return s.RespMetadata.RequestID +} + +const ( + // DataUploadFrequencyPt5m is a DataUploadFrequency enum value + DataUploadFrequencyPt5m = "PT5M" + + // DataUploadFrequencyPt10m is a DataUploadFrequency enum value + DataUploadFrequencyPt10m = "PT10M" + + // DataUploadFrequencyPt15m is a DataUploadFrequency enum value + DataUploadFrequencyPt15m = "PT15M" + + // DataUploadFrequencyPt30m is a DataUploadFrequency enum value + DataUploadFrequencyPt30m = "PT30M" + + // DataUploadFrequencyPt1h is a DataUploadFrequency enum value + DataUploadFrequencyPt1h = "PT1H" +) + +// DataUploadFrequency_Values returns all elements of the DataUploadFrequency enum +func DataUploadFrequency_Values() []string { + return []string{ + DataUploadFrequencyPt5m, + DataUploadFrequencyPt10m, + DataUploadFrequencyPt15m, + DataUploadFrequencyPt30m, + DataUploadFrequencyPt1h, + } +} + +const ( + // DatasetStatusCreated is a DatasetStatus enum value + DatasetStatusCreated = "CREATED" + + // DatasetStatusIngestionInProgress is a DatasetStatus enum value + DatasetStatusIngestionInProgress = "INGESTION_IN_PROGRESS" + + // DatasetStatusActive is a DatasetStatus enum value + DatasetStatusActive = "ACTIVE" +) + +// DatasetStatus_Values returns all elements of the DatasetStatus enum +func DatasetStatus_Values() []string { + return []string{ + DatasetStatusCreated, + DatasetStatusIngestionInProgress, + DatasetStatusActive, + } +} + +const ( + // InferenceExecutionStatusInProgress is a InferenceExecutionStatus enum value + InferenceExecutionStatusInProgress = "IN_PROGRESS" + + // InferenceExecutionStatusSuccess is a InferenceExecutionStatus enum value + InferenceExecutionStatusSuccess = "SUCCESS" + + // InferenceExecutionStatusFailed is a InferenceExecutionStatus enum value + InferenceExecutionStatusFailed = "FAILED" +) + +// InferenceExecutionStatus_Values returns all elements of the InferenceExecutionStatus enum +func InferenceExecutionStatus_Values() []string { + return []string{ + InferenceExecutionStatusInProgress, + InferenceExecutionStatusSuccess, + InferenceExecutionStatusFailed, + } +} + +const ( + // InferenceSchedulerStatusPending is a 
InferenceSchedulerStatus enum value + InferenceSchedulerStatusPending = "PENDING" + + // InferenceSchedulerStatusRunning is a InferenceSchedulerStatus enum value + InferenceSchedulerStatusRunning = "RUNNING" + + // InferenceSchedulerStatusStopping is a InferenceSchedulerStatus enum value + InferenceSchedulerStatusStopping = "STOPPING" + + // InferenceSchedulerStatusStopped is a InferenceSchedulerStatus enum value + InferenceSchedulerStatusStopped = "STOPPED" +) + +// InferenceSchedulerStatus_Values returns all elements of the InferenceSchedulerStatus enum +func InferenceSchedulerStatus_Values() []string { + return []string{ + InferenceSchedulerStatusPending, + InferenceSchedulerStatusRunning, + InferenceSchedulerStatusStopping, + InferenceSchedulerStatusStopped, + } +} + +const ( + // IngestionJobStatusInProgress is a IngestionJobStatus enum value + IngestionJobStatusInProgress = "IN_PROGRESS" + + // IngestionJobStatusSuccess is a IngestionJobStatus enum value + IngestionJobStatusSuccess = "SUCCESS" + + // IngestionJobStatusFailed is a IngestionJobStatus enum value + IngestionJobStatusFailed = "FAILED" +) + +// IngestionJobStatus_Values returns all elements of the IngestionJobStatus enum +func IngestionJobStatus_Values() []string { + return []string{ + IngestionJobStatusInProgress, + IngestionJobStatusSuccess, + IngestionJobStatusFailed, + } +} + +const ( + // ModelStatusInProgress is a ModelStatus enum value + ModelStatusInProgress = "IN_PROGRESS" + + // ModelStatusSuccess is a ModelStatus enum value + ModelStatusSuccess = "SUCCESS" + + // ModelStatusFailed is a ModelStatus enum value + ModelStatusFailed = "FAILED" +) + +// ModelStatus_Values returns all elements of the ModelStatus enum +func ModelStatus_Values() []string { + return []string{ + ModelStatusInProgress, + ModelStatusSuccess, + ModelStatusFailed, + } +} + +const ( + // TargetSamplingRatePt1s is a TargetSamplingRate enum value + TargetSamplingRatePt1s = "PT1S" + + // TargetSamplingRatePt5s is a TargetSamplingRate enum value + TargetSamplingRatePt5s = "PT5S" + + // TargetSamplingRatePt10s is a TargetSamplingRate enum value + TargetSamplingRatePt10s = "PT10S" + + // TargetSamplingRatePt15s is a TargetSamplingRate enum value + TargetSamplingRatePt15s = "PT15S" + + // TargetSamplingRatePt30s is a TargetSamplingRate enum value + TargetSamplingRatePt30s = "PT30S" + + // TargetSamplingRatePt1m is a TargetSamplingRate enum value + TargetSamplingRatePt1m = "PT1M" + + // TargetSamplingRatePt5m is a TargetSamplingRate enum value + TargetSamplingRatePt5m = "PT5M" + + // TargetSamplingRatePt10m is a TargetSamplingRate enum value + TargetSamplingRatePt10m = "PT10M" + + // TargetSamplingRatePt15m is a TargetSamplingRate enum value + TargetSamplingRatePt15m = "PT15M" + + // TargetSamplingRatePt30m is a TargetSamplingRate enum value + TargetSamplingRatePt30m = "PT30M" + + // TargetSamplingRatePt1h is a TargetSamplingRate enum value + TargetSamplingRatePt1h = "PT1H" +) + +// TargetSamplingRate_Values returns all elements of the TargetSamplingRate enum +func TargetSamplingRate_Values() []string { + return []string{ + TargetSamplingRatePt1s, + TargetSamplingRatePt5s, + TargetSamplingRatePt10s, + TargetSamplingRatePt15s, + TargetSamplingRatePt30s, + TargetSamplingRatePt1m, + TargetSamplingRatePt5m, + TargetSamplingRatePt10m, + TargetSamplingRatePt15m, + TargetSamplingRatePt30m, + TargetSamplingRatePt1h, + } +} diff --git a/service/lookoutequipment/doc.go b/service/lookoutequipment/doc.go new file mode 100644 index 00000000000..52bd85ddc95 --- 
/dev/null +++ b/service/lookoutequipment/doc.go @@ -0,0 +1,30 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package lookoutequipment provides the client and types for making API +// requests to Amazon Lookout for Equipment. +// +// Amazon Lookout for Equipment is a machine learning service that uses advanced +// analytics to identify anomalies in machines from sensor data for use in predictive +// maintenance. +// +// See https://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15 for more information on this service. +// +// See lookoutequipment package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/lookoutequipment/ +// +// Using the Client +// +// To contact Amazon Lookout for Equipment with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon Lookout for Equipment client LookoutEquipment for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/lookoutequipment/#New +package lookoutequipment diff --git a/service/lookoutequipment/errors.go b/service/lookoutequipment/errors.go new file mode 100644 index 00000000000..c321aedd437 --- /dev/null +++ b/service/lookoutequipment/errors.go @@ -0,0 +1,67 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package lookoutequipment + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeAccessDeniedException for service response error code + // "AccessDeniedException". + // + // The request could not be completed because you do not have access to the + // resource. + ErrCodeAccessDeniedException = "AccessDeniedException" + + // ErrCodeConflictException for service response error code + // "ConflictException". + // + // The request could not be completed due to a conflict with the current state + // of the target resource. + ErrCodeConflictException = "ConflictException" + + // ErrCodeInternalServerException for service response error code + // "InternalServerException". + // + // Processing of the request has failed because of an unknown error, exception + // or failure. + ErrCodeInternalServerException = "InternalServerException" + + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // The resource requested could not be found. Verify the resource ID and retry + // your request. + ErrCodeResourceNotFoundException = "ResourceNotFoundException" + + // ErrCodeServiceQuotaExceededException for service response error code + // "ServiceQuotaExceededException". + // + // Resource limitations have been exceeded. + ErrCodeServiceQuotaExceededException = "ServiceQuotaExceededException" + + // ErrCodeThrottlingException for service response error code + // "ThrottlingException". + // + // The request was denied due to request throttling. + ErrCodeThrottlingException = "ThrottlingException" + + // ErrCodeValidationException for service response error code + // "ValidationException". 
+	//
+	// The input fails to satisfy constraints specified by Amazon Lookout for Equipment
+	// or a related AWS service that's being utilized.
+	ErrCodeValidationException = "ValidationException"
+)
+
+var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
+	"AccessDeniedException":         newErrorAccessDeniedException,
+	"ConflictException":             newErrorConflictException,
+	"InternalServerException":       newErrorInternalServerException,
+	"ResourceNotFoundException":     newErrorResourceNotFoundException,
+	"ServiceQuotaExceededException": newErrorServiceQuotaExceededException,
+	"ThrottlingException":           newErrorThrottlingException,
+	"ValidationException":           newErrorValidationException,
+}
diff --git a/service/lookoutequipment/lookoutequipmentiface/interface.go b/service/lookoutequipment/lookoutequipmentiface/interface.go
new file mode 100644
index 00000000000..154b3b5cf29
--- /dev/null
+++ b/service/lookoutequipment/lookoutequipmentiface/interface.go
@@ -0,0 +1,167 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package lookoutequipmentiface provides an interface to enable mocking the Amazon Lookout for Equipment service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package lookoutequipmentiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/lookoutequipment"
+)
+
+// LookoutEquipmentAPI provides an interface to enable mocking the
+// lookoutequipment.LookoutEquipment service client's API operations,
+// paginators, and waiters. This makes it easier to unit test code that
+// calls out to the SDK's service client.
+//
+// The best way to use this interface is so the SDK's service client's calls
+// can be stubbed out for unit testing your code with the SDK without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+//	// myFunc uses an SDK service client to make a request to
+//	// Amazon Lookout for Equipment.
+//	func myFunc(svc lookoutequipmentiface.LookoutEquipmentAPI) bool {
+//	    // Make svc.CreateDataset request
+//	}
+//
+//	func main() {
+//	    sess := session.New()
+//	    svc := lookoutequipment.New(sess)
+//
+//	    myFunc(svc)
+//	}
+//
+// In your _test.go file:
+//
+//	// Define a mock struct to be used in your unit tests of myFunc.
+//	type mockLookoutEquipmentClient struct {
+//	    lookoutequipmentiface.LookoutEquipmentAPI
+//	}
+//	func (m *mockLookoutEquipmentClient) CreateDataset(input *lookoutequipment.CreateDatasetInput) (*lookoutequipment.CreateDatasetOutput, error) {
+//	    // mock response/functionality
+//	}
+//
+//	func TestMyFunc(t *testing.T) {
+//	    // Setup Test
+//	    mockSvc := &mockLookoutEquipmentClient{}
+//
+//	    myfunc(mockSvc)
+//
+//	    // Verify myFunc's functionality
+//	}
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or to use
+// tooling to generate mocks that satisfy the interfaces.
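+//
+// As an editorial elaboration of the skeleton above (not part of the generated
+// documentation), a mocked operation might return a canned response:
+//
+//	func (m *mockLookoutEquipmentClient) StartInferenceScheduler(in *lookoutequipment.StartInferenceSchedulerInput) (*lookoutequipment.StartInferenceSchedulerOutput, error) {
+//	    return &lookoutequipment.StartInferenceSchedulerOutput{
+//	        InferenceSchedulerName: in.InferenceSchedulerName,
+//	        Status:                 aws.String(lookoutequipment.InferenceSchedulerStatusPending),
+//	    }, nil
+//	}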
+type LookoutEquipmentAPI interface { + CreateDataset(*lookoutequipment.CreateDatasetInput) (*lookoutequipment.CreateDatasetOutput, error) + CreateDatasetWithContext(aws.Context, *lookoutequipment.CreateDatasetInput, ...request.Option) (*lookoutequipment.CreateDatasetOutput, error) + CreateDatasetRequest(*lookoutequipment.CreateDatasetInput) (*request.Request, *lookoutequipment.CreateDatasetOutput) + + CreateInferenceScheduler(*lookoutequipment.CreateInferenceSchedulerInput) (*lookoutequipment.CreateInferenceSchedulerOutput, error) + CreateInferenceSchedulerWithContext(aws.Context, *lookoutequipment.CreateInferenceSchedulerInput, ...request.Option) (*lookoutequipment.CreateInferenceSchedulerOutput, error) + CreateInferenceSchedulerRequest(*lookoutequipment.CreateInferenceSchedulerInput) (*request.Request, *lookoutequipment.CreateInferenceSchedulerOutput) + + CreateModel(*lookoutequipment.CreateModelInput) (*lookoutequipment.CreateModelOutput, error) + CreateModelWithContext(aws.Context, *lookoutequipment.CreateModelInput, ...request.Option) (*lookoutequipment.CreateModelOutput, error) + CreateModelRequest(*lookoutequipment.CreateModelInput) (*request.Request, *lookoutequipment.CreateModelOutput) + + DeleteDataset(*lookoutequipment.DeleteDatasetInput) (*lookoutequipment.DeleteDatasetOutput, error) + DeleteDatasetWithContext(aws.Context, *lookoutequipment.DeleteDatasetInput, ...request.Option) (*lookoutequipment.DeleteDatasetOutput, error) + DeleteDatasetRequest(*lookoutequipment.DeleteDatasetInput) (*request.Request, *lookoutequipment.DeleteDatasetOutput) + + DeleteInferenceScheduler(*lookoutequipment.DeleteInferenceSchedulerInput) (*lookoutequipment.DeleteInferenceSchedulerOutput, error) + DeleteInferenceSchedulerWithContext(aws.Context, *lookoutequipment.DeleteInferenceSchedulerInput, ...request.Option) (*lookoutequipment.DeleteInferenceSchedulerOutput, error) + DeleteInferenceSchedulerRequest(*lookoutequipment.DeleteInferenceSchedulerInput) (*request.Request, *lookoutequipment.DeleteInferenceSchedulerOutput) + + DeleteModel(*lookoutequipment.DeleteModelInput) (*lookoutequipment.DeleteModelOutput, error) + DeleteModelWithContext(aws.Context, *lookoutequipment.DeleteModelInput, ...request.Option) (*lookoutequipment.DeleteModelOutput, error) + DeleteModelRequest(*lookoutequipment.DeleteModelInput) (*request.Request, *lookoutequipment.DeleteModelOutput) + + DescribeDataIngestionJob(*lookoutequipment.DescribeDataIngestionJobInput) (*lookoutequipment.DescribeDataIngestionJobOutput, error) + DescribeDataIngestionJobWithContext(aws.Context, *lookoutequipment.DescribeDataIngestionJobInput, ...request.Option) (*lookoutequipment.DescribeDataIngestionJobOutput, error) + DescribeDataIngestionJobRequest(*lookoutequipment.DescribeDataIngestionJobInput) (*request.Request, *lookoutequipment.DescribeDataIngestionJobOutput) + + DescribeDataset(*lookoutequipment.DescribeDatasetInput) (*lookoutequipment.DescribeDatasetOutput, error) + DescribeDatasetWithContext(aws.Context, *lookoutequipment.DescribeDatasetInput, ...request.Option) (*lookoutequipment.DescribeDatasetOutput, error) + DescribeDatasetRequest(*lookoutequipment.DescribeDatasetInput) (*request.Request, *lookoutequipment.DescribeDatasetOutput) + + DescribeInferenceScheduler(*lookoutequipment.DescribeInferenceSchedulerInput) (*lookoutequipment.DescribeInferenceSchedulerOutput, error) + DescribeInferenceSchedulerWithContext(aws.Context, *lookoutequipment.DescribeInferenceSchedulerInput, ...request.Option) 
(*lookoutequipment.DescribeInferenceSchedulerOutput, error) + DescribeInferenceSchedulerRequest(*lookoutequipment.DescribeInferenceSchedulerInput) (*request.Request, *lookoutequipment.DescribeInferenceSchedulerOutput) + + DescribeModel(*lookoutequipment.DescribeModelInput) (*lookoutequipment.DescribeModelOutput, error) + DescribeModelWithContext(aws.Context, *lookoutequipment.DescribeModelInput, ...request.Option) (*lookoutequipment.DescribeModelOutput, error) + DescribeModelRequest(*lookoutequipment.DescribeModelInput) (*request.Request, *lookoutequipment.DescribeModelOutput) + + ListDataIngestionJobs(*lookoutequipment.ListDataIngestionJobsInput) (*lookoutequipment.ListDataIngestionJobsOutput, error) + ListDataIngestionJobsWithContext(aws.Context, *lookoutequipment.ListDataIngestionJobsInput, ...request.Option) (*lookoutequipment.ListDataIngestionJobsOutput, error) + ListDataIngestionJobsRequest(*lookoutequipment.ListDataIngestionJobsInput) (*request.Request, *lookoutequipment.ListDataIngestionJobsOutput) + + ListDataIngestionJobsPages(*lookoutequipment.ListDataIngestionJobsInput, func(*lookoutequipment.ListDataIngestionJobsOutput, bool) bool) error + ListDataIngestionJobsPagesWithContext(aws.Context, *lookoutequipment.ListDataIngestionJobsInput, func(*lookoutequipment.ListDataIngestionJobsOutput, bool) bool, ...request.Option) error + + ListDatasets(*lookoutequipment.ListDatasetsInput) (*lookoutequipment.ListDatasetsOutput, error) + ListDatasetsWithContext(aws.Context, *lookoutequipment.ListDatasetsInput, ...request.Option) (*lookoutequipment.ListDatasetsOutput, error) + ListDatasetsRequest(*lookoutequipment.ListDatasetsInput) (*request.Request, *lookoutequipment.ListDatasetsOutput) + + ListDatasetsPages(*lookoutequipment.ListDatasetsInput, func(*lookoutequipment.ListDatasetsOutput, bool) bool) error + ListDatasetsPagesWithContext(aws.Context, *lookoutequipment.ListDatasetsInput, func(*lookoutequipment.ListDatasetsOutput, bool) bool, ...request.Option) error + + ListInferenceExecutions(*lookoutequipment.ListInferenceExecutionsInput) (*lookoutequipment.ListInferenceExecutionsOutput, error) + ListInferenceExecutionsWithContext(aws.Context, *lookoutequipment.ListInferenceExecutionsInput, ...request.Option) (*lookoutequipment.ListInferenceExecutionsOutput, error) + ListInferenceExecutionsRequest(*lookoutequipment.ListInferenceExecutionsInput) (*request.Request, *lookoutequipment.ListInferenceExecutionsOutput) + + ListInferenceExecutionsPages(*lookoutequipment.ListInferenceExecutionsInput, func(*lookoutequipment.ListInferenceExecutionsOutput, bool) bool) error + ListInferenceExecutionsPagesWithContext(aws.Context, *lookoutequipment.ListInferenceExecutionsInput, func(*lookoutequipment.ListInferenceExecutionsOutput, bool) bool, ...request.Option) error + + ListInferenceSchedulers(*lookoutequipment.ListInferenceSchedulersInput) (*lookoutequipment.ListInferenceSchedulersOutput, error) + ListInferenceSchedulersWithContext(aws.Context, *lookoutequipment.ListInferenceSchedulersInput, ...request.Option) (*lookoutequipment.ListInferenceSchedulersOutput, error) + ListInferenceSchedulersRequest(*lookoutequipment.ListInferenceSchedulersInput) (*request.Request, *lookoutequipment.ListInferenceSchedulersOutput) + + ListInferenceSchedulersPages(*lookoutequipment.ListInferenceSchedulersInput, func(*lookoutequipment.ListInferenceSchedulersOutput, bool) bool) error + ListInferenceSchedulersPagesWithContext(aws.Context, *lookoutequipment.ListInferenceSchedulersInput, 
func(*lookoutequipment.ListInferenceSchedulersOutput, bool) bool, ...request.Option) error + + ListModels(*lookoutequipment.ListModelsInput) (*lookoutequipment.ListModelsOutput, error) + ListModelsWithContext(aws.Context, *lookoutequipment.ListModelsInput, ...request.Option) (*lookoutequipment.ListModelsOutput, error) + ListModelsRequest(*lookoutequipment.ListModelsInput) (*request.Request, *lookoutequipment.ListModelsOutput) + + ListModelsPages(*lookoutequipment.ListModelsInput, func(*lookoutequipment.ListModelsOutput, bool) bool) error + ListModelsPagesWithContext(aws.Context, *lookoutequipment.ListModelsInput, func(*lookoutequipment.ListModelsOutput, bool) bool, ...request.Option) error + + ListTagsForResource(*lookoutequipment.ListTagsForResourceInput) (*lookoutequipment.ListTagsForResourceOutput, error) + ListTagsForResourceWithContext(aws.Context, *lookoutequipment.ListTagsForResourceInput, ...request.Option) (*lookoutequipment.ListTagsForResourceOutput, error) + ListTagsForResourceRequest(*lookoutequipment.ListTagsForResourceInput) (*request.Request, *lookoutequipment.ListTagsForResourceOutput) + + StartDataIngestionJob(*lookoutequipment.StartDataIngestionJobInput) (*lookoutequipment.StartDataIngestionJobOutput, error) + StartDataIngestionJobWithContext(aws.Context, *lookoutequipment.StartDataIngestionJobInput, ...request.Option) (*lookoutequipment.StartDataIngestionJobOutput, error) + StartDataIngestionJobRequest(*lookoutequipment.StartDataIngestionJobInput) (*request.Request, *lookoutequipment.StartDataIngestionJobOutput) + + StartInferenceScheduler(*lookoutequipment.StartInferenceSchedulerInput) (*lookoutequipment.StartInferenceSchedulerOutput, error) + StartInferenceSchedulerWithContext(aws.Context, *lookoutequipment.StartInferenceSchedulerInput, ...request.Option) (*lookoutequipment.StartInferenceSchedulerOutput, error) + StartInferenceSchedulerRequest(*lookoutequipment.StartInferenceSchedulerInput) (*request.Request, *lookoutequipment.StartInferenceSchedulerOutput) + + StopInferenceScheduler(*lookoutequipment.StopInferenceSchedulerInput) (*lookoutequipment.StopInferenceSchedulerOutput, error) + StopInferenceSchedulerWithContext(aws.Context, *lookoutequipment.StopInferenceSchedulerInput, ...request.Option) (*lookoutequipment.StopInferenceSchedulerOutput, error) + StopInferenceSchedulerRequest(*lookoutequipment.StopInferenceSchedulerInput) (*request.Request, *lookoutequipment.StopInferenceSchedulerOutput) + + TagResource(*lookoutequipment.TagResourceInput) (*lookoutequipment.TagResourceOutput, error) + TagResourceWithContext(aws.Context, *lookoutequipment.TagResourceInput, ...request.Option) (*lookoutequipment.TagResourceOutput, error) + TagResourceRequest(*lookoutequipment.TagResourceInput) (*request.Request, *lookoutequipment.TagResourceOutput) + + UntagResource(*lookoutequipment.UntagResourceInput) (*lookoutequipment.UntagResourceOutput, error) + UntagResourceWithContext(aws.Context, *lookoutequipment.UntagResourceInput, ...request.Option) (*lookoutequipment.UntagResourceOutput, error) + UntagResourceRequest(*lookoutequipment.UntagResourceInput) (*request.Request, *lookoutequipment.UntagResourceOutput) + + UpdateInferenceScheduler(*lookoutequipment.UpdateInferenceSchedulerInput) (*lookoutequipment.UpdateInferenceSchedulerOutput, error) + UpdateInferenceSchedulerWithContext(aws.Context, *lookoutequipment.UpdateInferenceSchedulerInput, ...request.Option) (*lookoutequipment.UpdateInferenceSchedulerOutput, error) + 
UpdateInferenceSchedulerRequest(*lookoutequipment.UpdateInferenceSchedulerInput) (*request.Request, *lookoutequipment.UpdateInferenceSchedulerOutput)
+}
+
+var _ LookoutEquipmentAPI = (*lookoutequipment.LookoutEquipment)(nil)
diff --git a/service/lookoutequipment/service.go b/service/lookoutequipment/service.go
new file mode 100644
index 00000000000..dbe70f4da3d
--- /dev/null
+++ b/service/lookoutequipment/service.go
@@ -0,0 +1,103 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package lookoutequipment
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// LookoutEquipment provides the API operation methods for making requests to
+// Amazon Lookout for Equipment. See this package's package overview docs
+// for details on the service.
+//
+// LookoutEquipment methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type LookoutEquipment struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "LookoutEquipment" // Name of service.
+	EndpointsID = "lookoutequipment" // ID to lookup a service endpoint with.
+	ServiceID   = "LookoutEquipment" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the LookoutEquipment client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//	mySession := session.Must(session.NewSession())
+//
+//	// Create a LookoutEquipment client from just a session.
+//	svc := lookoutequipment.New(mySession)
+//
+//	// Create a LookoutEquipment client with additional configuration
+//	svc := lookoutequipment.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *LookoutEquipment {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *LookoutEquipment {
+	svc := &LookoutEquipment{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				ServiceID:     ServiceID,
+				SigningName:   signingName,
+				SigningRegion: signingRegion,
+				PartitionID:   partitionID,
+				Endpoint:      endpoint,
+				APIVersion:    "2020-12-15",
+				JSONVersion:   "1.0",
+				TargetPrefix:  "AWSLookoutEquipmentFrontendService",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(
+		protocol.NewUnmarshalErrorHandler(jsonrpc.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(),
+	)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a LookoutEquipment operation and runs any
+// custom request initialization.
+func (c *LookoutEquipment) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/service/ram/api.go b/service/ram/api.go
index 9edc288d208..2ae1501944f 100644
--- a/service/ram/api.go
+++ b/service/ram/api.go
@@ -2892,7 +2892,9 @@ type AssociateResourceShareInput struct {
 	// of the request.
 	ClientToken *string `locationName:"clientToken" type:"string"`
 
-	// The principals.
+	// The principals to associate with the resource share. The possible values
+	// are IDs of AWS accounts, and the ARNs of organizational units (OU) or organizations
+	// from AWS Organizations.
 	Principals []*string `locationName:"principals" type:"list"`
 
 	// The Amazon Resource Names (ARN) of the resources.
@@ -4666,11 +4668,15 @@ type ListPrincipalsInput struct {
 	// The resource type.
 	//
-	// Valid values: codebuild:Project | codebuild:ReportGroup | ec2:CapacityReservation
-	// | ec2:DedicatedHost | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway
+	// Valid values: acm-pca:CertificateAuthority | appmesh:Mesh | codebuild:Project
+	// | codebuild:ReportGroup | ec2:CapacityReservation | ec2:DedicatedHost | ec2:LocalGatewayRouteTable
+	// | ec2:PrefixList | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway
 	// | imagebuilder:Component | imagebuilder:Image | imagebuilder:ImageRecipe
-	// | license-manager:LicenseConfiguration I resource-groups:Group | rds:Cluster
-	// | route53resolver:ResolverRule
+	// | imagebuilder:ContainerRecipe | glue:Catalog | glue:Database | glue:Table
+	// | license-manager:LicenseConfiguration | network-firewall:FirewallPolicy
+	// | network-firewall:StatefulRuleGroup | network-firewall:StatelessRuleGroup
+	// | outposts:Outpost | resource-groups:Group | rds:Cluster | route53resolver:FirewallRuleGroup
+	// | route53resolver:ResolverQueryLogConfig | route53resolver:ResolverRule
 	ResourceType *string `locationName:"resourceType" type:"string"`
 }
@@ -4973,11 +4979,15 @@ type ListResourcesInput struct {
 	// The resource type.
 	//
-	// Valid values: codebuild:Project | codebuild:ReportGroup | ec2:CapacityReservation
-	// | ec2:DedicatedHost | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway
+	// Valid values: acm-pca:CertificateAuthority | appmesh:Mesh | codebuild:Project
+	// | codebuild:ReportGroup | ec2:CapacityReservation | ec2:DedicatedHost | ec2:LocalGatewayRouteTable
+	// | ec2:PrefixList | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway
 	// | imagebuilder:Component | imagebuilder:Image | imagebuilder:ImageRecipe
-	// | license-manager:LicenseConfiguration I resource-groups:Group | rds:Cluster
-	// | route53resolver:ResolverRule
+	// | imagebuilder:ContainerRecipe | glue:Catalog | glue:Database | glue:Table
+	// | license-manager:LicenseConfiguration | network-firewall:FirewallPolicy
+	// | network-firewall:StatefulRuleGroup | network-firewall:StatelessRuleGroup
+	// | outposts:Outpost | resource-groups:Group | rds:Cluster | route53resolver:FirewallRuleGroup
+	// | route53resolver:ResolverQueryLogConfig | route53resolver:ResolverRule
 	ResourceType *string `locationName:"resourceType" type:"string"`
 }
diff --git a/service/robomaker/api.go b/service/robomaker/api.go
index 3e360d080a8..77618849326 100644
--- a/service/robomaker/api.go
+++ b/service/robomaker/api.go
@@ -14154,9 +14154,16 @@ type RobotApplicationConfig struct {
 	// LaunchConfig is a required field
 	LaunchConfig *LaunchConfig `locationName:"launchConfig" type:"structure" required:"true"`
 
+	// Information about tools configured for the robot application.
+	Tools []*Tool `locationName:"tools" type:"list"`
+
 	// The upload configurations for the robot application.
 	UploadConfigurations []*UploadConfiguration `locationName:"uploadConfigurations" type:"list"`
 
+	// A Boolean indicating whether to use default robot application tools. The
+	// default tools are rviz, rqt, terminal and rosbag record. The default is False.
+	UseDefaultTools *bool `locationName:"useDefaultTools" type:"boolean"`
+
 	// A Boolean indicating whether to use default upload configurations. By default,
 	// .ros and .gazebo files are uploaded when the application terminates and all
 	// ROS topics will be recorded.
@@ -14195,6 +14202,16 @@ func (s *RobotApplicationConfig) Validate() error {
 			invalidParams.AddNested("LaunchConfig", err.(request.ErrInvalidParams))
 		}
 	}
+	if s.Tools != nil {
+		for i, v := range s.Tools {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tools", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
 	if s.UploadConfigurations != nil {
 		for i, v := range s.UploadConfigurations {
 			if v == nil {
@@ -14230,12 +14247,24 @@ func (s *RobotApplicationConfig) SetLaunchConfig(v *LaunchConfig) *RobotApplicat
 	return s
 }
 
+// SetTools sets the Tools field's value.
+func (s *RobotApplicationConfig) SetTools(v []*Tool) *RobotApplicationConfig {
+	s.Tools = v
+	return s
+}
+
 // SetUploadConfigurations sets the UploadConfigurations field's value.
 func (s *RobotApplicationConfig) SetUploadConfigurations(v []*UploadConfiguration) *RobotApplicationConfig {
 	s.UploadConfigurations = v
 	return s
 }
 
+// SetUseDefaultTools sets the UseDefaultTools field's value.
+func (s *RobotApplicationConfig) SetUseDefaultTools(v bool) *RobotApplicationConfig {
+	s.UseDefaultTools = &v
+	return s
+}
+
 // SetUseDefaultUploadConfigurations sets the UseDefaultUploadConfigurations field's value.
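+// Editorial sketch (not part of the generated documentation): the new tool
+// fields might be attached to an existing config (required fields such as
+// launchConfig omitted); customTool is a hypothetical *robomaker.Tool, built
+// as in the example further below.
+//
+//	cfg := &robomaker.RobotApplicationConfig{ /* required fields elided */ }
+//	cfg.SetUseDefaultTools(false).SetTools([]*robomaker.Tool{customTool})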
func (s *RobotApplicationConfig) SetUseDefaultUploadConfigurations(v bool) *RobotApplicationConfig { s.UseDefaultUploadConfigurations = &v @@ -14588,9 +14617,17 @@ type SimulationApplicationConfig struct { // LaunchConfig is a required field LaunchConfig *LaunchConfig `locationName:"launchConfig" type:"structure" required:"true"` + // Information about tools configured for the simulation application. + Tools []*Tool `locationName:"tools" type:"list"` + // Information about upload configurations for the simulation application. UploadConfigurations []*UploadConfiguration `locationName:"uploadConfigurations" type:"list"` + // A Boolean indicating whether to use default simulation application tools. + // The default tools are rviz, rqt, terminal and rosbag record. The default + // is False. + UseDefaultTools *bool `locationName:"useDefaultTools" type:"boolean"` + // A Boolean indicating whether to use default upload configurations. By default, // .ros and .gazebo files are uploaded when the application terminates and all // ROS topics will be recorded. @@ -14632,6 +14669,16 @@ func (s *SimulationApplicationConfig) Validate() error { invalidParams.AddNested("LaunchConfig", err.(request.ErrInvalidParams)) } } + if s.Tools != nil { + for i, v := range s.Tools { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tools", i), err.(request.ErrInvalidParams)) + } + } + } if s.UploadConfigurations != nil { for i, v := range s.UploadConfigurations { if v == nil { @@ -14677,12 +14724,24 @@ func (s *SimulationApplicationConfig) SetLaunchConfig(v *LaunchConfig) *Simulati return s } +// SetTools sets the Tools field's value. +func (s *SimulationApplicationConfig) SetTools(v []*Tool) *SimulationApplicationConfig { + s.Tools = v + return s +} + // SetUploadConfigurations sets the UploadConfigurations field's value. func (s *SimulationApplicationConfig) SetUploadConfigurations(v []*UploadConfiguration) *SimulationApplicationConfig { s.UploadConfigurations = v return s } +// SetUseDefaultTools sets the UseDefaultTools field's value. +func (s *SimulationApplicationConfig) SetUseDefaultTools(v bool) *SimulationApplicationConfig { + s.UseDefaultTools = &v + return s +} + // SetUseDefaultUploadConfigurations sets the UseDefaultUploadConfigurations field's value. func (s *SimulationApplicationConfig) SetUseDefaultUploadConfigurations(v bool) *SimulationApplicationConfig { s.UseDefaultUploadConfigurations = &v @@ -16231,6 +16290,99 @@ func (s *ThrottlingException) RequestID() string { return s.RespMetadata.RequestID } +// Information about a tool. Tools are used in a simulation job. +type Tool struct { + _ struct{} `type:"structure"` + + // Command-line arguments for the tool. It must include the tool executable + // name. + // + // Command is a required field + Command *string `locationName:"command" min:"1" type:"string" required:"true"` + + // Exit behavior determines what happens when your tool quits running. RESTART + // will cause your tool to be restarted. FAIL will cause your job to exit. The + // default is RESTART. + ExitBehavior *string `locationName:"exitBehavior" type:"string" enum:"ExitBehavior"` + + // The name of the tool. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // Boolean indicating whether logs will be recorded in CloudWatch for the tool. + // The default is False. 
+	StreamOutputToCloudWatch *bool `locationName:"streamOutputToCloudWatch" type:"boolean"`
+
+	// Boolean indicating whether a streaming session will be configured for the
+	// tool. If True, AWS RoboMaker will configure a connection so you can interact
+	// with the tool as it is running in the simulation. The tool must have a graphical
+	// user interface. The default is False.
+	StreamUI *bool `locationName:"streamUI" type:"boolean"`
+}
+
+// String returns the string representation
+func (s Tool) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tool) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Tool) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "Tool"}
+	if s.Command == nil {
+		invalidParams.Add(request.NewErrParamRequired("Command"))
+	}
+	if s.Command != nil && len(*s.Command) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Command", 1))
+	}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+	if s.Name != nil && len(*s.Name) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetCommand sets the Command field's value.
+func (s *Tool) SetCommand(v string) *Tool {
+	s.Command = &v
+	return s
+}
+
+// SetExitBehavior sets the ExitBehavior field's value.
+func (s *Tool) SetExitBehavior(v string) *Tool {
+	s.ExitBehavior = &v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *Tool) SetName(v string) *Tool {
+	s.Name = &v
+	return s
+}
+
+// SetStreamOutputToCloudWatch sets the StreamOutputToCloudWatch field's value.
+func (s *Tool) SetStreamOutputToCloudWatch(v bool) *Tool {
+	s.StreamOutputToCloudWatch = &v
+	return s
+}
+
+// SetStreamUI sets the StreamUI field's value.
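+// Editorial sketch (not part of the generated documentation): a complete Tool
+// might be assembled with the setters above; the name and command are
+// hypothetical placeholders (the command must include the executable name).
+//
+//	customTool := (&robomaker.Tool{}).
+//	    SetName("my-rviz").                     // hypothetical tool name
+//	    SetCommand("rviz -d /tmp/config.rviz"). // hypothetical command line
+//	    SetExitBehavior(robomaker.ExitBehaviorRestart).
+//	    SetStreamUI(true).
+//	    SetStreamOutputToCloudWatch(true)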
+func (s *Tool) SetStreamUI(v bool) *Tool { + s.StreamUI = &v + return s +} + type UntagResourceInput struct { _ struct{} `type:"structure"` @@ -16835,7 +16987,7 @@ type UploadConfiguration struct { // Path is a required field Path *string `locationName:"path" min:"1" type:"string" required:"true"` - // Specifies how to upload the files: + // Specifies when to upload the files: // // UPLOAD_ON_TERMINATE // @@ -17479,8 +17631,20 @@ const ( // DeploymentJobErrorCodeDownloadConditionFailed is a DeploymentJobErrorCode enum value DeploymentJobErrorCodeDownloadConditionFailed = "DownloadConditionFailed" + // DeploymentJobErrorCodeBadLambdaAssociated is a DeploymentJobErrorCode enum value + DeploymentJobErrorCodeBadLambdaAssociated = "BadLambdaAssociated" + // DeploymentJobErrorCodeInternalServerError is a DeploymentJobErrorCode enum value DeploymentJobErrorCodeInternalServerError = "InternalServerError" + + // DeploymentJobErrorCodeRobotApplicationDoesNotExist is a DeploymentJobErrorCode enum value + DeploymentJobErrorCodeRobotApplicationDoesNotExist = "RobotApplicationDoesNotExist" + + // DeploymentJobErrorCodeDeploymentFleetDoesNotExist is a DeploymentJobErrorCode enum value + DeploymentJobErrorCodeDeploymentFleetDoesNotExist = "DeploymentFleetDoesNotExist" + + // DeploymentJobErrorCodeFleetDeploymentTimeout is a DeploymentJobErrorCode enum value + DeploymentJobErrorCodeFleetDeploymentTimeout = "FleetDeploymentTimeout" ) // DeploymentJobErrorCode_Values returns all elements of the DeploymentJobErrorCode enum @@ -17505,7 +17669,11 @@ func DeploymentJobErrorCode_Values() []string { DeploymentJobErrorCodePostLaunchFileFailure, DeploymentJobErrorCodeBadPermissionError, DeploymentJobErrorCodeDownloadConditionFailed, + DeploymentJobErrorCodeBadLambdaAssociated, DeploymentJobErrorCodeInternalServerError, + DeploymentJobErrorCodeRobotApplicationDoesNotExist, + DeploymentJobErrorCodeDeploymentFleetDoesNotExist, + DeploymentJobErrorCodeFleetDeploymentTimeout, } } @@ -17541,6 +17709,22 @@ func DeploymentStatus_Values() []string { } } +const ( + // ExitBehaviorFail is a ExitBehavior enum value + ExitBehaviorFail = "FAIL" + + // ExitBehaviorRestart is a ExitBehavior enum value + ExitBehaviorRestart = "RESTART" +) + +// ExitBehavior_Values returns all elements of the ExitBehavior enum +func ExitBehavior_Values() []string { + return []string{ + ExitBehaviorFail, + ExitBehaviorRestart, + } +} + const ( // FailureBehaviorFail is a FailureBehavior enum value FailureBehaviorFail = "Fail" @@ -17743,6 +17927,12 @@ const ( // SimulationJobErrorCodeSimulationApplicationCrash is a SimulationJobErrorCode enum value SimulationJobErrorCodeSimulationApplicationCrash = "SimulationApplicationCrash" + // SimulationJobErrorCodeRobotApplicationHealthCheckFailure is a SimulationJobErrorCode enum value + SimulationJobErrorCodeRobotApplicationHealthCheckFailure = "RobotApplicationHealthCheckFailure" + + // SimulationJobErrorCodeSimulationApplicationHealthCheckFailure is a SimulationJobErrorCode enum value + SimulationJobErrorCodeSimulationApplicationHealthCheckFailure = "SimulationApplicationHealthCheckFailure" + // SimulationJobErrorCodeBadPermissionsRobotApplication is a SimulationJobErrorCode enum value SimulationJobErrorCodeBadPermissionsRobotApplication = "BadPermissionsRobotApplication" @@ -17776,6 +17966,9 @@ const ( // SimulationJobErrorCodeInvalidS3resource is a SimulationJobErrorCode enum value SimulationJobErrorCodeInvalidS3resource = "InvalidS3Resource" + // SimulationJobErrorCodeThrottlingError is a 
SimulationJobErrorCode enum value + SimulationJobErrorCodeThrottlingError = "ThrottlingError" + // SimulationJobErrorCodeLimitExceeded is a SimulationJobErrorCode enum value SimulationJobErrorCodeLimitExceeded = "LimitExceeded" @@ -17825,6 +18018,8 @@ func SimulationJobErrorCode_Values() []string { SimulationJobErrorCodeInternalServiceError, SimulationJobErrorCodeRobotApplicationCrash, SimulationJobErrorCodeSimulationApplicationCrash, + SimulationJobErrorCodeRobotApplicationHealthCheckFailure, + SimulationJobErrorCodeSimulationApplicationHealthCheckFailure, SimulationJobErrorCodeBadPermissionsRobotApplication, SimulationJobErrorCodeBadPermissionsSimulationApplication, SimulationJobErrorCodeBadPermissionsS3object, @@ -17836,6 +18031,7 @@ func SimulationJobErrorCode_Values() []string { SimulationJobErrorCodeInvalidBundleRobotApplication, SimulationJobErrorCodeInvalidBundleSimulationApplication, SimulationJobErrorCodeInvalidS3resource, + SimulationJobErrorCodeThrottlingError, SimulationJobErrorCodeLimitExceeded, SimulationJobErrorCodeMismatchedEtag, SimulationJobErrorCodeRobotApplicationVersionMismatchedEtag,