diff --git a/.changes/1.31.3.json b/.changes/1.31.3.json new file mode 100644 index 0000000000..f91cd2961b --- /dev/null +++ b/.changes/1.31.3.json @@ -0,0 +1,67 @@ +[ + { + "category": "``cognito-idp``", + "description": "API model updated in Amazon Cognito", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "Add support for deleting Queues and Routing Profiles.", + "type": "api-change" + }, + { + "category": "``datasync``", + "description": "Added LunCount to the response object of DescribeStorageSystemResourcesResponse; LunCount represents the number of LUNs on a storage system resource.", + "type": "api-change" + }, + { + "category": "``dms``", + "description": "Enhanced PostgreSQL target endpoint settings for providing Babelfish support.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "This release adds support for the C7gn and Hpc7g instances. C7gn instances are powered by AWS Graviton3 processors and the fifth-generation AWS Nitro Cards. Hpc7g instances are powered by AWS Graviton 3E processors and provide up to 200 Gbps network bandwidth.", + "type": "api-change" + }, + { + "category": "``fsx``", + "description": "Amazon FSx for NetApp ONTAP now supports SnapLock, an ONTAP feature that enables you to protect your files in a volume by transitioning them to a write once, read many (WORM) state.", + "type": "api-change" + }, + { + "category": "``iam``", + "description": "Documentation updates for AWS Identity and Access Management (IAM).", + "type": "api-change" + }, + { + "category": "``mediatailor``", + "description": "Adds categories to MediaTailor channel assembly alerts", + "type": "api-change" + }, + { + "category": "``personalize``", + "description": "This release provides the ability for customers to change the schema associated with their datasets in Amazon Personalize", + "type": "api-change" + }, + { + "category": "``proton``", + "description": "This release adds support for deployment history for Proton provisioned resources", + "type": "api-change" + }, + { + "category": "``s3``", + "description": "S3 Inventory now supports Object Access Control List and Object Owner as available object metadata fields in inventory reports.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "Amazon SageMaker Canvas adds WorkspaceSettings support for CanvasAppSettings", + "type": "api-change" + }, + { + "category": "``secretsmanager``", + "description": "Documentation updates for Secrets Manager", + "type": "api-change" + } +] \ No newline at end of file diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e7e1b9b7dc..d2b92fc999 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,24 @@ CHANGELOG ========= +1.31.3 +====== + +* api-change:``cognito-idp``: API model updated in Amazon Cognito +* api-change:``connect``: Add support for deleting Queues and Routing Profiles. +* api-change:``datasync``: Added LunCount to the response object of DescribeStorageSystemResourcesResponse; LunCount represents the number of LUNs on a storage system resource. +* api-change:``dms``: Enhanced PostgreSQL target endpoint settings for providing Babelfish support. +* api-change:``ec2``: This release adds support for the C7gn and Hpc7g instances. C7gn instances are powered by AWS Graviton3 processors and the fifth-generation AWS Nitro Cards. Hpc7g instances are powered by AWS Graviton 3E processors and provide up to 200 Gbps network bandwidth.
+* api-change:``fsx``: Amazon FSx for NetApp ONTAP now supports SnapLock, an ONTAP feature that enables you to protect your files in a volume by transitioning them to a write once, read many (WORM) state. +* api-change:``iam``: Documentation updates for AWS Identity and Access Management (IAM). +* api-change:``mediatailor``: Adds categories to MediaTailor channel assembly alerts +* api-change:``personalize``: This release provides the ability for customers to change the schema associated with their datasets in Amazon Personalize +* api-change:``proton``: This release adds support for deployment history for Proton provisioned resources +* api-change:``s3``: S3 Inventory now supports Object Access Control List and Object Owner as available object metadata fields in inventory reports. +* api-change:``sagemaker``: Amazon SageMaker Canvas adds WorkspaceSettings support for CanvasAppSettings +* api-change:``secretsmanager``: Documentation updates for Secrets Manager + + 1.31.2 ====== diff --git a/botocore/__init__.py b/botocore/__init__.py index dfaed08ab4..5997095905 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.31.2' +__version__ = '1.31.3' class NullHandler(logging.Handler): diff --git a/botocore/data/cognito-idp/2016-04-18/service-2.json b/botocore/data/cognito-idp/2016-04-18/service-2.json index abcf0510e7..0426da05be 100644 --- a/botocore/data/cognito-idp/2016-04-18/service-2.json +++ b/botocore/data/cognito-idp/2016-04-18/service-2.json @@ -6947,7 +6947,8 @@ "type":"string", "max":6, "min":6, - "pattern":"[0-9]+" + "pattern":"[0-9]+", + "sensitive":true }, "SoftwareTokenMfaConfigType":{ "type":"structure", @@ -7718,7 +7719,8 @@ "documentation":"

Encoded device-fingerprint details that your app collected with the Amazon Cognito context data collection library. For more information, see Adding user device and session data to API requests.

" } }, - "documentation":"

Contextual data, such as the user's device fingerprint, IP address, or location, used for evaluating the risk of an unexpected event by Amazon Cognito advanced security.

" + "documentation":"

Contextual data, such as the user's device fingerprint, IP address, or location, used for evaluating the risk of an unexpected event by Amazon Cognito advanced security.
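A minimal boto3 sketch of how a caller supplies this encoded fingerprint (now marked sensitive) during sign-in; the client ID, credentials, and encoded payload below are placeholders, not values from this changeset:

```python
import boto3

cognito = boto3.client("cognito-idp", region_name="us-east-1")

response = cognito.initiate_auth(
    ClientId="example-app-client-id",  # placeholder
    AuthFlow="USER_PASSWORD_AUTH",
    AuthParameters={"USERNAME": "jdoe", "PASSWORD": "example-password"},
    # EncodedData is produced by the Amazon Cognito context data
    # collection library running in your app.
    UserContextData={"EncodedData": "example-encoded-device-fingerprint"},
)
```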

", + "sensitive":true }, "UserFilterType":{ "type":"string", diff --git a/botocore/data/connect/2017-08-08/service-2.json b/botocore/data/connect/2017-08-08/service-2.json index 37cc6a6c04..1cc5b2c287 100644 --- a/botocore/data/connect/2017-08-08/service-2.json +++ b/botocore/data/connect/2017-08-08/service-2.json @@ -746,6 +746,23 @@ ], "documentation":"

Deletes a prompt.

" }, + "DeleteQueue":{ + "name":"DeleteQueue", + "http":{ + "method":"DELETE", + "requestUri":"/queues/{InstanceId}/{QueueId}" + }, + "input":{"shape":"DeleteQueueRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Deletes a queue.

" + }, "DeleteQuickConnect":{ "name":"DeleteQuickConnect", "http":{ @@ -762,6 +779,23 @@ ], "documentation":"

Deletes a quick connect.

" }, + "DeleteRoutingProfile":{ + "name":"DeleteRoutingProfile", + "http":{ + "method":"DELETE", + "requestUri":"/routing-profiles/{InstanceId}/{RoutingProfileId}" + }, + "input":{"shape":"DeleteRoutingProfileRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Deletes a routing profile.

" + }, "DeleteRule":{ "name":"DeleteRule", "http":{ @@ -5977,6 +6011,27 @@ } } }, + "DeleteQueueRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "QueueId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "QueueId":{ + "shape":"QueueId", + "documentation":"

The identifier for the queue.

", + "location":"uri", + "locationName":"QueueId" + } + } + }, "DeleteQuickConnectRequest":{ "type":"structure", "required":[ @@ -5998,6 +6053,27 @@ } } }, + "DeleteRoutingProfileRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "RoutingProfileId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "RoutingProfileId":{ + "shape":"RoutingProfileId", + "documentation":"

The identifier of the routing profile.
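Taken together with DeleteQueue above, a minimal boto3 sketch of the two new delete operations; the instance, queue, and routing profile IDs are placeholders:

```python
import boto3

connect = boto3.client("connect")

connect.delete_queue(
    InstanceId="11111111-2222-3333-4444-555555555555",  # placeholder
    QueueId="example-queue-id",
)
connect.delete_routing_profile(
    InstanceId="11111111-2222-3333-4444-555555555555",
    RoutingProfileId="example-routing-profile-id",
)
```

Per the error shapes above, both operations raise ResourceInUseException if the queue or routing profile is still referenced.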

", + "location":"uri", + "locationName":"RoutingProfileId" + } + } + }, "DeleteRuleRequest":{ "type":"structure", "required":[ @@ -9148,7 +9224,7 @@ }, "InstanceAccessUrl":{ "shape":"Url", - "documentation":"

This URL allows contact center users to access Amazon Connect admin website.

" + "documentation":"

This URL allows contact center users to access the Amazon Connect admin website.

" } }, "documentation":"

The Amazon Connect instance.

" @@ -9297,7 +9373,7 @@ }, "InstanceAccessUrl":{ "shape":"Url", - "documentation":"

This URL allows contact center users to access Amazon Connect admin website.

" + "documentation":"

This URL allows contact center users to access the Amazon Connect admin website.

" } }, "documentation":"

Information about the instance.

" diff --git a/botocore/data/datasync/2018-11-09/service-2.json b/botocore/data/datasync/2018-11-09/service-2.json index 7dad1a3f49..26461af8c8 100644 --- a/botocore/data/datasync/2018-11-09/service-2.json +++ b/botocore/data/datasync/2018-11-09/service-2.json @@ -55,7 +55,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Activates an DataSync agent that you have deployed in your storage environment. The activation process associates your agent with your account. In the activation process, you specify information such as the Amazon Web Services Region that you want to activate the agent in. You activate the agent in the Amazon Web Services Region where your target locations (in Amazon S3 or Amazon EFS) reside. Your tasks are created in this Amazon Web Services Region.

You can activate the agent in a VPC (virtual private cloud) or provide the agent access to a VPC endpoint so you can run tasks without going over the public internet.

You can use an agent for more than one location. If a task uses multiple agents, all of them need to have status AVAILABLE for the task to run. If you use multiple agents for a source location, the status of all the agents must be AVAILABLE for the task to run.

Agents are automatically updated by Amazon Web Services on a regular basis, using a mechanism that ensures minimal interruption to your tasks.

" + "documentation":"

Activates a DataSync agent that you've deployed in your storage environment. The activation process associates the agent with your Amazon Web Services account.

If you haven't deployed an agent yet, see the following topics to learn more:

If you're transferring between Amazon Web Services storage services, you don't need a DataSync agent.
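A minimal boto3 sketch of the activation call described above; the activation key and agent name are placeholders:

```python
import boto3

datasync = boto3.client("datasync")

response = datasync.create_agent(
    ActivationKey="AAAAA-1BBBB-2CCCC-3DDDD-4EEEE",  # placeholder
    AgentName="on-prem-agent-1",
)
print(response["AgentArn"])
```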

" }, "CreateLocationEfs":{ "name":"CreateLocationEfs", @@ -983,27 +983,27 @@ "members":{ "ActivationKey":{ "shape":"ActivationKey", - "documentation":"

Your agent activation key. You can get the activation key either by sending an HTTP GET request with redirects that enable you to get the agent IP address (port 80). Alternatively, you can get it from the DataSync console.

The redirect URL returned in the response provides you the activation key for your agent in the query string parameter activationKey. It might also include other activation-related parameters; however, these are merely defaults. The arguments you pass to this API call determine the actual configuration of your agent.

For more information, see Activating an Agent in the DataSync User Guide.

" + "documentation":"

Specifies your DataSync agent's activation key. If you don't have an activation key, see Activate your agent.

" }, "AgentName":{ "shape":"TagValue", - "documentation":"

The name you configured for your agent. This value is a text reference that is used to identify the agent in the console.

" + "documentation":"

Specifies a name for your agent. You can see this name in the DataSync console.

" }, "Tags":{ "shape":"InputTagList", - "documentation":"

The key-value pair that represents the tag that you want to associate with the agent. The value can be an empty string. This value helps you manage, filter, and search for your agents.

Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @.

" + "documentation":"

Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least one tag for your agent.

" }, "VpcEndpointId":{ "shape":"VpcEndpointId", - "documentation":"

The ID of the VPC (virtual private cloud) endpoint that the agent has access to. This is the client-side VPC endpoint, also called a PrivateLink. If you don't have a PrivateLink VPC endpoint, see Creating a VPC Endpoint Service Configuration in the Amazon VPC User Guide.

VPC endpoint ID looks like this: vpce-01234d5aff67890e1.

" + "documentation":"

Specifies the ID of the VPC endpoint that you want your agent to connect to. For example, a VPC endpoint ID looks like vpce-01234d5aff67890e1.

The VPC endpoint you use must include the DataSync service name (for example, com.amazonaws.us-east-2.datasync).

" }, "SubnetArns":{ "shape":"PLSubnetArnList", - "documentation":"

The Amazon Resource Names (ARNs) of the subnets in which DataSync will create elastic network interfaces for each data transfer task. The agent that runs a task must be private. When you start a task that is associated with an agent created in a VPC, or one that has access to an IP address in a VPC, then the task is also private. In this case, DataSync creates four network interfaces for each task in your subnet. For a data transfer to work, the agent must be able to route to all these four network interfaces.

" + "documentation":"

Specifies the ARN of the subnet where you want to run your DataSync task when using a VPC endpoint. This is the subnet where DataSync creates and manages the network interfaces for your transfer.

" }, "SecurityGroupArns":{ "shape":"PLSecurityGroupArnList", - "documentation":"

The ARNs of the security groups used to protect your data transfer task subnets. See SecurityGroupArns.

" + "documentation":"

Specifies the Amazon Resource Name (ARN) of the security group that protects your task's network interfaces when using a virtual private cloud (VPC) endpoint.
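Putting the VPC endpoint parameters above together, a hedged sketch of a private activation; every ARN and ID here is a placeholder:

```python
import boto3

datasync = boto3.client("datasync")

datasync.create_agent(
    ActivationKey="AAAAA-1BBBB-2CCCC-3DDDD-4EEEE",  # placeholder
    AgentName="vpc-agent",
    VpcEndpointId="vpce-01234d5aff67890e1",
    SubnetArns=[
        "arn:aws:ec2:us-east-2:111122223333:subnet/subnet-0123456789abcdef0"
    ],
    SecurityGroupArns=[
        "arn:aws:ec2:us-east-2:111122223333:security-group/sg-0123456789abcdef0"
    ],
)
```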

" } }, "documentation":"

CreateAgentRequest

" @@ -1013,7 +1013,7 @@ "members":{ "AgentArn":{ "shape":"AgentArn", - "documentation":"

The Amazon Resource Name (ARN) of the agent. Use the ListAgents operation to return a list of agents for your account and Amazon Web Services Region.

" + "documentation":"

The ARN of the agent that you just activated. Use the ListAgents operation to return a list of agents in your Amazon Web Services account and Amazon Web Services Region.

" } }, "documentation":"

CreateAgentResponse

" @@ -1202,7 +1202,7 @@ }, "User":{ "shape":"SmbUser", - "documentation":"

Specifies the user who has the permissions to access files and folders in the file system.

For information about choosing a user name that ensures sufficient permissions to files, folders, and metadata, see user.

" + "documentation":"

Specifies the user who has the permissions to access files, folders, and metadata in your file system.

For information about choosing a user with sufficient permissions, see Required permissions.

" }, "Domain":{ "shape":"SmbDomain", @@ -1381,7 +1381,7 @@ }, "ServerCertificate":{ "shape":"ObjectStorageCertificate", - "documentation":"

Specifies a certificate to authenticate with an object storage system that uses a private or self-signed certificate authority (CA). You must specify a Base64-encoded .pem file (for example, file:///home/user/.ssh/storage_sys_certificate.pem). The certificate can be up to 32768 bytes (before Base64 encoding).

To use this parameter, configure ServerProtocol to HTTPS.

" + "documentation":"

Specifies a file with the certificates that are used to sign the object storage server's certificate (for example, file:///home/user/.ssh/storage_sys_certificate.pem). The file you specify must include the following:

The file can be up to 32768 bytes (before base64 encoding).

To use this parameter, configure ServerProtocol to HTTPS.

" } }, "documentation":"

CreateLocationObjectStorageRequest

" @@ -3321,6 +3321,10 @@ "RecommendationStatus":{ "shape":"RecommendationStatus", "documentation":"

Indicates whether DataSync Discovery recommendations for the cluster are ready to view, incomplete, or can't be determined.

For more information, see Recommendation statuses.

" + }, + "LunCount":{ + "shape":"NonNegativeLong", + "documentation":"

The number of LUNs (logical unit numbers) in the cluster.

" } }, "documentation":"

The information that DataSync Discovery collects about an on-premises storage system cluster.

" @@ -3383,6 +3387,10 @@ "TotalSnapshotCapacityUsed":{ "shape":"NonNegativeLong", "documentation":"

The amount of storage in the SVM that's being used for snapshots.

" + }, + "LunCount":{ + "shape":"NonNegativeLong", + "documentation":"

The number of LUNs (logical unit numbers) in the SVM.

" } }, "documentation":"

The information that DataSync Discovery collects about a storage virtual machine (SVM) in your on-premises storage system.

" @@ -3449,6 +3457,10 @@ "RecommendationStatus":{ "shape":"RecommendationStatus", "documentation":"

Indicates whether DataSync Discovery recommendations for the volume are ready to view, incomplete, or can't be determined.

For more information, see Recommendation statuses.

" + }, + "LunCount":{ + "shape":"NonNegativeLong", + "documentation":"

The number of LUNs (logical unit numbers) in the volume.
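A short boto3 sketch of reading the new LunCount field from DataSync Discovery results; the discovery job ARN is a placeholder, and the VolumeName key is an assumption based on the existing volume shape:

```python
import boto3

datasync = boto3.client("datasync")

resp = datasync.describe_storage_system_resources(
    DiscoveryJobArn="arn:aws:datasync:us-east-1:111122223333:discovery-job/example",  # placeholder
    ResourceType="VOLUME",
)
for volume in resp["ResourceDetails"]["NetAppONTAPVolumes"]:
    # LunCount may be absent while a resource is still being analyzed.
    print(volume.get("VolumeName"), volume.get("LunCount"))
```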

" } }, "documentation":"

The information that DataSync Discovery collects about a volume in your on-premises storage system.

" @@ -3615,7 +3627,7 @@ }, "TaskQueueing":{ "shape":"TaskQueueing", - "documentation":"

Specifies whether tasks should be queued before executing the tasks. The default is ENABLED, which means the tasks will be queued.

If you use the same agent to run multiple tasks, you can enable the tasks to run in series. For more information, see Queueing task executions.

" + "documentation":"

Specifies whether your transfer tasks should be put into a queue during certain scenarios when running multiple tasks. This is ENABLED by default.

" }, "LogLevel":{ "shape":"LogLevel", @@ -4700,5 +4712,5 @@ "long":{"type":"long"}, "string":{"type":"string"} }, - "documentation":"DataSync

DataSync is a managed data transfer service that makes it simpler for you to automate moving data between on-premises storage and Amazon Web Services storage services. You also can use DataSync to transfer data between other cloud providers and Amazon Web Services storage services.

This API interface reference includes documentation for using DataSync programmatically. For complete information, see the DataSync User Guide .

" + "documentation":"DataSync

DataSync is an online data movement and discovery service that simplifies data migration and helps you quickly, easily, and securely transfer your file or object data to, from, and between Amazon Web Services storage services.

This API interface reference includes documentation for using DataSync programmatically. For complete information, see the DataSync User Guide.

" } diff --git a/botocore/data/dms/2016-01-01/service-2.json b/botocore/data/dms/2016-01-01/service-2.json index 7c9a25e239..7a32af8d2f 100644 --- a/botocore/data/dms/2016-01-01/service-2.json +++ b/botocore/data/dms/2016-01-01/service-2.json @@ -2161,6 +2161,13 @@ "type":"list", "member":{"shape":"DatabaseResponse"} }, + "DatabaseMode":{ + "type":"string", + "enum":[ + "default", + "babelfish" + ] + }, "DatabaseResponse":{ "type":"structure", "members":{ @@ -5582,6 +5589,14 @@ "MapLongVarcharAs":{ "shape":"LongVarcharMappingType", "documentation":"

When true, DMS migrates LONG values as VARCHAR.

" + }, + "DatabaseMode":{ + "shape":"DatabaseMode", + "documentation":"

Specifies whether to use default or custom replication behavior for PostgreSQL-compatible endpoints. You can use this setting to specify replication behavior for endpoints that require additional configuration, such as Babelfish endpoints.

" + }, + "BabelfishDatabaseName":{ + "shape":"String", + "documentation":"

The Babelfish for Aurora PostgreSQL database name for the endpoint.
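A hedged boto3 sketch of creating a PostgreSQL target endpoint with the two new Babelfish settings; every connection detail is a placeholder:

```python
import boto3

dms = boto3.client("dms")

dms.create_endpoint(
    EndpointIdentifier="babelfish-target",  # placeholder
    EndpointType="target",
    EngineName="postgres",
    ServerName="example-cluster.cluster-abc.us-east-1.rds.amazonaws.com",
    Port=5432,
    Username="admin",
    Password="example-password",
    DatabaseName="babelfish_db",
    PostgreSQLSettings={
        "DatabaseMode": "babelfish",
        "BabelfishDatabaseName": "babelfish_db",
    },
)
```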

" } }, "documentation":"

Provides information that defines a PostgreSQL endpoint.

" diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index 3a860699dc..928d7cebc4 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -35274,7 +35274,18 @@ "i4g.2xlarge", "i4g.4xlarge", "i4g.8xlarge", - "i4g.16xlarge" + "i4g.16xlarge", + "hpc7g.4xlarge", + "hpc7g.8xlarge", + "hpc7g.16xlarge", + "c7gn.medium", + "c7gn.large", + "c7gn.xlarge", + "c7gn.2xlarge", + "c7gn.4xlarge", + "c7gn.8xlarge", + "c7gn.12xlarge", + "c7gn.16xlarge" ] }, "InstanceTypeHypervisor":{ diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 3d1d6c722f..0e1d6f0b02 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -7952,27 +7952,45 @@ "ap-northeast-2" : { "hostname" : "internetmonitor.ap-northeast-2.api.aws" }, + "ap-northeast-3" : { + "hostname" : "internetmonitor.ap-northeast-3.api.aws" + }, "ap-south-1" : { "hostname" : "internetmonitor.ap-south-1.api.aws" }, + "ap-south-2" : { + "hostname" : "internetmonitor.ap-south-2.api.aws" + }, "ap-southeast-1" : { "hostname" : "internetmonitor.ap-southeast-1.api.aws" }, "ap-southeast-2" : { "hostname" : "internetmonitor.ap-southeast-2.api.aws" }, + "ap-southeast-3" : { + "hostname" : "internetmonitor.ap-southeast-3.api.aws" + }, + "ap-southeast-4" : { + "hostname" : "internetmonitor.ap-southeast-4.api.aws" + }, "ca-central-1" : { "hostname" : "internetmonitor.ca-central-1.api.aws" }, "eu-central-1" : { "hostname" : "internetmonitor.eu-central-1.api.aws" }, + "eu-central-2" : { + "hostname" : "internetmonitor.eu-central-2.api.aws" + }, "eu-north-1" : { "hostname" : "internetmonitor.eu-north-1.api.aws" }, "eu-south-1" : { "hostname" : "internetmonitor.eu-south-1.api.aws" }, + "eu-south-2" : { + "hostname" : "internetmonitor.eu-south-2.api.aws" + }, "eu-west-1" : { "hostname" : "internetmonitor.eu-west-1.api.aws" }, @@ -7982,6 +8000,9 @@ "eu-west-3" : { "hostname" : "internetmonitor.eu-west-3.api.aws" }, + "me-central-1" : { + "hostname" : "internetmonitor.me-central-1.api.aws" + }, "me-south-1" : { "hostname" : "internetmonitor.me-south-1.api.aws" }, @@ -10160,6 +10181,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -15385,17 +15407,77 @@ "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, - "ca-central-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "ssm-incidents-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "ssm-incidents-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "ssm-incidents-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "ssm-incidents-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "ssm-incidents-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "ssm-incidents-fips.us-west-2.amazonaws.com" + }, "sa-east-1" : { 
}, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "us-east-1" : { + "variants" : [ { + "hostname" : "ssm-incidents-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "ssm-incidents-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "ssm-incidents-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "ssm-incidents-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } } }, "ssm-sap" : { @@ -16282,6 +16364,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "transfer-fips.ca-central-1.amazonaws.com", @@ -18758,6 +18841,18 @@ } } }, + "savingsplans" : { + "endpoints" : { + "aws-cn" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "savingsplans.cn-northwest-1.amazonaws.com.cn" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-cn" + }, "secretsmanager" : { "endpoints" : { "cn-north-1" : { }, @@ -22482,6 +22577,18 @@ } } }, + "savingsplans" : { + "endpoints" : { + "aws-us-gov-global" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "savingsplans.amazonaws.com" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-us-gov-global" + }, "secretsmanager" : { "endpoints" : { "us-gov-east-1" : { diff --git a/botocore/data/fsx/2018-03-01/service-2.json b/botocore/data/fsx/2018-03-01/service-2.json index bf179cda82..3f392d54c8 100644 --- a/botocore/data/fsx/2018-03-01/service-2.json +++ b/botocore/data/fsx/2018-03-01/service-2.json @@ -692,7 +692,7 @@ {"shape":"MissingFileSystemConfiguration"}, {"shape":"ServiceLimitExceeded"} ], - "documentation":"

Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.

For FSx for Windows File Server file systems, you can update the following properties:

For FSx for Lustre file systems, you can update the following properties:

For FSx for ONTAP file systems, you can update the following properties:

For FSx for OpenZFS file systems, you can update the following properties:

" + "documentation":"

Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.

For FSx for Windows File Server file systems, you can update the following properties:

For FSx for Lustre file systems, you can update the following properties:

For FSx for ONTAP file systems, you can update the following properties:

For FSx for OpenZFS file systems, you can update the following properties:

" }, "UpdateSnapshot":{ "name":"UpdateSnapshot", @@ -971,6 +971,37 @@ "NEW_CHANGED_DELETED" ] }, + "AutocommitPeriod":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{ + "shape":"AutocommitPeriodType", + "documentation":"

Defines the type of time for the autocommit period of a file in an FSx for ONTAP SnapLock volume. Setting this value to NONE disables autocommit. The default value is NONE.

" + }, + "Value":{ + "shape":"AutocommitPeriodValue", + "documentation":"

Defines the amount of time for the autocommit period of a file in an FSx for ONTAP SnapLock volume. The following ranges are valid:

" + } + }, + "documentation":"

Sets the autocommit period of files in an FSx for ONTAP SnapLock volume, which determines how long the files must remain unmodified before they're automatically transitioned to the write once, read many (WORM) state.

For more information, see Autocommit.

" + }, + "AutocommitPeriodType":{ + "type":"string", + "enum":[ + "MINUTES", + "HOURS", + "DAYS", + "MONTHS", + "YEARS", + "NONE" + ] + }, + "AutocommitPeriodValue":{ + "type":"integer", + "max":65535, + "min":1 + }, "AutomaticBackupRetentionDays":{ "type":"integer", "documentation":"

The number of days to retain automatic backups. Setting this property to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is 30.

", @@ -1764,7 +1795,7 @@ }, "SizeInMegabytes":{ "shape":"VolumeCapacity", - "documentation":"

Specifies the size of the volume, in megabytes (MB), that you are creating. Provide any whole number in the range of 20–104857600 to specify the size of the volume.

" + "documentation":"

Specifies the size of the volume, in megabytes (MB), that you are creating.

" }, "StorageEfficiencyEnabled":{ "shape":"Flag", @@ -1786,6 +1817,10 @@ "CopyTagsToBackups":{ "shape":"Flag", "documentation":"

A boolean flag indicating whether tags for the volume should be copied to backups. This value defaults to false. If it's set to true, all tags for the volume are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the volume, regardless of this value.

" + }, + "SnaplockConfiguration":{ + "shape":"CreateSnaplockConfiguration", + "documentation":"

Specifies the SnapLock configuration for an FSx for ONTAP volume.

" } }, "documentation":"

Specifies the configuration of the ONTAP volume that you are creating.

" @@ -1852,6 +1887,37 @@ }, "documentation":"

Specifies the configuration of the Amazon FSx for OpenZFS volume that you are creating.

" }, + "CreateSnaplockConfiguration":{ + "type":"structure", + "required":["SnaplockType"], + "members":{ + "AuditLogVolume":{ + "shape":"Flag", + "documentation":"

Enables or disables the audit log volume for an FSx for ONTAP SnapLock volume. The default value is false. If you set AuditLogVolume to true, the SnapLock volume is created as an audit log volume. The minimum retention period for an audit log volume is six months.

For more information, see SnapLock audit log volumes.

" + }, + "AutocommitPeriod":{ + "shape":"AutocommitPeriod", + "documentation":"

The configuration object for setting the autocommit period of files in an FSx for ONTAP SnapLock volume.

" + }, + "PrivilegedDelete":{ + "shape":"PrivilegedDelete", + "documentation":"

Enables, disables, or permanently disables privileged delete on an FSx for ONTAP SnapLock Enterprise volume. Enabling privileged delete allows SnapLock administrators to delete WORM files even if they have active retention periods. PERMANENTLY_DISABLED is a terminal state. If privileged delete is permanently disabled on a SnapLock volume, you can't re-enable it. The default value is DISABLED.

For more information, see Privileged delete.

" + }, + "RetentionPeriod":{ + "shape":"SnaplockRetentionPeriod", + "documentation":"

Specifies the retention period of an FSx for ONTAP SnapLock volume.

" + }, + "SnaplockType":{ + "shape":"SnaplockType", + "documentation":"

Specifies the retention mode of an FSx for ONTAP SnapLock volume. After it is set, it can't be changed. You can choose one of the following retention modes:

" + }, + "VolumeAppendModeEnabled":{ + "shape":"Flag", + "documentation":"

Enables or disables volume-append mode on an FSx for ONTAP SnapLock volume. Volume-append mode allows you to create WORM-appendable files and write data to them incrementally. The default value is false.

For more information, see Volume-append mode.

" + } + }, + "documentation":"

Defines the SnapLock configuration when creating an FSx for ONTAP SnapLock volume.
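A minimal boto3 sketch of creating an ONTAP volume with this new SnapLock configuration; the SVM ID, size, and retention values are illustrative assumptions:

```python
import boto3

fsx = boto3.client("fsx")

fsx.create_volume(
    VolumeType="ONTAP",
    Name="worm_vol",
    OntapConfiguration={
        "StorageVirtualMachineId": "svm-0123456789abcdef0",  # placeholder
        "JunctionPath": "/worm_vol",
        "SizeInMegabytes": 102400,
        "SnaplockConfiguration": {
            "SnaplockType": "ENTERPRISE",  # can't be changed once set
            "RetentionPeriod": {
                "DefaultRetention": {"Type": "YEARS", "Value": 1},
                "MinimumRetention": {"Type": "DAYS", "Value": 30},
                "MaximumRetention": {"Type": "YEARS", "Value": 5},
            },
            "AutocommitPeriod": {"Type": "HOURS", "Value": 24},
        },
    },
)
```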

" + }, "CreateSnapshotRequest":{ "type":"structure", "required":[ @@ -2643,9 +2709,13 @@ "shape":"Flag", "documentation":"

Set to true if you want to skip taking a final backup of the volume you are deleting.

" }, - "FinalBackupTags":{"shape":"Tags"} + "FinalBackupTags":{"shape":"Tags"}, + "BypassSnaplockEnterpriseRetention":{ + "shape":"Flag", + "documentation":"

Setting this to true allows a SnapLock administrator to delete an FSx for ONTAP SnapLock Enterprise volume with unexpired write once, read many (WORM) files. The IAM permission fsx:BypassSnaplockEnterpriseRetention is also required to delete SnapLock Enterprise volumes with unexpired WORM files. The default value is false.

For more information, see Deleting a SnapLock volume.
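A minimal boto3 sketch of the delete flow this flag enables; the volume ID is a placeholder:

```python
import boto3

fsx = boto3.client("fsx")

fsx.delete_volume(
    VolumeId="fsvol-0123456789abcdef0",  # placeholder
    OntapConfiguration={
        "SkipFinalBackup": True,
        # Requires the fsx:BypassSnaplockEnterpriseRetention IAM permission
        # and applies only to SnapLock Enterprise volumes.
        "BypassSnaplockEnterpriseRetention": True,
    },
)
```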

" + } }, - "documentation":"

Use to specify skipping a final backup, or to add tags to a final backup.

" + "documentation":"

Use to specify skipping a final backup, adding tags to a final backup, or bypassing the retention period of an FSx for ONTAP SnapLock Enterprise volume when deleting an FSx for ONTAP volume.

" }, "DeleteVolumeOntapResponse":{ "type":"structure", @@ -4106,6 +4176,10 @@ "CopyTagsToBackups":{ "shape":"Flag", "documentation":"

A boolean flag indicating whether tags for the volume should be copied to backups. This value defaults to false. If it's set to true, all tags for the volume are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the volume, regardless of this value.

" + }, + "SnaplockConfiguration":{ + "shape":"SnaplockConfiguration", + "documentation":"

The SnapLock configuration object for an FSx for ONTAP SnapLock volume.

" } }, "documentation":"

The configuration of an Amazon FSx for NetApp ONTAP volume.

" @@ -4381,6 +4455,14 @@ "max":1000, "min":12 }, + "PrivilegedDelete":{ + "type":"string", + "enum":[ + "DISABLED", + "ENABLED", + "PERMANENTLY_DISABLED" + ] + }, "ProgressPercent":{ "type":"integer", "documentation":"

Displays the current percent of progress of an asynchronous task.

", @@ -4520,6 +4602,39 @@ } } }, + "RetentionPeriod":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{ + "shape":"RetentionPeriodType", + "documentation":"

Defines the type of time for the retention period of an FSx for ONTAP SnapLock volume. Set it to one of the valid types. If you set it to INFINITE, the files are retained forever. If you set it to UNSPECIFIED, the files are retained until you set an explicit retention period.

" + }, + "Value":{ + "shape":"RetentionPeriodValue", + "documentation":"

Defines the amount of time for the retention period of an FSx for ONTAP SnapLock volume. You can't set a value for INFINITE or UNSPECIFIED. For all other options, the following ranges are valid:

" + } + }, + "documentation":"

Specifies the retention period of an FSx for ONTAP SnapLock volume. After it is set, it can't be changed. Files can't be deleted or modified during the retention period.

For more information, see Working with the retention period in SnapLock.

" + }, + "RetentionPeriodType":{ + "type":"string", + "enum":[ + "SECONDS", + "MINUTES", + "HOURS", + "DAYS", + "MONTHS", + "YEARS", + "INFINITE", + "UNSPECIFIED" + ] + }, + "RetentionPeriodValue":{ + "type":"integer", + "max":65535, + "min":0 + }, "RouteTableId":{ "type":"string", "max":21, @@ -4687,6 +4802,66 @@ "documentation":"

An error indicating that a particular service limit was exceeded. You can increase some service limits by contacting Amazon Web Services Support.

", "exception":true }, + "SnaplockConfiguration":{ + "type":"structure", + "members":{ + "AuditLogVolume":{ + "shape":"Flag", + "documentation":"

Enables or disables the audit log volume for an FSx for ONTAP SnapLock volume. The default value is false. If you set AuditLogVolume to true, the SnapLock volume is created as an audit log volume. The minimum retention period for an audit log volume is six months.

For more information, see SnapLock audit log volumes.

" + }, + "AutocommitPeriod":{ + "shape":"AutocommitPeriod", + "documentation":"

The configuration object for setting the autocommit period of files in an FSx for ONTAP SnapLock volume.

" + }, + "PrivilegedDelete":{ + "shape":"PrivilegedDelete", + "documentation":"

Enables, disables, or permanently disables privileged delete on an FSx for ONTAP SnapLock Enterprise volume. Enabling privileged delete allows SnapLock administrators to delete write once, read many (WORM) files even if they have active retention periods. PERMANENTLY_DISABLED is a terminal state. If privileged delete is permanently disabled on a SnapLock volume, you can't re-enable it. The default value is DISABLED.

For more information, see Privileged delete.

" + }, + "RetentionPeriod":{ + "shape":"SnaplockRetentionPeriod", + "documentation":"

Specifies the retention period of an FSx for ONTAP SnapLock volume.

" + }, + "SnaplockType":{ + "shape":"SnaplockType", + "documentation":"

Specifies the retention mode of an FSx for ONTAP SnapLock volume. After it is set, it can't be changed. You can choose one of the following retention modes:

" + }, + "VolumeAppendModeEnabled":{ + "shape":"Flag", + "documentation":"

Enables or disables volume-append mode on an FSx for ONTAP SnapLock volume. Volume-append mode allows you to create WORM-appendable files and write data to them incrementally. The default value is false.

For more information, see Volume-append mode.

" + } + }, + "documentation":"

Specifies the SnapLock configuration for an FSx for ONTAP SnapLock volume.

" + }, + "SnaplockRetentionPeriod":{ + "type":"structure", + "required":[ + "DefaultRetention", + "MinimumRetention", + "MaximumRetention" + ], + "members":{ + "DefaultRetention":{ + "shape":"RetentionPeriod", + "documentation":"

The retention period assigned to a write once, read many (WORM) file by default if an explicit retention period is not set for an FSx for ONTAP SnapLock volume. The default retention period must be greater than or equal to the minimum retention period and less than or equal to the maximum retention period.

" + }, + "MinimumRetention":{ + "shape":"RetentionPeriod", + "documentation":"

The shortest retention period that can be assigned to a WORM file on an FSx for ONTAP SnapLock volume.

" + }, + "MaximumRetention":{ + "shape":"RetentionPeriod", + "documentation":"

The longest retention period that can be assigned to a WORM file on an FSx for ONTAP SnapLock volume.

" + } + }, + "documentation":"

The configuration to set the retention period of an FSx for ONTAP SnapLock volume. The retention period includes default, maximum, and minimum settings. For more information, see Working with the retention period in SnapLock.

" + }, + "SnaplockType":{ + "type":"string", + "enum":[ + "COMPLIANCE", + "ENTERPRISE" + ] + }, "Snapshot":{ "type":"structure", "members":{ @@ -5407,7 +5582,7 @@ }, "SecurityStyle":{ "shape":"SecurityStyle", - "documentation":"

The security style for the volume, which can be UNIX. NTFS, or MIXED.

" + "documentation":"

The security style for the volume, which can be UNIX, NTFS, or MIXED.

" }, "SizeInMegabytes":{ "shape":"VolumeCapacity", @@ -5428,6 +5603,10 @@ "CopyTagsToBackups":{ "shape":"Flag", "documentation":"

A boolean flag indicating whether tags for the volume should be copied to backups. This value defaults to false. If it's set to true, all tags for the volume are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the volume, regardless of this value.

" + }, + "SnaplockConfiguration":{ + "shape":"UpdateSnaplockConfiguration", + "documentation":"

The configuration object for updating the SnapLock configuration of an FSx for ONTAP SnapLock volume.

" } }, "documentation":"

Used to specify changes to the ONTAP configuration for the volume you are updating.

" @@ -5466,6 +5645,32 @@ }, "documentation":"

Used to specify changes to the OpenZFS configuration for the volume that you are updating.

" }, + "UpdateSnaplockConfiguration":{ + "type":"structure", + "members":{ + "AuditLogVolume":{ + "shape":"Flag", + "documentation":"

Enables or disables the audit log volume for an FSx for ONTAP SnapLock volume. The default value is false. If you set AuditLogVolume to true, the SnapLock volume is created as an audit log volume. The minimum retention period for an audit log volume is six months.

For more information, see SnapLock audit log volumes.

" + }, + "AutocommitPeriod":{ + "shape":"AutocommitPeriod", + "documentation":"

The configuration object for setting the autocommit period of files in an FSx for ONTAP SnapLock volume.

" + }, + "PrivilegedDelete":{ + "shape":"PrivilegedDelete", + "documentation":"

Enables, disables, or permanently disables privileged delete on an FSx for ONTAP SnapLock Enterprise volume. Enabling privileged delete allows SnapLock administrators to delete write once, read many (WORM) files even if they have active retention periods. PERMANENTLY_DISABLED is a terminal state. If privileged delete is permanently disabled on a SnapLock volume, you can't re-enable it. The default value is DISABLED.

For more information, see Privileged delete.

" + }, + "RetentionPeriod":{ + "shape":"SnaplockRetentionPeriod", + "documentation":"

Specifies the retention period of an FSx for ONTAP SnapLock volume.

" + }, + "VolumeAppendModeEnabled":{ + "shape":"Flag", + "documentation":"

Enables or disables volume-append mode on an FSx for ONTAP SnapLock volume. Volume-append mode allows you to create WORM-appendable files and write data to them incrementally. The default value is false.

For more information, see Volume-append mode.

" + } + }, + "documentation":"

Updates the SnapLock configuration for an existing FSx for ONTAP volume.
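A minimal boto3 sketch of updating SnapLock settings on an existing volume; the volume ID and chosen settings are illustrative:

```python
import boto3

fsx = boto3.client("fsx")

fsx.update_volume(
    VolumeId="fsvol-0123456789abcdef0",  # placeholder
    OntapConfiguration={
        "SnaplockConfiguration": {
            "PrivilegedDelete": "ENABLED",
            "VolumeAppendModeEnabled": True,
        },
    },
)
```

Note that SnaplockType is absent from UpdateSnaplockConfiguration above: the retention mode can't be changed after creation.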

" + }, "UpdateSnapshotRequest":{ "type":"structure", "required":[ @@ -5611,7 +5816,7 @@ }, "VolumeCapacity":{ "type":"integer", - "max":2147483647, + "max":314572800, "min":0 }, "VolumeFilter":{ diff --git a/botocore/data/iam/2010-05-08/service-2.json b/botocore/data/iam/2010-05-08/service-2.json index 0833ba4eb8..18ca161394 100644 --- a/botocore/data/iam/2010-05-08/service-2.json +++ b/botocore/data/iam/2010-05-08/service-2.json @@ -42,7 +42,7 @@ {"shape":"UnmodifiableEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Adds the specified IAM role to the specified instance profile. An instance profile can contain only one role, and this quota cannot be increased. You can remove the existing role and then add a different role to an instance profile. You must then wait for the change to appear across all of Amazon Web Services because of eventual consistency. To force the change, you must disassociate the instance profile and then associate the instance profile, or you can stop your instance and then restart it.

The caller of this operation must be granted the PassRole permission on the IAM role by a permissions policy.

For more information about roles, see Working with roles. For more information about instance profiles, see About instance profiles.

" + "documentation":"

Adds the specified IAM role to the specified instance profile. An instance profile can contain only one role, and this quota cannot be increased. You can remove the existing role and then add a different role to an instance profile. You must then wait for the change to appear across all of Amazon Web Services because of eventual consistency. To force the change, you must disassociate the instance profile and then associate the instance profile, or you can stop your instance and then restart it.

The caller of this operation must be granted the PassRole permission on the IAM role by a permissions policy.

For more information about roles, see IAM roles in the IAM User Guide. For more information about instance profiles, see Using instance profiles in the IAM User Guide.
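A minimal boto3 sketch of the role swap described above; the profile and role names are placeholders, and the caller needs the PassRole permission on the role being added:

```python
import boto3

iam = boto3.client("iam")

iam.remove_role_from_instance_profile(
    InstanceProfileName="web-profile", RoleName="old-role"  # placeholders
)
iam.add_role_to_instance_profile(
    InstanceProfileName="web-profile", RoleName="new-role"
)
```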

" }, "AddUserToGroup":{ "name":"AddUserToGroup", @@ -234,7 +234,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).

The OIDC provider that you create with this operation can be used as a principal in a role's trust policy. Such a policy establishes a trust relationship between Amazon Web Services and the OIDC provider.

If you are using an OIDC identity provider from Google, Facebook, or Amazon Cognito, you don't need to create a separate IAM identity provider. These OIDC identity providers are already built-in to Amazon Web Services and are available for your use. Instead, you can move directly to creating new roles using your identity provider. To learn more, see Creating a role for web identity or OpenID connect federation in the IAM User Guide.

When you create the IAM OIDC provider, you specify the following:

You get all of this information from the OIDC IdP you want to use to access Amazon Web Services.

Amazon Web Services secures communication with some OIDC identity providers (IdPs) through our library of trusted certificate authorities (CAs) instead of using a certificate thumbprint to verify your IdP server certificate. These OIDC IdPs include Google, Auth0, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint. In these cases, your legacy thumbprint remains in your configuration, but is no longer used for validation.

The trust for the OIDC provider is derived from the IAM provider that this operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged users.

" + "documentation":"

Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).

The OIDC provider that you create with this operation can be used as a principal in a role's trust policy. Such a policy establishes a trust relationship between Amazon Web Services and the OIDC provider.

If you are using an OIDC identity provider from Google, Facebook, or Amazon Cognito, you don't need to create a separate IAM identity provider. These OIDC identity providers are already built in to Amazon Web Services and are available for your use. Instead, you can move directly to creating new roles using your identity provider. To learn more, see Creating a role for web identity or OpenID Connect federation in the IAM User Guide.

When you create the IAM OIDC provider, you specify the following:

You get all of this information from the OIDC IdP you want to use to access Amazon Web Services.

Amazon Web Services secures communication with some OIDC identity providers (IdPs) through our library of trusted root certificate authorities (CAs) instead of using a certificate thumbprint to verify your IdP server certificate. These OIDC IdPs include Auth0, GitHub, Google, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint. In these cases, your legacy thumbprint remains in your configuration, but is no longer used for validation.

The trust for the OIDC provider is derived from the IAM provider that this operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged users.
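A minimal boto3 sketch of the call; the issuer URL, audience, and thumbprint below are placeholders:

```python
import boto3

iam = boto3.client("iam")

iam.create_open_id_connect_provider(
    Url="https://oidc.example.com",  # placeholder issuer
    ClientIDList=["sts.amazonaws.com"],
    # For IdPs covered by the trusted-CA library described above, a legacy
    # thumbprint stays in the configuration but is no longer used.
    ThumbprintList=["c3768084dfb3d2b68b7897bf5f565da8efddd59e"],  # placeholder
)
```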

" }, "CreatePolicy":{ "name":"CreatePolicy", @@ -296,7 +296,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates a new role for your Amazon Web Services account. For more information about roles, see IAM roles. For information about quotas for role names and the number of roles you can create, see IAM and STS quotas in the IAM User Guide.

" + "documentation":"

Creates a new role for your Amazon Web Services account.

For more information about roles, see IAM roles in the IAM User Guide. For information about quotas for role names and the number of roles you can create, see IAM and STS quotas in the IAM User Guide.

" }, "CreateSAMLProvider":{ "name":"CreateSAMLProvider", @@ -496,7 +496,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the specified instance profile. The instance profile must not have an associated role.

Make sure that you do not have any Amazon EC2 instances running with the instance profile you are about to delete. Deleting a role or instance profile that is associated with a running instance will break any applications running on the instance.

For more information about instance profiles, see About instance profiles.

" + "documentation":"

Deletes the specified instance profile. The instance profile must not have an associated role.

Make sure that you do not have any Amazon EC2 instances running with the instance profile you are about to delete. Deleting a role or instance profile that is associated with a running instance will break any applications running on the instance.

For more information about instance profiles, see Using instance profiles in the IAM User Guide.

" }, "DeleteLoginProfile":{ "name":"DeleteLoginProfile", @@ -1027,7 +1027,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves information about the specified instance profile, including the instance profile's path, GUID, ARN, and role. For more information about instance profiles, see About instance profiles in the IAM User Guide.

" + "documentation":"

Retrieves information about the specified instance profile, including the instance profile's path, GUID, ARN, and role. For more information about instance profiles, see Using instance profiles in the IAM User Guide.

" }, "GetLoginProfile":{ "name":"GetLoginProfile", @@ -1148,7 +1148,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves information about the specified role, including the role's path, GUID, ARN, and the role's trust policy that grants permission to assume the role. For more information about roles, see Working with roles.

Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

" + "documentation":"

Retrieves information about the specified role, including the role's path, GUID, ARN, and the role's trust policy that grants permission to assume the role. For more information about roles, see IAM roles in the IAM User Guide.

Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.
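In Python, decoding such a document at the raw Query API level might look like the sketch below; boto3/botocore typically decode and parse these policy documents for you, so this is illustrative only:

```python
import json
from urllib.parse import unquote

# An RFC 3986 URL-encoded policy document, as returned on the wire.
encoded = "%7B%22Version%22%3A%222012-10-17%22%2C%22Statement%22%3A%5B%5D%7D"
policy = json.loads(unquote(encoded))
print(policy["Version"])  # 2012-10-17
```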

" }, "GetRolePolicy":{ "name":"GetRolePolicy", @@ -1165,7 +1165,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves the specified inline policy document that is embedded with the specified IAM role.

Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

An IAM role can also have managed policies attached to it. To retrieve a managed policy document that is attached to a role, use GetPolicy to determine the policy's default version, then use GetPolicyVersion to retrieve the policy document.

For more information about policies, see Managed policies and inline policies in the IAM User Guide.

For more information about roles, see Using roles to delegate permissions and federate identities.

" + "documentation":"

Retrieves the specified inline policy document that is embedded with the specified IAM role.

Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

An IAM role can also have managed policies attached to it. To retrieve a managed policy document that is attached to a role, use GetPolicy to determine the policy's default version, then use GetPolicyVersion to retrieve the policy document.

For more information about policies, see Managed policies and inline policies in the IAM User Guide.

For more information about roles, see IAM roles in the IAM User Guide.

" }, "GetSAMLProvider":{ "name":"GetSAMLProvider", @@ -1491,7 +1491,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the instance profiles that have the specified path prefix. If there are none, the operation returns an empty list. For more information about instance profiles, see About instance profiles.

IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for an instance profile, see GetInstanceProfile.

You can paginate the results using the MaxItems and Marker parameters.

" + "documentation":"

Lists the instance profiles that have the specified path prefix. If there are none, the operation returns an empty list. For more information about instance profiles, see Using instance profiles in the IAM User Guide.

IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for an instance profile, see GetInstanceProfile.

You can paginate the results using the MaxItems and Marker parameters.

" }, "ListInstanceProfilesForRole":{ "name":"ListInstanceProfilesForRole", @@ -1508,7 +1508,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the instance profiles that have the specified associated IAM role. If there are none, the operation returns an empty list. For more information about instance profiles, go to About instance profiles.

You can paginate the results using the MaxItems and Marker parameters.

" + "documentation":"

Lists the instance profiles that have the specified associated IAM role. If there are none, the operation returns an empty list. For more information about instance profiles, see Using instance profiles in the IAM User Guide.

You can paginate the results using the MaxItems and Marker parameters.

" }, "ListMFADeviceTags":{ "name":"ListMFADeviceTags", @@ -1696,7 +1696,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the IAM roles that have the specified path prefix. If there are none, the operation returns an empty list. For more information about roles, see Working with roles.

IAM resource-listing operations return a subset of the available attributes for the resource. This operation does not return the following attributes, even though they are an attribute of the returned object:

To view all of the information for a role, see GetRole.

You can paginate the results using the MaxItems and Marker parameters.

" + "documentation":"

Lists the IAM roles that have the specified path prefix. If there are none, the operation returns an empty list. For more information about roles, see IAM roles in the IAM User Guide.

IAM resource-listing operations return a subset of the available attributes for the resource. This operation does not return the following attributes, even though they are an attribute of the returned object:

To view all of the information for a role, see GetRole.

You can paginate the results using the MaxItems and Marker parameters.

" }, "ListSAMLProviderTags":{ "name":"ListSAMLProviderTags", @@ -1923,7 +1923,7 @@ {"shape":"UnmodifiableEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Adds or updates an inline policy document that is embedded in the specified IAM role.

When you embed an inline policy in a role, the inline policy is used as part of the role's access (permissions) policy. The role's trust policy is created at the same time as the role, using CreateRole . You can update a role's trust policy using UpdateAssumerolePolicy . For more information about IAM roles, see Using roles to delegate permissions and federate identities.

A role can also have a managed policy attached to it. To attach a managed policy to a role, use AttachRolePolicy . To create a new managed policy, use CreatePolicy . For information about policies, see Managed policies and inline policies in the IAM User Guide.

For information about the maximum number of inline policies that you can embed with a role, see IAM and STS quotas in the IAM User Guide.

Because policy documents can be large, you should use POST rather than GET when calling PutRolePolicy. For general information about using the Query API with IAM, see Making query requests in the IAM User Guide.

" + "documentation":"

Adds or updates an inline policy document that is embedded in the specified IAM role.

When you embed an inline policy in a role, the inline policy is used as part of the role's access (permissions) policy. The role's trust policy is created at the same time as the role, using CreateRole. You can update a role's trust policy using UpdateAssumeRolePolicy. For more information about roles, see IAM roles in the IAM User Guide.

A role can also have a managed policy attached to it. To attach a managed policy to a role, use AttachRolePolicy. To create a new managed policy, use CreatePolicy. For information about policies, see Managed policies and inline policies in the IAM User Guide.

For information about the maximum number of inline policies that you can embed with a role, see IAM and STS quotas in the IAM User Guide.

Because policy documents can be large, you should use POST rather than GET when calling PutRolePolicy. For general information about using the Query API with IAM, see Making query requests in the IAM User Guide.
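With the generated clients the POST-versus-GET concern is handled for you; the policy document is passed as a JSON string. A minimal boto3 sketch (the role, policy name, and bucket are placeholders):

```python
import json

import boto3

iam = boto3.client("iam")

policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::example-bucket/*",
        }
    ],
}

# Embeds (or replaces) the inline policy named 'S3ReadAccess' on the role.
iam.put_role_policy(
    RoleName="ExampleRole",
    PolicyName="S3ReadAccess",
    PolicyDocument=json.dumps(policy),
)
```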

" }, "PutUserPermissionsBoundary":{ "name":"PutUserPermissionsBoundary", @@ -1982,7 +1982,7 @@ {"shape":"UnmodifiableEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Removes the specified IAM role from the specified EC2 instance profile.

Make sure that you do not have any Amazon EC2 instances running with the role you are about to remove from the instance profile. Removing a role from an instance profile that is associated with a running instance might break any applications running on the instance.

For more information about IAM roles, see Working with roles. For more information about instance profiles, see About instance profiles.

" + "documentation":"

Removes the specified IAM role from the specified EC2 instance profile.

Make sure that you do not have any Amazon EC2 instances running with the role you are about to remove from the instance profile. Removing a role from an instance profile that is associated with a running instance might break any applications running on the instance.

For more information about roles, see IAM roles in the IAM User Guide. For more information about instance profiles, see Using instance profiles in the IAM User Guide.

" }, "RemoveUserFromGroup":{ "name":"RemoveUserFromGroup", @@ -2426,7 +2426,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Replaces the existing list of server certificate thumbprints associated with an OpenID Connect (OIDC) provider resource object with a new list of thumbprints.

The list that you pass with this operation completely replaces the existing list of thumbprints. (The lists are not merged.)

Typically, you need to update a thumbprint only when the identity provider certificate changes, which occurs rarely. However, if the provider's certificate does change, any attempt to assume an IAM role that specifies the OIDC provider as a principal fails until the certificate thumbprint is updated.

Amazon Web Services secures communication with some OIDC identity providers (IdPs) through our library of trusted certificate authorities (CAs) instead of using a certificate thumbprint to verify your IdP server certificate. These OIDC IdPs include Google, Auth0, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint. In these cases, your legacy thumbprint remains in your configuration, but is no longer used for validation.

Trust for the OIDC provider is derived from the provider certificate and is validated by the thumbprint. Therefore, it is best to limit access to the UpdateOpenIDConnectProviderThumbprint operation to highly privileged users.

" + "documentation":"

Replaces the existing list of server certificate thumbprints associated with an OpenID Connect (OIDC) provider resource object with a new list of thumbprints.

The list that you pass with this operation completely replaces the existing list of thumbprints. (The lists are not merged.)

Typically, you need to update a thumbprint only when the identity provider certificate changes, which occurs rarely. However, if the provider's certificate does change, any attempt to assume an IAM role that specifies the OIDC provider as a principal fails until the certificate thumbprint is updated.

Amazon Web Services secures communication with some OIDC identity providers (IdPs) through our library of trusted root certificate authorities (CAs) instead of using a certificate thumbprint to verify your IdP server certificate. These OIDC IdPs include Auth0, GitHub, Google, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint. In these cases, your legacy thumbprint remains in your configuration, but is no longer used for validation.

Trust for the OIDC provider is derived from the provider certificate and is validated by the thumbprint. Therefore, it is best to limit access to the UpdateOpenIDConnectProviderThumbprint operation to highly privileged users.
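A minimal boto3 sketch of replacing the thumbprint list (the provider ARN and thumbprint are placeholders):

```python
import boto3

iam = boto3.client("iam")

# The list passed here completely replaces the existing thumbprints; nothing is merged.
iam.update_open_id_connect_provider_thumbprint(
    OpenIDConnectProviderArn="arn:aws:iam::123456789012:oidc-provider/oidc.example.com",
    ThumbprintList=["9e99a48a9960b14926bb7f3b02e22da2b0ab7280"],  # placeholder SHA-1 hash
)
```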

" }, "UpdateRole":{ "name":"UpdateRole", @@ -6834,7 +6834,7 @@ }, "PolicyDocument":{ "shape":"policyDocumentType", - "documentation":"

The policy document.

You must provide policies in JSON format in IAM. However, for CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. CloudFormation always converts a YAML policy to JSON format before submitting it to = IAM.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

" + "documentation":"

The policy document.

You must provide policies in JSON format in IAM. However, for CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

" } } }, diff --git a/botocore/data/mediatailor/2018-04-23/service-2.json b/botocore/data/mediatailor/2018-04-23/service-2.json index 26e8f4aba6..9aa11e84a2 100644 --- a/botocore/data/mediatailor/2018-04-23/service-2.json +++ b/botocore/data/mediatailor/2018-04-23/service-2.json @@ -621,6 +621,10 @@ "shape":"__string", "documentation":"

If an alert is generated for a resource, an explanation of the reason for the alert.

" }, + "Category":{ + "shape":"AlertCategory", + "documentation":"

The category that MediaTailor assigns to the alert.

" + }, "LastModifiedTime":{ "shape":"__timestampUnix", "documentation":"

The timestamp when the alert was last modified.

" @@ -636,6 +640,14 @@ }, "documentation":"

Alert configuration parameters.
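With this model update, each alert returned by ListAlerts also carries the new Category field. A minimal boto3 sketch (the channel ARN is a placeholder):

```python
import boto3

mediatailor = boto3.client("mediatailor")

resp = mediatailor.list_alerts(
    ResourceArn="arn:aws:mediatailor:us-east-1:123456789012:channel/example-channel"
)
for alert in resp["Items"]:
    # Category is one of SCHEDULING_ERROR, PLAYBACK_WARNING, or INFO.
    print(alert["AlertCode"], alert.get("Category"), alert["AlertMessage"])
```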

" }, + "AlertCategory":{ + "type":"string", + "enum":[ + "SCHEDULING_ERROR", + "PLAYBACK_WARNING", + "INFO" + ] + }, "AvailMatchingCriteria":{ "type":"structure", "required":[ @@ -2151,7 +2163,7 @@ }, "Value":{ "shape":"String", - "documentation":"

For SCTE35_ENHANCED output, defines a vaue. MediaTailor; takes this value, and its associated key, and generates the key/value pair within the EXT-X-ASSETtag. If you specify a value, you must also specify a corresponding key.

" + "documentation":"

For SCTE35_ENHANCED output, defines a value. MediaTailor takes this value, and its associated key, and generates the key/value pair within the EXT-X-ASSET tag. If you specify a value, you must also specify a corresponding key.

" } }, "documentation":"

For SCTE35_ENHANCED output, defines a key and corresponding value. MediaTailor generates these pairs within the EXT-X-ASSET tag.

" diff --git a/botocore/data/personalize/2018-05-22/service-2.json b/botocore/data/personalize/2018-05-22/service-2.json index a2db3f61df..8ba03fa9f1 100644 --- a/botocore/data/personalize/2018-05-22/service-2.json +++ b/botocore/data/personalize/2018-05-22/service-2.json @@ -1017,6 +1017,22 @@ "documentation":"

Updates a campaign by either deploying a new solution or changing the value of the campaign's minProvisionedTPS parameter.

To update a campaign, the campaign status must be ACTIVE or CREATE FAILED. Check the campaign status using the DescribeCampaign operation.

You can still get recommendations from a campaign while an update is in progress. The campaign will use the previous solution version and campaign configuration to generate recommendations until the latest campaign update status is Active.

For more information on campaigns, see CreateCampaign.
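A minimal boto3 sketch of an in-place campaign update (all ARNs are placeholders); the campaign keeps serving the previous solution version until the update reports ACTIVE:

```python
import boto3

personalize = boto3.client("personalize")
campaign_arn = "arn:aws:personalize:us-east-1:123456789012:campaign/example"

# Deploy a new solution version to the existing campaign.
personalize.update_campaign(
    campaignArn=campaign_arn,
    solutionVersionArn="arn:aws:personalize:us-east-1:123456789012:solution/example/2",
    minProvisionedTPS=1,
)

# Poll the update status; recommendations keep flowing meanwhile.
campaign = personalize.describe_campaign(campaignArn=campaign_arn)["campaign"]
print(campaign.get("latestCampaignUpdate", {}).get("status"))
```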

", "idempotent":true }, + "UpdateDataset":{ + "name":"UpdateDataset", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDatasetRequest"}, + "output":{"shape":"UpdateDatasetResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

Update a dataset to replace its schema with a new or existing one. For more information, see Replacing a dataset's schema.
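The new UpdateDataset operation surfaces in boto3 as update_dataset; the swap is asynchronous, so DescribeDataset reports progress under latestDatasetUpdate. A minimal sketch (ARNs are placeholders):

```python
import boto3

personalize = boto3.client("personalize")

# Replace the dataset's schema with another registered schema.
resp = personalize.update_dataset(
    datasetArn="arn:aws:personalize:us-east-1:123456789012:dataset/example/INTERACTIONS",
    schemaArn="arn:aws:personalize:us-east-1:123456789012:schema/example-schema-v2",
)

dataset = personalize.describe_dataset(datasetArn=resp["datasetArn"])["dataset"]
print(dataset.get("latestDatasetUpdate", {}).get("status"))
```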

", + "idempotent":true + }, "UpdateMetricAttribution":{ "name":"UpdateMetricAttribution", "http":{ @@ -2094,7 +2110,7 @@ }, "recipeArn":{ "shape":"Arn", - "documentation":"

The ARN of the recipe to use for model training. Only specified when performAutoML is false.

" + "documentation":"

The ARN of the recipe to use for model training. This is required when performAutoML is false.

" }, "datasetGroupArn":{ "shape":"Arn", @@ -2198,6 +2214,10 @@ "lastUpdatedDateTime":{ "shape":"Date", "documentation":"

A time stamp that shows when the dataset was updated.

" + }, + "latestDatasetUpdate":{ + "shape":"DatasetUpdateSummary", + "documentation":"

Describes the latest update to the dataset.

" } }, "documentation":"

Provides metadata for a dataset.

" @@ -2551,6 +2571,32 @@ "type":"string", "max":256 }, + "DatasetUpdateSummary":{ + "type":"structure", + "members":{ + "schemaArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the schema that replaced the previous schema of the dataset.

" + }, + "status":{ + "shape":"Status", + "documentation":"

The status of the dataset update.

" + }, + "failureReason":{ + "shape":"FailureReason", + "documentation":"

If updating a dataset fails, provides the reason why.

" + }, + "creationDateTime":{ + "shape":"Date", + "documentation":"

The creation date and time (in Unix time) of the dataset update.

" + }, + "lastUpdatedDateTime":{ + "shape":"Date", + "documentation":"

The last update date and time (in Unix time) of the dataset.

" + } + }, + "documentation":"

Describes an update to a dataset.

" + }, "Datasets":{ "type":"list", "member":{"shape":"DatasetSummary"}, @@ -4456,7 +4502,7 @@ }, "recipeArn":{ "shape":"Arn", - "documentation":"

The ARN of the recipe used to create the solution.

" + "documentation":"

The ARN of the recipe used to create the solution. This is required when performAutoML is false.

" }, "datasetGroupArn":{ "shape":"Arn", @@ -4892,6 +4938,32 @@ } } }, + "UpdateDatasetRequest":{ + "type":"structure", + "required":[ + "datasetArn", + "schemaArn" + ], + "members":{ + "datasetArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset that you want to update.

" + }, + "schemaArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the new schema you want to use.

" + } + } + }, + "UpdateDatasetResponse":{ + "type":"structure", + "members":{ + "datasetArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset you updated.

" + } + } + }, "UpdateMetricAttributionRequest":{ "type":"structure", "members":{ diff --git a/botocore/data/proton/2020-07-20/paginators-1.json b/botocore/data/proton/2020-07-20/paginators-1.json index 450f2e2908..a52075c41e 100644 --- a/botocore/data/proton/2020-07-20/paginators-1.json +++ b/botocore/data/proton/2020-07-20/paginators-1.json @@ -110,6 +110,12 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "components" + }, + "ListDeployments": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "deployments" } } } diff --git a/botocore/data/proton/2020-07-20/service-2.json b/botocore/data/proton/2020-07-20/service-2.json index 35dbe3926f..d669f9a5f8 100644 --- a/botocore/data/proton/2020-07-20/service-2.json +++ b/botocore/data/proton/2020-07-20/service-2.json @@ -356,6 +356,24 @@ "documentation":"

Delete a Proton component resource.

For more information about components, see Proton components in the Proton User Guide.

", "idempotent":true }, + "DeleteDeployment":{ + "name":"DeleteDeployment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDeploymentInput"}, + "output":{"shape":"DeleteDeploymentOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Delete the deployment.

", + "idempotent":true + }, "DeleteEnvironment":{ "name":"DeleteEnvironment", "http":{ @@ -580,6 +598,23 @@ ], "documentation":"

Get detailed data for a component.

For more information about components, see Proton components in the Proton User Guide.

" }, + "GetDeployment":{ + "name":"GetDeployment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDeploymentInput"}, + "output":{"shape":"GetDeploymentOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Get detailed data for a deployment.

" + }, "GetEnvironment":{ "name":"GetEnvironment", "http":{ @@ -901,6 +936,23 @@ ], "documentation":"

List components with summary data. You can filter the result list by environment, service, or a single service instance.

For more information about components, see Proton components in the Proton User Guide.

" }, + "ListDeployments":{ + "name":"ListDeployments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDeploymentsInput"}, + "output":{"shape":"ListDeploymentsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

List deployments. You can filter the result list by environment, service, or a single service instance.
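This release also registers a ListDeployments paginator (see the paginators-1.json hunk above), so nextToken handling is automatic. A minimal boto3 sketch (the environment name is a placeholder):

```python
import boto3

proton = boto3.client("proton")

paginator = proton.get_paginator("list_deployments")
for page in paginator.paginate(environmentName="example-env"):  # filters are optional
    for summary in page["deployments"]:
        print(summary["id"], summary["targetResourceType"], summary["deploymentStatus"])
        # GetDeployment returns the full record, including initial and target states.
        detail = proton.get_deployment(id=summary["id"])["deployment"]
```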

" + }, "ListEnvironmentAccountConnections":{ "name":"ListEnvironmentAccountConnections", "http":{ @@ -1763,6 +1815,10 @@ "shape":"ResourceName", "documentation":"

The name of the Proton environment that this component is associated with.

" }, + "lastAttemptedDeploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the last attempted deployment of this component.

" + }, "lastClientRequestToken":{ "shape":"String", "documentation":"

The last token the client requested.

" @@ -1779,6 +1835,10 @@ "shape":"Timestamp", "documentation":"

The time when the component was last modified.

" }, + "lastSucceededDeploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the last successful deployment of this component.

" + }, "name":{ "shape":"ResourceName", "documentation":"

The name of the component.

" @@ -1799,6 +1859,12 @@ "documentation":"

Detailed data of a Proton component resource.

For more information about components, see Proton components in the Proton User Guide.

" }, "ComponentArn":{"type":"string"}, + "ComponentDeploymentIdList":{ + "type":"list", + "member":{"shape":"DeploymentId"}, + "max":1, + "min":0 + }, "ComponentDeploymentUpdateType":{ "type":"string", "enum":[ @@ -1806,6 +1872,28 @@ "CURRENT_VERSION" ] }, + "ComponentState":{ + "type":"structure", + "members":{ + "serviceInstanceName":{ + "shape":"ResourceNameOrEmpty", + "documentation":"

The name of the service instance that this component is attached to. Provided when a component is attached to a service instance.

" + }, + "serviceName":{ + "shape":"ResourceNameOrEmpty", + "documentation":"

The name of the service that serviceInstanceName is associated with. Provided when a component is attached to a service instance.

" + }, + "serviceSpec":{ + "shape":"SpecContents", + "documentation":"

The service spec that the component uses to access service inputs. Provided when a component is attached to a service instance.

" + }, + "templateFile":{ + "shape":"TemplateFileContents", + "documentation":"

The template file used.

" + } + }, + "documentation":"

The detailed data about the current state of the component.

" + }, "ComponentSummary":{ "type":"structure", "required":[ @@ -1837,6 +1925,10 @@ "shape":"ResourceName", "documentation":"

The name of the Proton environment that this component is associated with.

" }, + "lastAttemptedDeploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the last attempted deployment of this component.

" + }, "lastDeploymentAttemptedAt":{ "shape":"Timestamp", "documentation":"

The time when a deployment of the component was last attempted.

" @@ -1849,6 +1941,10 @@ "shape":"Timestamp", "documentation":"

The time when the component was last modified.

" }, + "lastSucceededDeploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the last successful deployment of this component.

" + }, "name":{ "shape":"ResourceName", "documentation":"

The name of the component.

" @@ -2521,6 +2617,25 @@ } } }, + "DeleteDeploymentInput":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"DeploymentId", + "documentation":"

The ID of the deployment to delete.

" + } + } + }, + "DeleteDeploymentOutput":{ + "type":"structure", + "members":{ + "deployment":{ + "shape":"Deployment", + "documentation":"

The detailed data of the deployment being deleted.

" + } + } + }, "DeleteEnvironmentAccountConnectionInput":{ "type":"structure", "required":["id"], @@ -2749,10 +2864,123 @@ } } }, + "Deployment":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "deploymentStatus", + "environmentName", + "id", + "lastModifiedAt", + "targetArn", + "targetResourceCreatedAt", + "targetResourceType" + ], + "members":{ + "arn":{ + "shape":"DeploymentArn", + "documentation":"

The Amazon Resource Name (ARN) of the deployment.

" + }, + "completedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time the deployment was completed.

" + }, + "componentName":{ + "shape":"ResourceName", + "documentation":"

The name of the component associated with this deployment.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The date and time the deployment was created.

" + }, + "deploymentStatus":{ + "shape":"DeploymentStatus", + "documentation":"

The status of the deployment.

" + }, + "deploymentStatusMessage":{ + "shape":"StatusMessage", + "documentation":"

The deployment status message.

" + }, + "environmentName":{ + "shape":"ResourceName", + "documentation":"

The name of the environment associated with this deployment.

" + }, + "id":{ + "shape":"DeploymentId", + "documentation":"

The ID of the deployment.

" + }, + "initialState":{ + "shape":"DeploymentState", + "documentation":"

The initial state of the target resource at the time of the deployment.

" + }, + "lastAttemptedDeploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the last attempted deployment.

" + }, + "lastModifiedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time the deployment was last modified.

" + }, + "lastSucceededDeploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the last successful deployment.

" + }, + "serviceInstanceName":{ + "shape":"ResourceName", + "documentation":"

The name of the deployment's service instance.

" + }, + "serviceName":{ + "shape":"ResourceName", + "documentation":"

The name of the service in this deployment.

" + }, + "targetArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the target of the deployment.

" + }, + "targetResourceCreatedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time the deployment target was created.

" + }, + "targetResourceType":{ + "shape":"DeploymentTargetResourceType", + "documentation":"

The resource type of the deployment target. It can be an environment, service, service instance, or component.

" + }, + "targetState":{ + "shape":"DeploymentState", + "documentation":"

The target state of the target resource at the time of the deployment.

" + } + }, + "documentation":"

The detailed information about a deployment.

" + }, + "DeploymentArn":{"type":"string"}, "DeploymentId":{ "type":"string", "pattern":"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" }, + "DeploymentState":{ + "type":"structure", + "members":{ + "component":{ + "shape":"ComponentState", + "documentation":"

The state of the component associated with the deployment.

" + }, + "environment":{ + "shape":"EnvironmentState", + "documentation":"

The state of the environment associated with the deployment.

" + }, + "serviceInstance":{ + "shape":"ServiceInstanceState", + "documentation":"

The state of the service instance associated with the deployment.

" + }, + "servicePipeline":{ + "shape":"ServicePipelineState", + "documentation":"

The state of the service pipeline associated with the deployment.

" + } + }, + "documentation":"

The detailed data about the current state of the deployment.

", + "union":true + }, "DeploymentStatus":{ "type":"string", "enum":[ @@ -2766,6 +2994,96 @@ "CANCELLED" ] }, + "DeploymentSummary":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "deploymentStatus", + "environmentName", + "id", + "lastModifiedAt", + "targetArn", + "targetResourceCreatedAt", + "targetResourceType" + ], + "members":{ + "arn":{ + "shape":"DeploymentArn", + "documentation":"

The Amazon Resource Name (ARN) of the deployment.

" + }, + "completedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time the deployment was completed.

" + }, + "componentName":{ + "shape":"ResourceName", + "documentation":"

The name of the component associated with the deployment.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The date and time the deployment was created.

" + }, + "deploymentStatus":{ + "shape":"DeploymentStatus", + "documentation":"

The current status of the deployment.

" + }, + "environmentName":{ + "shape":"ResourceName", + "documentation":"

The name of the environment associated with the deployment.

" + }, + "id":{ + "shape":"DeploymentId", + "documentation":"

The ID of the deployment.

" + }, + "lastAttemptedDeploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the last attempted deployment.

" + }, + "lastModifiedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time the deployment was last modified.

" + }, + "lastSucceededDeploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the last successful deployment.

" + }, + "serviceInstanceName":{ + "shape":"ResourceName", + "documentation":"

The name of the service instance associated with the deployment.

" + }, + "serviceName":{ + "shape":"ResourceName", + "documentation":"

The name of the service associated with the deployment.

" + }, + "targetArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the target of the deployment.

" + }, + "targetResourceCreatedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time the target resource was created.

" + }, + "targetResourceType":{ + "shape":"DeploymentTargetResourceType", + "documentation":"

The resource type of the deployment target. It can be an environment, service, service instance, or component.

" + } + }, + "documentation":"

Summary data of the deployment.

" + }, + "DeploymentSummaryList":{ + "type":"list", + "member":{"shape":"DeploymentSummary"} + }, + "DeploymentTargetResourceType":{ + "type":"string", + "enum":[ + "ENVIRONMENT", + "SERVICE_PIPELINE", + "SERVICE_INSTANCE", + "COMPONENT" + ] + }, "DeploymentUpdateType":{ "type":"string", "enum":[ @@ -2842,6 +3160,10 @@ "shape":"AwsAccountId", "documentation":"

The ID of the environment account that the environment infrastructure resources are provisioned in.

" }, + "lastAttemptedDeploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the last attempted deployment of this environment.

" + }, "lastDeploymentAttemptedAt":{ "shape":"Timestamp", "documentation":"

The time when a deployment of the environment was last attempted.

" @@ -2850,6 +3172,10 @@ "shape":"Timestamp", "documentation":"

The time when the environment was last deployed successfully.

" }, + "lastSucceededDeploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the last successful deployment of this environment.

" + }, "name":{ "shape":"ResourceName", "documentation":"

The name of the environment.

" @@ -3032,6 +3358,33 @@ "member":{"shape":"EnvironmentAccountConnectionSummary"} }, "EnvironmentArn":{"type":"string"}, + "EnvironmentState":{ + "type":"structure", + "required":[ + "templateMajorVersion", + "templateMinorVersion", + "templateName" + ], + "members":{ + "spec":{ + "shape":"SpecContents", + "documentation":"

The environment spec that was used to create the environment.

" + }, + "templateMajorVersion":{ + "shape":"TemplateVersionPart", + "documentation":"

The major version of the environment template that was used to create the environment.

" + }, + "templateMinorVersion":{ + "shape":"TemplateVersionPart", + "documentation":"

The minor version of the environment template that was used to create the environment.

" + }, + "templateName":{ + "shape":"ResourceName", + "documentation":"

The name of the environment template that was used to create the environment.

" + } + }, + "documentation":"

The detailed data about the current state of the environment.

" + }, "EnvironmentSummary":{ "type":"structure", "required":[ @@ -3078,6 +3431,10 @@ "shape":"AwsAccountId", "documentation":"

The ID of the environment account that the environment infrastructure resources are provisioned in.

" }, + "lastAttemptedDeploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the last attempted deployment of this environment.

" + }, "lastDeploymentAttemptedAt":{ "shape":"Timestamp", "documentation":"

The time when a deployment of the environment was last attempted.

" @@ -3086,6 +3443,10 @@ "shape":"Timestamp", "documentation":"

The time when the environment was last deployed successfully.

" }, + "lastSucceededDeploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the last successful deployment of this environment.

" + }, "name":{ "shape":"ResourceName", "documentation":"

The name of the environment.

" @@ -3398,6 +3759,41 @@ } } }, + "GetDeploymentInput":{ + "type":"structure", + "required":["id"], + "members":{ + "componentName":{ + "shape":"ResourceName", + "documentation":"

The name of a component that you want to get the detailed data for.

" + }, + "environmentName":{ + "shape":"ResourceName", + "documentation":"

The name of an environment that you want to get the detailed data for.

" + }, + "id":{ + "shape":"DeploymentId", + "documentation":"

The ID of the deployment that you want to get the detailed data for.

" + }, + "serviceInstanceName":{ + "shape":"ResourceName", + "documentation":"

The name of the service instance associated with the given deployment ID. serviceName must be specified to identify the service instance.

" + }, + "serviceName":{ + "shape":"ResourceName", + "documentation":"

The name of the service associated with the given deployment ID.

" + } + } + }, + "GetDeploymentOutput":{ + "type":"structure", + "members":{ + "deployment":{ + "shape":"Deployment", + "documentation":"

The detailed data of the requested deployment.

" + } + } + }, "GetEnvironmentAccountConnectionInput":{ "type":"structure", "required":["id"], @@ -3839,6 +4235,10 @@ "shape":"ResourceName", "documentation":"

The name of the component whose outputs you want.

" }, + "deploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the deployment whose outputs you want.

" + }, "nextToken":{ "shape":"EmptyNextToken", "documentation":"

A token that indicates the location of the next output in the array of outputs, after the list of outputs that was previously requested.

" @@ -3926,6 +4326,49 @@ } } }, + "ListDeploymentsInput":{ + "type":"structure", + "members":{ + "componentName":{ + "shape":"ResourceName", + "documentation":"

The name of a component for result list filtering. Proton returns deployments associated with that component.

" + }, + "environmentName":{ + "shape":"ResourceName", + "documentation":"

The name of an environment for result list filtering. Proton returns deployments associated with the environment.

" + }, + "maxResults":{ + "shape":"MaxPageResults", + "documentation":"

The maximum number of deployments to list.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token that indicates the location of the next deployment in the array of deployments, after the list of deployments that was previously requested.

" + }, + "serviceInstanceName":{ + "shape":"ResourceName", + "documentation":"

The name of a service instance for result list filtering. Proton returns the deployments associated with the service instance.

" + }, + "serviceName":{ + "shape":"ResourceName", + "documentation":"

The name of a service for result list filtering. Proton returns deployments associated with service instances of the service.

" + } + } + }, + "ListDeploymentsOutput":{ + "type":"structure", + "required":["deployments"], + "members":{ + "deployments":{ + "shape":"DeploymentSummaryList", + "documentation":"

An array of deployments with summary data.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token that indicates the location of the next deployment in the array of deployments, after the currently requested list of deployments.

" + } + } + }, "ListEnvironmentAccountConnectionsInput":{ "type":"structure", "required":["requestedBy"], @@ -3970,6 +4413,10 @@ "type":"structure", "required":["environmentName"], "members":{ + "deploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the deployment whose outputs you want.

" + }, "environmentName":{ "shape":"ResourceName", "documentation":"

The environment name.

" @@ -4190,6 +4637,10 @@ "serviceName" ], "members":{ + "deploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the deployment whose outputs you want.

" + }, "nextToken":{ "shape":"EmptyNextToken", "documentation":"

A token that indicates the location of the next output in the array of outputs, after the list of outputs that was previously requested.

" @@ -4346,6 +4797,10 @@ "type":"structure", "required":["serviceName"], "members":{ + "deploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the deployment you want the outputs for.

" + }, "nextToken":{ "shape":"EmptyNextToken", "documentation":"

A token that indicates the location of the next output in the array of outputs, after the list of outputs that was previously requested.

" @@ -5220,6 +5675,10 @@ "shape":"ResourceName", "documentation":"

The name of the environment that the service instance was deployed into.

" }, + "lastAttemptedDeploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the last attempted deployment of this service instance.

" + }, "lastClientRequestToken":{ "shape":"String", "documentation":"

The last client request token received.

" @@ -5232,6 +5691,10 @@ "shape":"Timestamp", "documentation":"

The time when the service instance was last deployed successfully.

" }, + "lastSucceededDeploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the last successful deployment of this service instance.

" + }, "name":{ "shape":"ResourceName", "documentation":"

The name of the service instance.

" @@ -5260,6 +5723,46 @@ "documentation":"

Detailed data of a Proton service instance resource.

" }, "ServiceInstanceArn":{"type":"string"}, + "ServiceInstanceState":{ + "type":"structure", + "required":[ + "spec", + "templateMajorVersion", + "templateMinorVersion", + "templateName" + ], + "members":{ + "lastSuccessfulComponentDeploymentIds":{ + "shape":"ComponentDeploymentIdList", + "documentation":"

The IDs of the last successful component deployments for this service instance.

" + }, + "lastSuccessfulEnvironmentDeploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the last successful environment deployment for this service instance.

" + }, + "lastSuccessfulServicePipelineDeploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the last successful service pipeline deployment for this service instance.

" + }, + "spec":{ + "shape":"SpecContents", + "documentation":"

The service spec that was used to create the service instance.

" + }, + "templateMajorVersion":{ + "shape":"TemplateVersionPart", + "documentation":"

The major version of the service template that was used to create the service instance.

" + }, + "templateMinorVersion":{ + "shape":"TemplateVersionPart", + "documentation":"

The minor version of the service template that was used to create the service instance.

" + }, + "templateName":{ + "shape":"ResourceName", + "documentation":"

The name of the service template that was used to create the service instance.

" + } + }, + "documentation":"

The detailed data about the current state of this service instance.

" + }, "ServiceInstanceSummary":{ "type":"structure", "required":[ @@ -5296,6 +5799,10 @@ "shape":"ResourceName", "documentation":"

The name of the environment that the service instance was deployed into.

" }, + "lastAttemptedDeploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the last attempted deployment of this service instance.

" + }, "lastDeploymentAttemptedAt":{ "shape":"Timestamp", "documentation":"

The time when a deployment of the service was last attempted.

" @@ -5304,6 +5811,10 @@ "shape":"Timestamp", "documentation":"

The time when the service was last deployed successfully.

" }, + "lastSucceededDeploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the last successful deployment of this service instance.

" + }, "name":{ "shape":"ResourceName", "documentation":"

The name of the service instance.

" @@ -5360,6 +5871,10 @@ "shape":"StatusMessage", "documentation":"

A service pipeline deployment status message.

" }, + "lastAttemptedDeploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the last attempted deployment of this service pipeline.

" + }, "lastDeploymentAttemptedAt":{ "shape":"Timestamp", "documentation":"

The time when a deployment of the service pipeline was last attempted.

" @@ -5368,6 +5883,10 @@ "shape":"Timestamp", "documentation":"

The time when the service pipeline was last deployed successfully.

" }, + "lastSucceededDeploymentId":{ + "shape":"DeploymentId", + "documentation":"

The ID of the last successful deployment of this service pipeline.

" + }, "spec":{ "shape":"SpecContents", "documentation":"

The service spec that was used to create the service pipeline.

" @@ -5387,6 +5906,33 @@ }, "documentation":"

Detailed data of a Proton service pipeline resource.

" }, + "ServicePipelineState":{ + "type":"structure", + "required":[ + "templateMajorVersion", + "templateMinorVersion", + "templateName" + ], + "members":{ + "spec":{ + "shape":"SpecContents", + "documentation":"

The service spec that was used to create the service pipeline.

" + }, + "templateMajorVersion":{ + "shape":"TemplateVersionPart", + "documentation":"

The major version of the service template that was used to create the service pipeline.

" + }, + "templateMinorVersion":{ + "shape":"TemplateVersionPart", + "documentation":"

The minor version of the service template that was used to create the service pipeline.

" + }, + "templateName":{ + "shape":"ResourceName", + "documentation":"

The name of the service template that was used to create the service pipeline.

" + } + }, + "documentation":"

The detailed data about the current state of the service pipeline.

" + }, "ServiceQuotaExceededException":{ "type":"structure", "required":["message"], diff --git a/botocore/data/s3/2006-03-01/service-2.json b/botocore/data/s3/2006-03-01/service-2.json index 348fbc8a56..462d9de782 100644 --- a/botocore/data/s3/2006-03-01/service-2.json +++ b/botocore/data/s3/2006-03-01/service-2.json @@ -51,7 +51,7 @@ {"shape":"ObjectNotInActiveTierError"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectCOPY.html", - "documentation":"

Creates a copy of an object that is already stored in Amazon S3.

You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.

All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.

A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. If you call the S3 API directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throws an exception (or, for the SDKs that don't use exceptions, they return the error).

If the copy is successful, you receive a response with information about the copied object.

If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.

The copy request charge is based on the storage class and Region that you specify for the destination object. For pricing information, see Amazon S3 pricing.

Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

Metadata

When copying an object, you can preserve all metadata (the default) or specify new metadata. However, the access control list (ACL) is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.

To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive header. When you grant permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.

x-amz-website-redirect-location is unique to each object and must be specified in the request headers to copy the value.

x-amz-copy-source-if Headers

To only copy an object under certain conditions, such as whether the Etag matches or whether the object was modified before or after a specified date, use the following request parameters:

If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:

If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:

All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.

Server-side encryption

Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target object copy.

When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can use other appropriate encryption-related headers to encrypt the target object with a KMS key, an Amazon S3 managed key, or a customer-provided key. With server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its data centers and decrypts the data when you access it. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If the source object for the copy is stored in Amazon S3 using SSE-C, you must provide the necessary encryption information in your request so that Amazon S3 can decrypt the object for copying. For more information about server-side encryption, see Using Server-Side Encryption.

If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.

Access Control List (ACL)-Specific Request Headers

When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups that are defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

If the bucket that you're copying objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format.

For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide.

If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner.

Checksums

When copying an object, if it has a checksum, that checksum will be copied to the new object by default. When you copy the object over, you can optionally specify a different checksum algorithm to use with the x-amz-checksum-algorithm header.

Storage Class Options

You can use the CopyObject action to change the storage class of an object that is already stored in Amazon S3 by using the StorageClass parameter. For more information, see Storage Classes in the Amazon S3 User Guide.

If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject. For more information, see Copying Objects.

Versioning

By default, x-amz-copy-source header identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId subresource.

If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.

If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.

The following operations are related to CopyObject:

", + "documentation":"

Creates a copy of an object that is already stored in Amazon S3.

You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.

All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.

A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. If you call the S3 API directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use the Amazon Web Services SDKs, the SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for SDKs that don't use exceptions, return the error).

If the copy is successful, you receive a response with information about the copied object.

If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.

The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. For pricing information, see Amazon S3 pricing.

Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

Metadata

When copying an object, you can preserve all metadata (the default) or specify new metadata. However, the access control list (ACL) is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.

To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive header. When you grant permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.

x-amz-website-redirect-location is unique to each object and must be specified in the request headers to copy the value.

x-amz-copy-source-if Headers

To only copy an object under certain conditions, such as whether the ETag matches or whether the object was modified before or after a specified date, use the following request parameters:

If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:

If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:

All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.

Server-side encryption

Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target object copy.

When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can use other appropriate encryption-related headers to encrypt the target object with a KMS key, an Amazon S3 managed key, or a customer-provided key. With server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its data centers and decrypts the data when you access it. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If the source object for the copy is stored in Amazon S3 using SSE-C, you must provide the necessary encryption information in your request so that Amazon S3 can decrypt the object for copying. For more information about server-side encryption, see Using Server-Side Encryption.

If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.

Access Control List (ACL)-Specific Request Headers

When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups that are defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

If the bucket that you're copying objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format.

For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide.

If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner.

Checksums

When copying an object, if it has a checksum, that checksum will be copied to the new object by default. When you copy the object over, you can optionally specify a different checksum algorithm to use with the x-amz-checksum-algorithm header.

Storage Class Options

You can use the CopyObject action to change the storage class of an object that is already stored in Amazon S3 by using the StorageClass parameter. For more information, see Storage Classes in the Amazon S3 User Guide.

If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject and Copying Objects.

Versioning

By default, the x-amz-copy-source header identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId subresource.

If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.

If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.
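A minimal boto3 sketch of the two copy paths (bucket and key names are placeholders): copy_object for single-request copies of objects up to 5 GB, and the client's managed copy, which switches to multipart UploadPartCopy for larger objects:

```python
import boto3

s3 = boto3.client("s3")

# Single-request copy; REPLACE swaps in new metadata and a new storage class.
s3.copy_object(
    Bucket="example-destination-bucket",
    Key="reports/2023/report.csv",
    CopySource={"Bucket": "example-source-bucket", "Key": "incoming/report.csv"},
    MetadataDirective="REPLACE",
    Metadata={"project": "example"},
    StorageClass="STANDARD_IA",
)

# Managed transfer; handles the multipart path for objects over 5 GB.
s3.copy(
    {"Bucket": "example-source-bucket", "Key": "big/object.bin"},
    "example-destination-bucket",
    "big/object.bin",
)
```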

The following operations are related to CopyObject:

", "alias":"PutObjectCopy" }, "CreateBucket":{ @@ -1499,7 +1499,9 @@ "us-gov-east-1", "us-gov-west-1", "us-west-1", - "us-west-2" + "us-west-2", + "ap-south-2", + "eu-south-2" ] }, "BucketLoggingStatus":{ @@ -5723,7 +5725,9 @@ "ObjectLockLegalHoldStatus", "IntelligentTieringAccessTier", "BucketKeyStatus", - "ChecksumAlgorithm" + "ChecksumAlgorithm", + "ObjectAccessControlList", + "ObjectOwner" ] }, "InventoryOptionalFields":{ diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index 90a0e3b8a0..84e51ee060 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -5833,6 +5833,10 @@ "ModelRegisterSettings":{ "shape":"ModelRegisterSettings", "documentation":"

The model registry settings for the SageMaker Canvas application.

" + }, + "WorkspaceSettings":{ + "shape":"WorkspaceSettings", + "documentation":"

The workspace settings for the SageMaker Canvas application.

" } }, "documentation":"

The SageMaker Canvas application settings.

" @@ -29678,7 +29682,10 @@ "WaitIntervalInSeconds" ], "members":{ - "MaximumBatchSize":{"shape":"CapacitySize"}, + "MaximumBatchSize":{ + "shape":"CapacitySize", + "documentation":"

The batch size for each rolling step, which provisions capacity and turns on traffic on the new endpoint fleet and terminates capacity on the old endpoint fleet. The value must be between 5% and 50% of the variant's total instance count.

" + }, "WaitIntervalInSeconds":{ "shape":"WaitIntervalInSeconds", "documentation":"

The length of the baking period, during which SageMaker monitors alarms for each batch on the new fleet.

" @@ -29687,7 +29694,10 @@ "shape":"MaximumExecutionTimeoutInSeconds", "documentation":"

The time limit for the total deployment. Exceeding this limit causes a timeout.

" }, - "RollbackMaximumBatchSize":{"shape":"CapacitySize"} + "RollbackMaximumBatchSize":{ + "shape":"CapacitySize", + "documentation":"

The batch size for rolling back to the old endpoint fleet. In each rolling step, capacity is provisioned and traffic is turned on for the old endpoint fleet, and capacity is terminated on the new endpoint fleet. If this field is absent, the default is 100% of total capacity, which means the whole capacity of the old fleet is brought up at once during rollback.

" + } }, "documentation":"

Specifies a rolling deployment strategy for updating a SageMaker endpoint.
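A minimal sketch of how these fields might be wired into an endpoint update, assuming hypothetical endpoint and config names; CAPACITY_PERCENT is one of the CapacitySize types:

import botocore.session

sm = botocore.session.get_session().create_client('sagemaker')
sm.update_endpoint(
    EndpointName='my-endpoint',                  # hypothetical
    EndpointConfigName='my-endpoint-config-v2',  # hypothetical
    DeploymentConfig={
        'RollingUpdatePolicy': {
            'MaximumBatchSize': {'Type': 'CAPACITY_PERCENT', 'Value': 20},  # 5-50% of instances
            'WaitIntervalInSeconds': 300,              # baking period per batch
            'MaximumExecutionTimeoutInSeconds': 3600,  # total deployment time limit
            'RollbackMaximumBatchSize': {'Type': 'CAPACITY_PERCENT', 'Value': 100},
        },
    },
)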

" }, @@ -31430,7 +31440,7 @@ "members":{ "FeatureSpecificationS3Uri":{ "shape":"S3Uri", - "documentation":"

A URL to the Amazon S3 data source containing additional selected features that complement the target, itemID, timestamp, and grouped columns set in TimeSeriesConfig. When not provided, the AutoML job V2 includes all the columns from the original dataset that are not already declared in TimeSeriesConfig. If provided, the AutoML job V2 only considers these additional columns as a complement to the ones declared in TimeSeriesConfig.

You can input FeatureAttributeNames (optional) in JSON format as shown below:

{ \"FeatureAttributeNames\":[\"col1\", \"col2\", ...] }.

You can also specify the data type of the feature (optional) in the format shown below:

{ \"FeatureDataTypes\":{\"col1\":\"numeric\", \"col2\":\"categorical\" ... } }

Autopilot supports the following data types: numeric, categorical, text, and datetime.

These column keys must not include any column set in TimeSeriesConfig.

When not provided, the AutoML job V2 includes all the columns from the original dataset that are not already declared in TimeSeriesConfig. If provided, the AutoML job V2 only considers these additional columns as a complement to the ones declared in TimeSeriesConfig.

Autopilot supports the following data types: numeric, categorical, text, and datetime.

" + "documentation":"

A URL to the Amazon S3 data source containing additional selected features that complement the target, itemID, timestamp, and grouped columns set in TimeSeriesConfig. When not provided, the AutoML job V2 includes all the columns from the original dataset that are not already declared in TimeSeriesConfig. If provided, the AutoML job V2 only considers these additional columns as a complement to the ones declared in TimeSeriesConfig.

You can input FeatureAttributeNames (optional) in JSON format as shown below:

{ \"FeatureAttributeNames\":[\"col1\", \"col2\", ...] }.

You can also specify the data type of the feature (optional) in the format shown below:

{ \"FeatureDataTypes\":{\"col1\":\"numeric\", \"col2\":\"categorical\" ... } }

Autopilot supports the following data types: numeric, categorical, text, and datetime.

These column keys must not include any column set in TimeSeriesConfig.
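As an illustration, the feature specification is a small JSON document uploaded to S3 and referenced by FeatureSpecificationS3Uri; the bucket, key, and column names below are hypothetical:

import json
import botocore.session

s3 = botocore.session.get_session().create_client('s3')
spec = {
    'FeatureAttributeNames': ['promo_flag', 'store_size'],
    'FeatureDataTypes': {'promo_flag': 'categorical', 'store_size': 'numeric'},
}
s3.put_object(
    Bucket='my-automl-bucket',
    Key='feature-spec.json',
    Body=json.dumps(spec),
)
# The file is then referenced as FeatureSpecificationS3Uri='s3://my-automl-bucket/feature-spec.json'
# in the time-series forecasting job config.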

" }, "CompletionCriteria":{"shape":"AutoMLJobCompletionCriteria"}, "ForecastFrequency":{ @@ -34517,6 +34527,20 @@ "type":"list", "member":{"shape":"Workforce"} }, + "WorkspaceSettings":{ + "type":"structure", + "members":{ + "S3ArtifactPath":{ + "shape":"S3Uri", + "documentation":"

The Amazon S3 bucket used to store artifacts generated by Canvas. Updating the Amazon S3 location impacts existing configuration settings, and Canvas users no longer have access to their artifacts. Canvas users must log out and log back in to apply the new location.

" + }, + "S3KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The Amazon Web Services Key Management Service (KMS) encryption key ID that is used to encrypt artifacts generated by Canvas in the Amazon S3 bucket.

" + } + }, + "documentation":"

The workspace settings for the SageMaker Canvas application.
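A hedged sketch of applying these settings through a domain's default user settings; the domain ID, bucket path, and KMS alias are hypothetical:

import botocore.session

sm = botocore.session.get_session().create_client('sagemaker')
sm.update_domain(
    DomainId='d-exampledomain',  # hypothetical
    DefaultUserSettings={
        'CanvasAppSettings': {
            'WorkspaceSettings': {
                'S3ArtifactPath': 's3://my-canvas-artifacts/team-a/',
                'S3KmsKeyId': 'alias/my-canvas-key',
            },
        },
    },
)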

" + }, "Workteam":{ "type":"structure", "required":[ diff --git a/botocore/data/secretsmanager/2017-10-17/service-2.json b/botocore/data/secretsmanager/2017-10-17/service-2.json index 8ffaf29aac..ef5169e86f 100644 --- a/botocore/data/secretsmanager/2017-10-17/service-2.json +++ b/botocore/data/secretsmanager/2017-10-17/service-2.json @@ -345,7 +345,7 @@ {"shape":"PreconditionNotMetException"}, {"shape":"DecryptionFailure"} ], - "documentation":"

Modifies the details of a secret, including metadata and the secret value. To change the secret value, you can also use PutSecretValue.

To change the rotation configuration of a secret, use RotateSecret instead.

To change a secret so that it is managed by another service, you need to recreate the secret in that service. See Secrets Manager secrets managed by other Amazon Web Services services.

We recommend you avoid calling UpdateSecret at a sustained rate of more than once every 10 minutes. When you call UpdateSecret to update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you update the secret value more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions.

If you include SecretString or SecretBinary to create a new secret version, Secrets Manager automatically moves the staging label AWSCURRENT to the new version. Then it attaches the label AWSPREVIOUS to the version that AWSCURRENT was removed from.

If you call this operation with a ClientRequestToken that matches an existing version's VersionId, the operation results in an error. You can't modify an existing version, you can only create a new version. To remove a version, remove all staging labels from it. See UpdateSecretVersionStage.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:UpdateSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. If you use a customer managed key, you must also have kms:GenerateDataKey and kms:Decrypt permissions on the key. For more information, see Secret encryption and decryption.

" + "documentation":"

Modifies the details of a secret, including metadata and the secret value. To change the secret value, you can also use PutSecretValue.

To change the rotation configuration of a secret, use RotateSecret instead.

To change a secret so that it is managed by another service, you need to recreate the secret in that service. See Secrets Manager secrets managed by other Amazon Web Services services.

We recommend you avoid calling UpdateSecret at a sustained rate of more than once every 10 minutes. When you call UpdateSecret to update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you update the secret value more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions.

If you include SecretString or SecretBinary to create a new secret version, Secrets Manager automatically moves the staging label AWSCURRENT to the new version. Then it attaches the label AWSPREVIOUS to the version that AWSCURRENT was removed from.

If you call this operation with a ClientRequestToken that matches an existing version's VersionId, the operation results in an error. You can't modify an existing version, you can only create a new version. To remove a version, remove all staging labels from it. See UpdateSecretVersionStage.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:UpdateSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. If you use a customer managed key, you must also have kms:GenerateDataKey, kms:Encrypt, and kms:Decrypt permissions on the key. If you change the KMS key and you don't have kms:Encrypt permission on the new key, Secrets Manager does not re-encrypt existing secret versions with the new key. For more information, see Secret encryption and decryption.
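For instance, a minimal sketch (the secret name, key alias, and value are hypothetical) that writes a new secret version and re-keys the secret:

import botocore.session

secrets = botocore.session.get_session().create_client('secretsmanager')
secrets.update_secret(
    SecretId='my-app/db-credentials',  # hypothetical
    KmsKeyId='alias/my-secrets-key',   # requires kms:Encrypt on this key
    SecretString='{"user": "app", "password": "..."}',
)
# The new version receives the AWSCURRENT label; the prior version becomes AWSPREVIOUS.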

" }, "UpdateSecretVersionStage":{ "name":"UpdateSecretVersionStage", @@ -1665,7 +1665,7 @@ }, "KmsKeyId":{ "shape":"KmsKeyIdType", - "documentation":"

The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt new secret versions as well as any existing versions with the staging labels AWSCURRENT, AWSPENDING, or AWSPREVIOUS. For more information about versions and staging labels, see Concepts: Version.

A key alias is always prefixed by alias/, for example alias/aws/secretsmanager. For more information, see About aliases.

If you set this to an empty string, Secrets Manager uses the Amazon Web Services managed key aws/secretsmanager. If this key doesn't already exist in your account, then Secrets Manager creates it for you automatically. All users and roles in the Amazon Web Services account automatically have access to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time significant delay in returning the result.

You can only use the Amazon Web Services managed key aws/secretsmanager if you call this operation using credentials from the same Amazon Web Services account that owns the secret. If the secret is in a different account, then you must use a customer managed key and provide the ARN of that KMS key in this field. The user making the call must have permissions to both the secret and the KMS key in their respective accounts.

" + "documentation":"

The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt new secret versions as well as any existing versions with the staging labels AWSCURRENT, AWSPENDING, or AWSPREVIOUS. If you don't have kms:Encrypt permission on the new key, Secrets Manager does not re-encrypt existing secret versions with the new key. For more information about versions and staging labels, see Concepts: Version.

A key alias is always prefixed by alias/, for example alias/aws/secretsmanager. For more information, see About aliases.

If you set this to an empty string, Secrets Manager uses the Amazon Web Services managed key aws/secretsmanager. If this key doesn't already exist in your account, then Secrets Manager creates it for you automatically. All users and roles in the Amazon Web Services account automatically have access to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time significant delay in returning the result.

You can only use the Amazon Web Services managed key aws/secretsmanager if you call this operation using credentials from the same Amazon Web Services account that owns the secret. If the secret is in a different account, then you must use a customer managed key and provide the ARN of that KMS key in this field. The user making the call must have permissions to both the secret and the KMS key in their respective accounts.

" }, "SecretBinary":{ "shape":"SecretBinaryType", diff --git a/docs/source/conf.py b/docs/source/conf.py index 676883b794..559c180550 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.31' # The full version, including alpha/beta/rc tags. -release = '1.31.2' +release = '1.31.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/tests/functional/endpoint-rules/proton/endpoint-tests-1.json b/tests/functional/endpoint-rules/proton/endpoint-tests-1.json index fd7da6d333..2ea3a3dcf5 100644 --- a/tests/functional/endpoint-rules/proton/endpoint-tests-1.json +++ b/tests/functional/endpoint-rules/proton/endpoint-tests-1.json @@ -8,9 +8,9 @@ } }, "params": { + "Region": "ap-northeast-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { @@ -21,9 +21,9 @@ } }, "params": { + "Region": "eu-west-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -47,9 +47,9 @@ } }, "params": { + "Region": "us-east-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-2" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { + "Region": "us-west-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -73,9 +73,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -86,9 +86,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -99,9 +99,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -112,9 +112,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -125,9 +125,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -138,9 +138,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -151,9 +151,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -164,9 +164,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -177,9 +177,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -190,9 +190,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -203,9 +203,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -214,9 +214,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": 
"us-iso-east-1" + "UseDualStack": true } }, { @@ -227,9 +227,9 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -238,9 +238,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { @@ -251,9 +251,9 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -262,9 +262,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -275,9 +275,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -286,9 +286,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -299,9 +299,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -312,9 +312,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -337,9 +337,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -349,9 +349,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } },