diff --git a/.all-contributorsrc b/.all-contributorsrc index c927666bb1a9..eac27828eed2 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -796,6 +796,24 @@ "contributions": [ "code" ] + }, + { + "login": "scrocquesel", + "name": "Sébastien Crocquesel", + "avatar_url": "https://avatars.githubusercontent.com/u/88554524?v=4", + "profile": "https://www.inulogic.fr", + "contributions": [ + "code" + ] + }, + { + "login": "dave-fn", + "name": "David Negrete", + "avatar_url": "https://avatars.githubusercontent.com/u/21349334?v=4", + "profile": "https://github.com/dave-fn", + "contributions": [ + "code" + ] } ], "contributorsPerLine": 7, @@ -804,5 +822,6 @@ "repoType": "github", "repoHost": "https://github.com", "skipCi": true, - "commitConvention": "angular" + "commitConvention": "angular", + "commitType": "docs" } diff --git a/.brazil.json b/.brazil.json index 9059e5646031..1f0931d0747b 100644 --- a/.brazil.json +++ b/.brazil.json @@ -109,6 +109,7 @@ "io.netty:netty-common": { "packageName": "Netty4", "packageVersion": "4.1" }, "io.netty:netty-handler": { "packageName": "Netty4", "packageVersion": "4.1" }, "io.netty:netty-resolver": { "packageName": "Netty4", "packageVersion": "4.1" }, + "io.netty:netty-resolver-dns": { "packageName": "Netty4", "packageVersion": "4.1" }, "io.netty:netty-transport": { "packageName": "Netty4", "packageVersion": "4.1" }, "io.netty:netty-transport-classes-epoll": { "packageName": "Netty4", "packageVersion": "4.1" }, "io.netty:netty-transport-native-unix-common": { "packageName": "Netty4", "packageVersion": "4.1" }, diff --git a/.changes/2.20.68.json b/.changes/2.20.68.json new file mode 100644 index 000000000000..1b3791065e1b --- /dev/null +++ b/.changes/2.20.68.json @@ -0,0 +1,84 @@ +{ + "version": "2.20.68", + "date": "2023-05-18", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "This update fixes an issue where CompletableFutures are leaked/never completed when 
the submission to the FUTURE_COMPLETE_EXECUTOR is rejected.\n\nBy default, the SDK uses `2 * number of cores` (with a maximum of 64), and uses bounded queue of size 1000. In cases where the throughput to the client exceeds the executor's ability to keep up, it would reject executions. Before this change this would lead to leaked futures." + }, + { + "type": "bugfix", + "category": "S3 Transfer Manager", + "contributor": "", + "description": "Fixed the issue where S3 Transfer Manager attempted to load AWS CRT classes when Java based S3 client was used. See [#3936](https://github.com/aws/aws-sdk-java-v2/issues/3936)." + }, + { + "type": "feature", + "category": "AWS CloudTrail", + "contributor": "", + "description": "Add ConflictException to PutEventSelectors, add (Channel/EDS)ARNInvalidException to Tag APIs. These exceptions provide customers with more specific error messages instead of internal errors." + }, + { + "type": "feature", + "category": "AWS Compute Optimizer", + "contributor": "", + "description": "In this launch, we add support for showing integration status with external metric providers such as Instana, Datadog ...etc in GetEC2InstanceRecommendations and ExportEC2InstanceRecommendations apis" + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "contributor": "", + "description": "This release introduces a new MXF Profile for XDCAM which is strictly compliant with the SMPTE RDD 9 standard and improved handling of output name modifiers." 
+ }, + { + "type": "feature", + "category": "AWS Security Token Service", + "contributor": "", + "description": "API updates for the AWS Security Token Service" + }, + { + "type": "feature", + "category": "Amazon Athena", + "contributor": "", + "description": "Removing SparkProperties from EngineConfiguration object for StartSession API call" + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "You can programmatically create and manage prompts using APIs, for example, to extract prompts stored within Amazon Connect and add them to your Amazon S3 bucket. AWS CloudTrail, AWS CloudFormation and tagging are supported." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "Documentation only release to address various tickets." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Add support for i4g.large, i4g.xlarge, i4g.2xlarge, i4g.4xlarge, i4g.8xlarge and i4g.16xlarge instances powered by AWS Graviton2 processors that deliver up to 15% better compute performance than our other storage-optimized instances." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "RDS documentation update for the EngineVersion parameter of ModifyDBSnapshot" + }, + { + "type": "feature", + "category": "Amazon SageMaker geospatial capabilities", + "contributor": "", + "description": "This release makes ExecutionRoleArn a required field in the StartEarthObservationJob API." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.69.json b/.changes/2.20.69.json new file mode 100644 index 000000000000..17e6dc9949f9 --- /dev/null +++ b/.changes/2.20.69.json @@ -0,0 +1,36 @@ +{ + "version": "2.20.69", + "date": "2023-05-19", + "entries": [ + { + "type": "feature", + "category": "AWS Backup", + "contributor": "", + "description": "Add ResourceArn, ResourceType, and BackupVaultName to ListRecoveryPointsByLegalHold API response." + }, + { + "type": "feature", + "category": "AWS Elemental MediaPackage v2", + "contributor": "", + "description": "Adds support for the MediaPackage Live v2 API" + }, + { + "type": "feature", + "category": "Amazon Connect Cases", + "contributor": "", + "description": "This release adds the ability to create fields with type Url through the CreateField API. For more information see https://docs.aws.amazon.com/cases/latest/APIReference/Welcome.html" + }, + { + "type": "feature", + "category": "Amazon Simple Email Service", + "contributor": "", + "description": "This release allows customers to update scaling mode property of dedicated IP pools with PutDedicatedIpPoolScalingAttributes call." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.70.json b/.changes/2.20.70.json new file mode 100644 index 000000000000..23ab3ff3f262 --- /dev/null +++ b/.changes/2.20.70.json @@ -0,0 +1,36 @@ +{ + "version": "2.20.70", + "date": "2023-05-22", + "entries": [ + { + "type": "feature", + "category": "AWS Backup", + "contributor": "", + "description": "Added support for tags on restore." 
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "scrocquesel", + "description": "Add client configuration overriding of SCHEDULED_EXECUTOR_SERVICE option" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "This commit adds a new ErrorType core metric that is recorded for all failed API call attempts. The ErrorType records the general category of error that occurred for a failed API call attempt. Those categories are:\n\n - Throttling errors\n - Service errors other than throttling\n - I/O errors\n - API call or API call attempt timeouts\n\n The intent of this metric is to help locate possible issues at a glance and help direct further debugging or investigation." + }, + { + "type": "feature", + "category": "Amazon Pinpoint", + "contributor": "", + "description": "Amazon Pinpoint is deprecating the tags parameter in the UpdateSegment, UpdateCampaign, UpdateEmailTemplate, UpdateSmsTemplate, UpdatePushTemplate, UpdateInAppTemplate and UpdateVoiceTemplate. Amazon Pinpoint will end support for the tags parameter by May 22, 2023." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "contributor": "", + "description": "Add support for Asset Bundle, Geospatial Heatmaps." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.71.json b/.changes/2.20.71.json new file mode 100644 index 000000000000..63de87aa67f9 --- /dev/null +++ b/.changes/2.20.71.json @@ -0,0 +1,30 @@ +{ + "version": "2.20.71", + "date": "2023-05-23", + "entries": [ + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Added ModelNameEquals, ModelPackageVersionArnEquals in request and ModelName, SamplePayloadUrl, ModelPackageVersionArn in response of ListInferenceRecommendationsJobs API. Added Invocation timestamps in response of DescribeInferenceRecommendationsJob API & ListInferenceRecommendationsJobSteps API." 
+ }, + { + "type": "feature", + "category": "Amazon Translate", + "contributor": "", + "description": "Added support for calling TranslateDocument API." + }, + { + "type": "feature", + "category": "Firewall Management Service", + "contributor": "", + "description": "Fixes issue that could cause calls to GetAdminScope and ListAdminAccountsForOrganization to return a 500 Internal Server error." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.72.json b/.changes/2.20.72.json new file mode 100644 index 000000000000..6ff0568bf94b --- /dev/null +++ b/.changes/2.20.72.json @@ -0,0 +1,36 @@ +{ + "version": "2.20.72", + "date": "2023-05-24", + "entries": [ + { + "type": "feature", + "category": "AWS AppSync", + "contributor": "", + "description": "This release introduces AppSync Merged APIs, which provide the ability to compose multiple source APIs into a single federated/merged API." + }, + { + "type": "feature", + "category": "AWS Cost and Usage Report Service", + "contributor": "", + "description": "Add support for split cost allocation data on a report." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "Amazon Connect Evaluation Capabilities: validation improvements" + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "SageMaker now provides an instantaneous deployment recommendation through the DescribeModel API" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.73.json b/.changes/2.20.73.json new file mode 100644 index 000000000000..c5f0134238f2 --- /dev/null +++ b/.changes/2.20.73.json @@ -0,0 +1,48 @@ +{ + "version": "2.20.73", + "date": "2023-05-25", + "entries": [ + { + "type": "feature", + "category": "AWS CodePipeline", + "contributor": "", + "description": "Add PollingDisabledAt time information in PipelineMetadata object of GetPipeline API." + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "Added ability to create data quality rulesets for shared, cross-account Glue Data Catalog tables. Added support for dataset comparison rules through a new parameter called AdditionalDataSources. Enhanced the data quality results with a map containing profiled metric values." + }, + { + "type": "feature", + "category": "AWS Migration Hub Refactor Spaces", + "contributor": "", + "description": "This SDK update allows for path parameter syntax to be passed to the CreateRoute API. Path parameter syntax require parameters to be enclosed in {} characters. This update also includes a new AppendSourcePath field which lets users forward the source path to the Service URL endpoint." + }, + { + "type": "feature", + "category": "Amazon GameLift", + "contributor": "", + "description": "GameLift FleetIQ users can now filter game server claim requests to exclude servers on instances that are draining." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Amazon SageMaker Automatic Model Tuning now supports enabling Autotune for tuning jobs which can choose tuning job configurations." + }, + { + "type": "feature", + "category": "Application Auto Scaling", + "contributor": "", + "description": "With this release, ElastiCache customers will be able to use predefined metricType \"ElastiCacheDatabaseCapacityUsageCountedForEvictPercentage\" for their ElastiCache instances." 
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.74.json b/.changes/2.20.74.json new file mode 100644 index 000000000000..aabdc6fbb1e2 --- /dev/null +++ b/.changes/2.20.74.json @@ -0,0 +1,30 @@ +{ + "version": "2.20.74", + "date": "2023-05-26", + "entries": [ + { + "type": "feature", + "category": "AWS IoT Wireless", + "contributor": "", + "description": "Add Multicast Group support in Network Analyzer Configuration." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "Documentation update for a new Initiation Method value in DescribeContact API" + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Added ml.p4d and ml.inf1 as supported instance type families for SageMaker Notebook Instances." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.75.json b/.changes/2.20.75.json new file mode 100644 index 000000000000..9bd2fb1a23f2 --- /dev/null +++ b/.changes/2.20.75.json @@ -0,0 +1,78 @@ +{ + "version": "2.20.75", + "date": "2023-05-30", + "entries": [ + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "Added Runtime parameter to allow selection of Ray Runtime" + }, + { + "type": "feature", + "category": "AWS Ground Station", + "contributor": "", + "description": "Updating description of GetMinuteUsage to be clearer." 
+ }, + { + "type": "feature", + "category": "AWS IoT FleetWise", + "contributor": "", + "description": "Campaigns now support selecting Timestream or S3 as the data destination, Signal catalogs now support \"Deprecation\" keyword released in VSS v2.1 and \"Comment\" keyword released in VSS v3.0" + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "contributor": "", + "description": "Added new resource detail objects to ASFF, including resources for AwsGuardDutyDetector, AwsAmazonMqBroker, AwsEventSchemasRegistry, AwsAppSyncGraphQlApi and AwsStepFunctionStateMachine." + }, + { + "type": "feature", + "category": "AWS WAFV2", + "contributor": "", + "description": "This SDK release provides customers the ability to use Header Order as a field to match." + }, + { + "type": "feature", + "category": "Amazon Chime SDK Voice", + "contributor": "", + "description": "Added optional CallLeg field to StartSpeakerSearchTask API request" + }, + { + "type": "feature", + "category": "Amazon Location Service", + "contributor": "", + "description": "This release adds API support for political views for the maps service APIs: CreateMap, UpdateMap, DescribeMap." + }, + { + "type": "feature", + "category": "Amazon MemoryDB", + "contributor": "", + "description": "Amazon MemoryDB for Redis now supports AWS Identity and Access Management authentication access to Redis clusters starting with redis-engine version 7.0" + }, + { + "type": "feature", + "category": "Amazon Personalize", + "contributor": "", + "description": "This release provides support for the exclusion of certain columns for training when creating a solution and creating or updating a recommender with Amazon Personalize." 
+ }, + { + "type": "feature", + "category": "Amazon Polly", + "contributor": "", + "description": "Amazon Polly adds 2 new voices - Sofie (da-DK) and Niamh (en-IE)" + }, + { + "type": "feature", + "category": "Amazon Security Lake", + "contributor": "", + "description": "Log sources are now versioned. AWS log sources and custom sources will now come with a version identifier that enables producers to vend multiple schema versions to subscribers. Security Lake API have been refactored to more closely align with AWS API conventions." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.76.json b/.changes/2.20.76.json new file mode 100644 index 000000000000..1de8fa20aecf --- /dev/null +++ b/.changes/2.20.76.json @@ -0,0 +1,54 @@ +{ + "version": "2.20.76", + "date": "2023-05-31", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fix an issue where the optimal number of parts calculated could be higher than 10,000" + }, + { + "type": "feature", + "category": "AWS Config", + "contributor": "", + "description": "Resource Types Exclusion feature launch by AWS Config" + }, + { + "type": "feature", + "category": "AWSMainframeModernization", + "contributor": "", + "description": "Adds an optional create-only 'roleArn' property to Application resources. Enables PS and PO data set org types." + }, + { + "type": "feature", + "category": "AWS Service Catalog", + "contributor": "", + "description": "Documentation updates for ServiceCatalog." + }, + { + "type": "feature", + "category": "Amazon Fraud Detector", + "contributor": "", + "description": "This release enables publishing event predictions from Amazon Fraud Detector (AFD) to Amazon EventBridge. 
For example, after getting predictions from AFD, Amazon EventBridge rules can be configured to trigger notification through an SNS topic, send a message with SES, or trigger Lambda workflows." + }, + { + "type": "feature", + "category": "Amazon HealthLake", + "contributor": "", + "description": "This release adds a new request parameter to the CreateFHIRDatastore API operation. IdentityProviderConfiguration specifies how you want to authenticate incoming requests to your Healthlake Data Store." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "This release adds support for changing the engine for Oracle using the ModifyDbInstance API" + }, + { + "type": "feature", + "category": "Amazon WorkSpaces Web", + "contributor": "", + "description": "WorkSpaces Web now allows you to control which IP addresses your WorkSpaces Web portal may be accessed from." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.77.json b/.changes/2.20.77.json new file mode 100644 index 000000000000..9d11b565f6e7 --- /dev/null +++ b/.changes/2.20.77.json @@ -0,0 +1,48 @@ +{ + "version": "2.20.77", + "date": "2023-06-01", + "entries": [ + { + "type": "feature", + "category": "AWS WAFV2", + "contributor": "", + "description": "Corrected the information for the header order FieldToMatch setting" + }, + { + "type": "feature", + "category": "Alexa For Business", + "contributor": "", + "description": "Alexa for Business has been deprecated and is no longer supported." + }, + { + "type": "feature", + "category": "Amazon Appflow", + "contributor": "", + "description": "Added ability to select DataTransferApiType for DescribeConnector and CreateFlow requests when using Async supported connectors. Added supportedDataTransferType to DescribeConnector/DescribeConnectors/ListConnector response." 
+ }, + { + "type": "feature", + "category": "Amazon Connect Customer Profiles", + "contributor": "", + "description": "This release introduces calculated attribute related APIs." + }, + { + "type": "feature", + "category": "Amazon Interactive Video Service", + "contributor": "", + "description": "API Update for IVS Advanced Channel type" + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Amazon Sagemaker Autopilot adds support for Parquet file input to NLP text classification jobs." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.78.json b/.changes/2.20.78.json new file mode 100644 index 000000000000..da0c2eac15b5 --- /dev/null +++ b/.changes/2.20.78.json @@ -0,0 +1,36 @@ +{ + "version": "2.20.78", + "date": "2023-06-02", + "entries": [ + { + "type": "feature", + "category": "AWS CloudTrail", + "contributor": "", + "description": "This feature allows users to start and stop event ingestion on a CloudTrail Lake event data store." + }, + { + "type": "feature", + "category": "AWS WAFV2", + "contributor": "", + "description": "Added APIs to describe managed products. The APIs retrieve information about rule groups that are managed by AWS and by AWS Marketplace sellers." + }, + { + "type": "feature", + "category": "Amazon Athena", + "contributor": "", + "description": "This release introduces the DeleteCapacityReservation API and the ability to manage capacity reservations using CloudFormation" + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "This release adds Selective Execution feature that allows SageMaker Pipelines users to run selected steps in a pipeline." 
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.79.json b/.changes/2.20.79.json new file mode 100644 index 000000000000..b6b2fe779124 --- /dev/null +++ b/.changes/2.20.79.json @@ -0,0 +1,66 @@ +{ + "version": "2.20.79", + "date": "2023-06-05", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Upgrading AWS CRT dependency to v0.21.17. This version contains minor fixes and updates" + }, + { + "type": "feature", + "category": "AWS CloudFormation", + "contributor": "", + "description": "AWS CloudFormation StackSets provides customers with three new APIs to activate, deactivate, and describe AWS Organizations trusted access which is needed to get started with service-managed StackSets." + }, + { + "type": "feature", + "category": "AWS Key Management Service", + "contributor": "", + "description": "This release includes feature to import customer's asymmetric (RSA and ECC) and HMAC keys into KMS. It also includes feature to allow customers to specify number of days to schedule a KMS key deletion as a policy condition key." + }, + { + "type": "feature", + "category": "AWS Lambda", + "contributor": "", + "description": "Add Ruby 3.2 (ruby3.2) Runtime support to AWS Lambda." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Making InstanceTagAttribute as the required parameter for the DeregisterInstanceEventNotificationAttributes and RegisterInstanceEventNotificationAttributes APIs." + }, + { + "type": "feature", + "category": "Amazon Fraud Detector", + "contributor": "", + "description": "Added new variable types, new DateTime data type, and new rules engine functions for interacting and working with DateTime data types." 
+ }, + { + "type": "feature", + "category": "Amazon Keyspaces", + "contributor": "", + "description": "This release adds support for MRR GA launch, and includes multiregion support in create-keyspace, get-keyspace, and list-keyspace." + }, + { + "type": "feature", + "category": "AmazonMWAA", + "contributor": "", + "description": "This release adds ROLLING_BACK and CREATING_SNAPSHOT environment statuses for Amazon MWAA environments." + }, + { + "type": "feature", + "category": "FinSpace User Environment Management service", + "contributor": "", + "description": "Releasing new Managed kdb Insights APIs" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.80.json b/.changes/2.20.80.json new file mode 100644 index 000000000000..f7cd296e01ef --- /dev/null +++ b/.changes/2.20.80.json @@ -0,0 +1,72 @@ +{ + "version": "2.20.80", + "date": "2023-06-06", + "entries": [ + { + "type": "feature", + "category": "AWS Identity and Access Management", + "contributor": "", + "description": "This release updates the AccountAlias regex pattern with the same length restrictions enforced by the length constraint." + }, + { + "type": "feature", + "category": "AWS IoT", + "contributor": "", + "description": "Adding IoT Device Management Software Package Catalog APIs to register, store, and report system software packages, along with their versions and metadata in a centralized location." + }, + { + "type": "feature", + "category": "AWS IoT Data Plane", + "contributor": "", + "description": "Update thing shadow name regex to allow '$' character" + }, + { + "type": "feature", + "category": "AWS Signer", + "contributor": "", + "description": "AWS Signer is launching Container Image Signing, a new feature that enables you to sign and verify container images. 
This feature enables you to validate that only container images you approve are used in your enterprise." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "GetMetricDataV2 API is now available in AWS GovCloud(US) region." + }, + { + "type": "feature", + "category": "Amazon EMR", + "contributor": "", + "description": "This release provides customers the ability to specify an allocation strategy amongst PRICE_CAPACITY_OPTIMIZED, CAPACITY_OPTIMIZED, LOWEST_PRICE, DIVERSIFIED for Spot instances in Instance Fleet cluster. This enables customers to choose an allocation strategy best suited for their workload." + }, + { + "type": "feature", + "category": "Amazon Lex Model Building V2", + "contributor": "", + "description": "This release adds support for Lex Developers to create test sets and to execute those test-sets against their bots." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "contributor": "", + "description": "QuickSight support for pivot table field collapse state, radar chart range scale and multiple scope options in conditional formatting." + }, + { + "type": "feature", + "category": "Amazon Simple Queue Service", + "contributor": "", + "description": "Amazon SQS adds three new APIs - StartMessageMoveTask, CancelMessageMoveTask, and ListMessageMoveTasks to automate redriving messages from dead-letter queues to source queues or a custom destination." + }, + { + "type": "feature", + "category": "Inspector2", + "contributor": "", + "description": "Adds new response properties and request parameters for 'last scanned at' on the ListCoverage operation. This feature allows you to search and view the date of which your resources were last scanned by Inspector." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.81.json b/.changes/2.20.81.json new file mode 100644 index 000000000000..edbde98b38f4 --- /dev/null +++ b/.changes/2.20.81.json @@ -0,0 +1,42 @@ +{ + "version": "2.20.81", + "date": "2023-06-07", + "entries": [ + { + "type": "feature", + "category": "AWS CloudFormation", + "contributor": "", + "description": "AWS CloudFormation StackSets is updating the deployment experience for all stackset operations to skip suspended AWS accounts during deployments. StackSets will skip target AWS accounts that are suspended and set the Detailed Status of the corresponding stack instances as SKIPPED_SUSPENDED_ACCOUNT" + }, + { + "type": "feature", + "category": "AWS Direct Connect", + "contributor": "", + "description": "This update corrects the jumbo frames mtu values from 9100 to 8500 for transit virtual interfaces." + }, + { + "type": "feature", + "category": "AWS IoT Core Device Advisor", + "contributor": "", + "description": "AWS IoT Core Device Advisor now supports new Qualification Suite test case list. With this update, customers can more easily create new qualification test suite with an empty rootGroup input." + }, + { + "type": "feature", + "category": "Amazon CloudWatch Logs", + "contributor": "", + "description": "This change adds support for account level data protection policies using 3 new APIs, PutAccountPolicy, DeleteAccountPolicy and DescribeAccountPolicy. DescribeLogGroup API has been modified to indicate if account level policy is applied to the LogGroup via \"inheritedProperties\" list in the response." + }, + { + "type": "feature", + "category": "Amazon Connect Customer Profiles", + "contributor": "", + "description": "This release introduces event stream related APIs." + }, + { + "type": "feature", + "category": "Amazon EMR Containers", + "contributor": "", + "description": "EMR on EKS adds support for log rotation of Spark container logs with EMR-6.11.0 onwards, to the StartJobRun API." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.82.json b/.changes/2.20.82.json new file mode 100644 index 000000000000..9ca6d3458ec4 --- /dev/null +++ b/.changes/2.20.82.json @@ -0,0 +1,54 @@ +{ + "version": "2.20.82", + "date": "2023-06-08", + "entries": [ + { + "type": "feature", + "category": "AWS Comprehend Medical", + "contributor": "", + "description": "This release supports a new set of entities and traits." + }, + { + "type": "feature", + "category": "AWS STS", + "contributor": "", + "description": "Updates the core STS credential provider logic to return AwsSessionCredentials instead of an STS-specific class, and adds expirationTime to AwsSessionCredentials" + }, + { + "type": "feature", + "category": "AWS Service Catalog", + "contributor": "", + "description": "New parameter added in ServiceCatalog DescribeProvisioningArtifact api - IncludeProvisioningArtifactParameters. This parameter can be used to return information about the parameters used to provision the product" + }, + { + "type": "feature", + "category": "Amazon Athena", + "contributor": "", + "description": "You can now define custom spark properties at start of the session for use cases like cluster encryption, table formats, and general Spark tuning." + }, + { + "type": "feature", + "category": "Amazon Timestream Write", + "contributor": "", + "description": "This release adds the capability for customers to define how their data should be partitioned, optimizing for certain access patterns. This definition will take place as a part of the table creation." + }, + { + "type": "feature", + "category": "Payment Cryptography Control Plane", + "contributor": "", + "description": "Initial release of AWS Payment Cryptography Control Plane service for creating and managing cryptographic keys used during card payment processing." 
+ }, + { + "type": "feature", + "category": "Payment Cryptography Data Plane", + "contributor": "", + "description": "Initial release of AWS Payment Cryptography Data Plane service for performing cryptographic operations typically used during card payment processing." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.83.json b/.changes/2.20.83.json new file mode 100644 index 000000000000..42dffd691342 --- /dev/null +++ b/.changes/2.20.83.json @@ -0,0 +1,30 @@ +{ + "version": "2.20.83", + "date": "2023-06-09", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fixed issue with leased connection leaks when threads executing HTTP connections with Apache HttpClient were interrupted while the connection was in progress." + }, + { + "type": "bugfix", + "category": "Netty NIO HTTP Client", + "contributor": "martinKindall", + "description": "By default, Netty threads are blocked during dns resolution, namely InetAddress.getByName is used under the hood. Now, there's an option to configure the NettyNioAsyncHttpClient in order to use a non blocking dns resolution strategy." + }, + { + "type": "feature", + "category": "AWS Certificate Manager Private Certificate Authority", + "contributor": "", + "description": "Document-only update to refresh CLI documentation for AWS Private CA. No change to the service." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "This release adds search APIs for Prompts, Quick Connects and Hours of Operations, which can be used to search for those resources within a Connect Instance." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.84.json b/.changes/2.20.84.json new file mode 100644 index 000000000000..297e632d6fb7 --- /dev/null +++ b/.changes/2.20.84.json @@ -0,0 +1,66 @@ +{ + "version": "2.20.84", + "date": "2023-06-12", + "entries": [ + { + "type": "feature", + "category": "AWS Amplify UI Builder", + "contributor": "", + "description": "AWS Amplify UIBuilder is launching Codegen UI, a new feature that enables you to generate your amplify uibuilder components and forms." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "`IdleConnectionReaper` now does not prevent `HttpClientConnectionManager` from getting GC'd in the case where an SDK client is created per request and not closed." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "StephenFlavin", + "description": "Add \"unsafe\" and \"fromRemaining\" AsyncRequestBody constructors for byte arrays and ByteBuffers" + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "contributor": "", + "description": "Documentation updates for DynamoDB" + }, + { + "type": "feature", + "category": "Amazon DynamoDB Streams", + "contributor": "", + "description": "Documentation updates for DynamoDB Streams" + }, + { + "type": "feature", + "category": "Amazon FSx", + "contributor": "", + "description": "Amazon FSx for NetApp ONTAP now supports joining a storage virtual machine (SVM) to Active Directory after the SVM has been created." + }, + { + "type": "feature", + "category": "Amazon OpenSearch Service", + "contributor": "", + "description": "This release adds support for SkipUnavailable connection property for cross cluster search" + }, + { + "type": "feature", + "category": "Amazon Rekognition", + "contributor": "", + "description": "This release adds support for improved accuracy with user vector in Amazon Rekognition Face Search. 
Adds new APIs: AssociateFaces, CreateUser, DeleteUser, DisassociateFaces, ListUsers, SearchUsers, SearchUsersByImage. Also adds new face metadata that can be stored: user vector." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Sagemaker Neo now supports compilation for inferentia2 (ML_INF2) and Trainium1 (ML_TRN1) as available targets. With these devices, you can run your workloads at highest performance with lowest cost. inferentia2 (ML_INF2) is available in CMH and Trainium1 (ML_TRN1) is available in IAD currently" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.85.json b/.changes/2.20.85.json new file mode 100644 index 000000000000..ce7650672d7d --- /dev/null +++ b/.changes/2.20.85.json @@ -0,0 +1,84 @@ +{ + "version": "2.20.85", + "date": "2023-06-13", + "entries": [ + { + "type": "feature", + "category": "AWS CloudTrail", + "contributor": "", + "description": "This feature allows users to view dashboards for CloudTrail Lake event data stores." + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "contributor": "", + "description": "Add support for Security Hub Automation Rules" + }, + { + "type": "feature", + "category": "AWS SimSpace Weaver", + "contributor": "", + "description": "This release fixes using aws-us-gov ARNs in API calls and adds documentation for snapshot APIs." + }, + { + "type": "feature", + "category": "AWS WAFV2", + "contributor": "", + "description": "You can now detect and block fraudulent account creation attempts with the new AWS WAF Fraud Control account creation fraud prevention (ACFP) managed rule group AWSManagedRulesACFPRuleSet." 
+ }, + { + "type": "feature", + "category": "AWS Well-Architected Tool", + "contributor": "", + "description": "AWS Well-Architected now supports Profiles that help customers prioritize which questions to focus on first by providing a list of prioritized questions that are better aligned with their business goals and outcomes." + }, + { + "type": "feature", + "category": "Amazon CodeGuru Security", + "contributor": "", + "description": "Initial release of Amazon CodeGuru Security APIs" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release introduces a new feature, EC2 Instance Connect Endpoint, that enables you to connect to a resource over TCP, without requiring the resource to have a public IPv4 address." + }, + { + "type": "feature", + "category": "Amazon Lightsail", + "contributor": "", + "description": "This release adds pagination for the Get Certificates API operation." + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "contributor": "", + "description": "Integrate double encryption feature to SDKs." + }, + { + "type": "feature", + "category": "Amazon Verified Permissions", + "contributor": "", + "description": "GA release of Amazon Verified Permissions." + }, + { + "type": "feature", + "category": "EC2 Image Builder", + "contributor": "", + "description": "Change the Image Builder ImagePipeline dateNextRun field to more accurately describe the data." + }, + { + "type": "feature", + "category": "Elastic Disaster Recovery Service", + "contributor": "", + "description": "Added APIs to support network replication and recovery using AWS Elastic Disaster Recovery." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.86.json b/.changes/2.20.86.json new file mode 100644 index 000000000000..33a7fb16a578 --- /dev/null +++ b/.changes/2.20.86.json @@ -0,0 +1,48 @@ +{ + "version": "2.20.86", + "date": "2023-06-15", + "entries": [ + { + "type": "bugfix", + "category": "Amazon DynamoDB Enhanced", + "contributor": "breader124", + "description": "Thanks to this bugfix it'll be possible to create DynamoDB table containing\nsecondary indices when using no arguments `createTable` method from `DefaultDynamoDbTable`\nclass. Information about their presence might be expressed using annotations, but it was ignored\nand created tables didn't contain specified indices. Please note that it is still not possible\nto specify projections for indices using annotations. By default, all fields will be projected." + }, + { + "type": "feature", + "category": "AWS Audit Manager", + "contributor": "", + "description": "This release introduces 2 Audit Manager features: CSV exports and new manual evidence options. You can now export your evidence finder results in CSV format. In addition, you can now add manual evidence to a control by entering free-form text or uploading a file from your browser." + }, + { + "type": "feature", + "category": "Amazon Elastic File System", + "contributor": "", + "description": "Documentation updates for EFS." + }, + { + "type": "feature", + "category": "Amazon GuardDuty", + "contributor": "", + "description": "Updated descriptions for some APIs." + }, + { + "type": "feature", + "category": "Amazon Location Service", + "contributor": "", + "description": "Amazon Location Service adds categories to places, including filtering on those categories in searches. Also, you can now add metadata properties to your geofences." 
+ }, + { + "type": "feature", + "category": "DynamoDB Enhanced Client", + "contributor": "bmaizels", + "description": "Add EnhancedType parameters to static builder methods of StaticTableSchema and StaticImmitableTableSchema" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.87.json b/.changes/2.20.87.json new file mode 100644 index 000000000000..78a820206e9e --- /dev/null +++ b/.changes/2.20.87.json @@ -0,0 +1,42 @@ +{ + "version": "2.20.87", + "date": "2023-06-16", + "entries": [ + { + "type": "bugfix", + "category": "Amazon DynamoDB", + "contributor": "martinKindall", + "description": "Created static method EnumAttributeConverter::createWithNameAsKeys which creates a converter based on the Enum::name method to identify enums, rather than Enum::toString. This is preferable because Enum::name is final and cannot be overwritten, as opposed to Enum::toString. EnumAttributeConverter::create is kept as it is, for backward compatibility." + }, + { + "type": "feature", + "category": "AWS Account", + "contributor": "", + "description": "Improve pagination support for ListRegions" + }, + { + "type": "feature", + "category": "AWS Application Discovery Service", + "contributor": "", + "description": "Add Amazon EC2 instance recommendations export" + }, + { + "type": "feature", + "category": "AWS Identity and Access Management", + "contributor": "", + "description": "Documentation updates for AWS Identity and Access Management (IAM)." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "Updates the *InstanceStorageConfig APIs to support a new ResourceType: SCREEN_RECORDINGS to enable screen recording and specify the storage configurations for publishing the recordings. 
Also updates DescribeInstance and ListInstances APIs to include InstanceAccessUrl attribute in the API response." + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "contributor": "", + "description": "This release adds SDK support for request-payer request header and request-charged response header in the \"GetBucketAccelerateConfiguration\", \"ListMultipartUploads\", \"ListObjects\", \"ListObjectsV2\" and \"ListObjectVersions\" S3 APIs." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.88.json b/.changes/2.20.88.json new file mode 100644 index 000000000000..41dfe7f02632 --- /dev/null +++ b/.changes/2.20.88.json @@ -0,0 +1,54 @@ +{ + "version": "2.20.88", + "date": "2023-06-19", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "flittev", + "description": "`WaiterExecutor` recursive implementation changed to iterative" + }, + { + "type": "feature", + "category": "AWS CloudFormation", + "contributor": "", + "description": "Specify desired CloudFormation behavior in the event of ChangeSet execution failure using the CreateChangeSet OnStackFailure parameter" + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "This release adds support for creating cross region table/database resource links" + }, + { + "type": "feature", + "category": "AWS Price List Service", + "contributor": "", + "description": "This release updates the PriceListArn regex pattern." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "Documentation only update to address various tickets." 
+ }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "API changes to AWS Verified Access to include data from trust providers in logs" + }, + { + "type": "feature", + "category": "Amazon Route 53 Domains", + "contributor": "", + "description": "Update MaxItems upper bound to 1000 for ListPricesRequest" + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Amazon Sagemaker Autopilot releases CreateAutoMLJobV2 and DescribeAutoMLJobV2 for Autopilot customers with ImageClassification, TextClassification and Tabular problem type config support." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.89.json b/.changes/2.20.89.json new file mode 100644 index 000000000000..bc9f9f8cbf13 --- /dev/null +++ b/.changes/2.20.89.json @@ -0,0 +1,42 @@ +{ + "version": "2.20.89", + "date": "2023-06-20", + "entries": [ + { + "type": "feature", + "category": "AWS Config", + "contributor": "", + "description": "Updated ResourceType enum with new resource types onboarded by AWS Config in May 2023." + }, + { + "type": "feature", + "category": "AWS Lambda", + "contributor": "", + "description": "This release adds RecursiveInvocationException to the Invoke API and InvokeWithResponseStream API." + }, + { + "type": "feature", + "category": "Amazon Appflow", + "contributor": "", + "description": "This release adds new API to reset connector metadata cache" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Adds support for targeting Dedicated Host allocations by assetIds in AWS Outposts" + }, + { + "type": "feature", + "category": "Amazon Redshift", + "contributor": "", + "description": "Added support for custom domain names for Redshift Provisioned clusters. This feature enables customers to create a custom domain name and use ACM to generate fully secure connections to it." 
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.90.json b/.changes/2.20.90.json new file mode 100644 index 000000000000..a94f1ff1743c --- /dev/null +++ b/.changes/2.20.90.json @@ -0,0 +1,48 @@ +{ + "version": "2.20.90", + "date": "2023-06-21", + "entries": [ + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "contributor": "", + "description": "This release introduces the bandwidth reduction filter for the HEVC encoder, increases the limits of outputs per job, and updates support for the Nagra SDK to version 1.14.7." + }, + { + "type": "feature", + "category": "AWS Transfer Family", + "contributor": "", + "description": "This release adds a new parameter StructuredLogDestinations to CreateServer, UpdateServer APIs." + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "contributor": "", + "description": "Documentation updates for DynamoDB" + }, + { + "type": "feature", + "category": "Amazon EMR", + "contributor": "", + "description": "This release introduces a new Amazon EMR API called ListSupportedInstanceTypes that returns a list of all instance types supported by a given EMR release." + }, + { + "type": "feature", + "category": "AmazonMQ", + "contributor": "", + "description": "The Cross Region Disaster Recovery feature allows to replicate a broker's state from one region to another in order to provide customers with multi-region resiliency in the event of a regional outage." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "This release provides support in SageMaker for output files in training jobs to be uploaded without compression and enable customer to deploy uncompressed model from S3 to real-time inference Endpoints. 
In addition, ml.trn1n.32xlarge is added to supported instance type list in training job." + }, + { + "type": "feature", + "category": "Inspector2", + "contributor": "", + "description": "This release adds support for Software Bill of Materials (SBOM) export and the general availability of code scanning for AWS Lambda functions." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.91.json b/.changes/2.20.91.json new file mode 100644 index 000000000000..f31d6bdac006 --- /dev/null +++ b/.changes/2.20.91.json @@ -0,0 +1,36 @@ +{ + "version": "2.20.91", + "date": "2023-06-22", + "entries": [ + { + "type": "bugfix", + "category": "Maven config", + "contributor": "jensim", + "description": "Fix the scm.url in the maven project" + }, + { + "type": "feature", + "category": "AWSKendraFrontendService", + "contributor": "", + "description": "Introducing Amazon Kendra Retrieve API that can be used to retrieve relevant passages or text excerpts given an input query." + }, + { + "type": "feature", + "category": "AWS Step Functions", + "contributor": "", + "description": "Adds support for Versions and Aliases. Adds 8 operations: PublishStateMachineVersion, DeleteStateMachineVersion, ListStateMachineVersions, CreateStateMachineAlias, DescribeStateMachineAlias, UpdateStateMachineAlias, DeleteStateMachineAlias, ListStateMachineAliases" + }, + { + "type": "feature", + "category": "Amazon Chime SDK Identity", + "contributor": "", + "description": "AppInstanceBots can be configured to be invoked or not using the Target or the CHIME.mentions attribute for ChannelMessages" + }, + { + "type": "feature", + "category": "Amazon Chime SDK Messaging", + "contributor": "", + "description": "ChannelMessages can be made visible to sender and intended recipient rather than all channel members with the target attribute. For example, a user can send messages to a bot and receive messages back in a group channel without other members seeing them." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.92.json b/.changes/2.20.92.json new file mode 100644 index 000000000000..e433eae88956 --- /dev/null +++ b/.changes/2.20.92.json @@ -0,0 +1,30 @@ +{ + "version": "2.20.92", + "date": "2023-06-23", + "entries": [ + { + "type": "feature", + "category": "Amazon DevOps Guru", + "contributor": "", + "description": "This release adds support for encryption via customer managed keys." + }, + { + "type": "feature", + "category": "Amazon FSx", + "contributor": "", + "description": "Update to Amazon FSx documentation." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Documentation improvements for create, describe, and modify DB clusters and DB instances." + }, + { + "type": "feature", + "category": "Amazon Verified Permissions", + "contributor": "", + "description": "Added improved descriptions and new code samples to SDK documentation." + } + ] +} \ No newline at end of file diff --git a/.github/workflows/stale-issue.yml b/.github/workflows/stale-issue.yml index 13307c9a5a25..cf1afebdff5f 100644 --- a/.github/workflows/stale-issue.yml +++ b/.github/workflows/stale-issue.yml @@ -44,7 +44,7 @@ jobs: # Issue timing days-before-stale: 5 days-before-close: 2 - days-before-ancient: 1095 + days-before-ancient: 36500 # If you don't want to mark a issue as being ancient based on a # threshold of "upvotes", you can set this here. An "upvote" is diff --git a/CHANGELOG.md b/CHANGELOG.md index 78ce06942be0..a6efa51ef75e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,786 @@ +# __2.20.92__ __2023-06-23__ +## __Amazon DevOps Guru__ + - ### Features + - This release adds support for encryption via customer managed keys. + +## __Amazon FSx__ + - ### Features + - Update to Amazon FSx documentation. 
+ +## __Amazon Relational Database Service__ + - ### Features + - Documentation improvements for create, describe, and modify DB clusters and DB instances. + +## __Amazon Verified Permissions__ + - ### Features + - Added improved descriptions and new code samples to SDK documentation. + +# __2.20.91__ __2023-06-22__ +## __AWS Step Functions__ + - ### Features + - Adds support for Versions and Aliases. Adds 8 operations: PublishStateMachineVersion, DeleteStateMachineVersion, ListStateMachineVersions, CreateStateMachineAlias, DescribeStateMachineAlias, UpdateStateMachineAlias, DeleteStateMachineAlias, ListStateMachineAliases + +## __AWSKendraFrontendService__ + - ### Features + - Introducing Amazon Kendra Retrieve API that can be used to retrieve relevant passages or text excerpts given an input query. + +## __Amazon Chime SDK Identity__ + - ### Features + - AppInstanceBots can be configured to be invoked or not using the Target or the CHIME.mentions attribute for ChannelMessages + +## __Amazon Chime SDK Messaging__ + - ### Features + - ChannelMessages can be made visible to sender and intended recipient rather than all channel members with the target attribute. For example, a user can send messages to a bot and receive messages back in a group channel without other members seeing them. + +## __Maven config__ + - ### Bugfixes + - Fix the scm.url in the maven project + - Contributed by: [@jensim](https://github.com/jensim) + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@jensim](https://github.com/jensim) +# __2.20.90__ __2023-06-21__ +## __AWS Elemental MediaConvert__ + - ### Features + - This release introduces the bandwidth reduction filter for the HEVC encoder, increases the limits of outputs per job, and updates support for the Nagra SDK to version 1.14.7. + +## __AWS Transfer Family__ + - ### Features + - This release adds a new parameter StructuredLogDestinations to CreateServer, UpdateServer APIs. 
+ +## __Amazon DynamoDB__ + - ### Features + - Documentation updates for DynamoDB + +## __Amazon EMR__ + - ### Features + - This release introduces a new Amazon EMR API called ListSupportedInstanceTypes that returns a list of all instance types supported by a given EMR release. + +## __Amazon SageMaker Service__ + - ### Features + - This release provides support in SageMaker for output files in training jobs to be uploaded without compression and enable customer to deploy uncompressed model from S3 to real-time inference Endpoints. In addition, ml.trn1n.32xlarge is added to supported instance type list in training job. + +## __AmazonMQ__ + - ### Features + - The Cross Region Disaster Recovery feature allows to replicate a broker's state from one region to another in order to provide customers with multi-region resiliency in the event of a regional outage. + +## __Inspector2__ + - ### Features + - This release adds support for Software Bill of Materials (SBOM) export and the general availability of code scanning for AWS Lambda functions. + +# __2.20.89__ __2023-06-20__ +## __AWS Config__ + - ### Features + - Updated ResourceType enum with new resource types onboarded by AWS Config in May 2023. + +## __AWS Lambda__ + - ### Features + - This release adds RecursiveInvocationException to the Invoke API and InvokeWithResponseStream API. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Appflow__ + - ### Features + - This release adds new API to reset connector metadata cache + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Adds support for targeting Dedicated Host allocations by assetIds in AWS Outposts + +## __Amazon Redshift__ + - ### Features + - Added support for custom domain names for Redshift Provisioned clusters. This feature enables customers to create a custom domain name and use ACM to generate fully secure connections to it. 
+ +# __2.20.88__ __2023-06-19__ +## __AWS CloudFormation__ + - ### Features + - Specify desired CloudFormation behavior in the event of ChangeSet execution failure using the CreateChangeSet OnStackFailure parameter + +## __AWS Glue__ + - ### Features + - This release adds support for creating cross region table/database resource links + +## __AWS Price List Service__ + - ### Features + - This release updates the PriceListArn regex pattern. + +## __AWS SDK for Java v2__ + - ### Bugfixes + - `WaiterExecutor` recursive implementation changed to iterative + - Contributed by: [@flittev](https://github.com/flittev) + +## __Amazon EC2 Container Service__ + - ### Features + - Documentation only update to address various tickets. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - API changes to AWS Verified Access to include data from trust providers in logs + +## __Amazon Route 53 Domains__ + - ### Features + - Update MaxItems upper bound to 1000 for ListPricesRequest + +## __Amazon SageMaker Service__ + - ### Features + - Amazon Sagemaker Autopilot releases CreateAutoMLJobV2 and DescribeAutoMLJobV2 for Autopilot customers with ImageClassification, TextClassification and Tabular problem type config support. + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@flittev](https://github.com/flittev) +# __2.20.87__ __2023-06-16__ +## __AWS Account__ + - ### Features + - Improve pagination support for ListRegions + +## __AWS Application Discovery Service__ + - ### Features + - Add Amazon EC2 instance recommendations export + +## __AWS Identity and Access Management__ + - ### Features + - Documentation updates for AWS Identity and Access Management (IAM). + +## __Amazon Connect Service__ + - ### Features + - Updates the *InstanceStorageConfig APIs to support a new ResourceType: SCREEN_RECORDINGS to enable screen recording and specify the storage configurations for publishing the recordings. 
Also updates DescribeInstance and ListInstances APIs to include InstanceAccessUrl attribute in the API response. + +## __Amazon DynamoDB__ + - ### Bugfixes + - Created static method EnumAttributeConverter::createWithNameAsKeys which creates a converter based on the Enum::name method to identify enums, rather than Enum::toString. This is preferable because Enum::name is final and cannot be overwritten, as opposed to Enum::toString. EnumAttributeConverter::create is kept as it is, for backward compatibility. + - Contributed by: [@martinKindall](https://github.com/martinKindall) + +## __Amazon Simple Storage Service__ + - ### Features + - This release adds SDK support for request-payer request header and request-charged response header in the "GetBucketAccelerateConfiguration", "ListMultipartUploads", "ListObjects", "ListObjectsV2" and "ListObjectVersions" S3 APIs. + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@martinKindall](https://github.com/martinKindall) +# __2.20.86__ __2023-06-15__ +## __AWS Audit Manager__ + - ### Features + - This release introduces 2 Audit Manager features: CSV exports and new manual evidence options. You can now export your evidence finder results in CSV format. In addition, you can now add manual evidence to a control by entering free-form text or uploading a file from your browser. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon DynamoDB Enhanced__ + - ### Bugfixes + - Thanks to this bugfix it'll be possible to create DynamoDB table containing + secondary indices when using no arguments `createTable` method from `DefaultDynamoDbTable` + class. Information about their presence might be expressed using annotations, but it was ignored + and created tables didn't contain specified indices. Please note that it is still not possible + to specify projections for indices using annotations. By default, all fields will be projected. 
+ - Contributed by: [@breader124](https://github.com/breader124) + +## __Amazon Elastic File System__ + - ### Features + - Documentation updates for EFS. + +## __Amazon GuardDuty__ + - ### Features + - Updated descriptions for some APIs. + +## __Amazon Location Service__ + - ### Features + - Amazon Location Service adds categories to places, including filtering on those categories in searches. Also, you can now add metadata properties to your geofences. + +## __DynamoDB Enhanced Client__ + - ### Features + - Add EnhancedType parameters to static builder methods of StaticTableSchema and StaticImmitableTableSchema + - Contributed by: [@bmaizels](https://github.com/bmaizels) + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@bmaizels](https://github.com/bmaizels), [@breader124](https://github.com/breader124) +# __2.20.85__ __2023-06-13__ +## __AWS CloudTrail__ + - ### Features + - This feature allows users to view dashboards for CloudTrail Lake event data stores. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __AWS SecurityHub__ + - ### Features + - Add support for Security Hub Automation Rules + +## __AWS SimSpace Weaver__ + - ### Features + - This release fixes using aws-us-gov ARNs in API calls and adds documentation for snapshot APIs. + +## __AWS WAFV2__ + - ### Features + - You can now detect and block fraudulent account creation attempts with the new AWS WAF Fraud Control account creation fraud prevention (ACFP) managed rule group AWSManagedRulesACFPRuleSet. + +## __AWS Well-Architected Tool__ + - ### Features + - AWS Well-Architected now supports Profiles that help customers prioritize which questions to focus on first by providing a list of prioritized questions that are better aligned with their business goals and outcomes. 
+ +## __Amazon CodeGuru Security__ + - ### Features + - Initial release of Amazon CodeGuru Security APIs + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release introduces a new feature, EC2 Instance Connect Endpoint, that enables you to connect to a resource over TCP, without requiring the resource to have a public IPv4 address. + +## __Amazon Lightsail__ + - ### Features + - This release adds pagination for the Get Certificates API operation. + +## __Amazon Simple Storage Service__ + - ### Features + - Integrate double encryption feature to SDKs. + +## __Amazon Verified Permissions__ + - ### Features + - GA release of Amazon Verified Permissions. + +## __EC2 Image Builder__ + - ### Features + - Change the Image Builder ImagePipeline dateNextRun field to more accurately describe the data. + +## __Elastic Disaster Recovery Service__ + - ### Features + - Added APIs to support network replication and recovery using AWS Elastic Disaster Recovery. + +# __2.20.84__ __2023-06-12__ +## __AWS Amplify UI Builder__ + - ### Features + - AWS Amplify UIBuilder is launching Codegen UI, a new feature that enables you to generate your amplify uibuilder components and forms. + +## __AWS SDK for Java v2__ + - ### Features + - Add "unsafe" and "fromRemaining" AsyncRequestBody constructors for byte arrays and ByteBuffers + - Contributed by: [@StephenFlavin](https://github.com/StephenFlavin) + - Updated endpoint and partition metadata. + - `IdleConnectionReaper` now does not prevent `HttpClientConnectionManager` from getting GC'd in the case where an SDK client is created per request and not closed. + +## __Amazon DynamoDB__ + - ### Features + - Documentation updates for DynamoDB + +## __Amazon DynamoDB Streams__ + - ### Features + - Documentation updates for DynamoDB Streams + +## __Amazon FSx__ + - ### Features + - Amazon FSx for NetApp ONTAP now supports joining a storage virtual machine (SVM) to Active Directory after the SVM has been created. 
+ +## __Amazon OpenSearch Service__ + - ### Features + - This release adds support for SkipUnavailable connection property for cross cluster search + +## __Amazon Rekognition__ + - ### Features + - This release adds support for improved accuracy with user vector in Amazon Rekognition Face Search. Adds new APIs: AssociateFaces, CreateUser, DeleteUser, DisassociateFaces, ListUsers, SearchUsers, SearchUsersByImage. Also adds new face metadata that can be stored: user vector. + +## __Amazon SageMaker Service__ + - ### Features + - Sagemaker Neo now supports compilation for inferentia2 (ML_INF2) and Trainium1 (ML_TRN1) as available targets. With these devices, you can run your workloads at highest performance with lowest cost. inferentia2 (ML_INF2) is available in CMH and Trainium1 (ML_TRN1) is available in IAD currently + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@StephenFlavin](https://github.com/StephenFlavin) +# __2.20.83__ __2023-06-09__ +## __AWS Certificate Manager Private Certificate Authority__ + - ### Features + - Document-only update to refresh CLI documentation for AWS Private CA. No change to the service. + +## __AWS SDK for Java v2__ + - ### Bugfixes + - Fixed issue with leased connection leaks when threads executing HTTP connections with Apache HttpClient were interrupted while the connection was in progress. + +## __Amazon Connect Service__ + - ### Features + - This release adds search APIs for Prompts, Quick Connects and Hours of Operations, which can be used to search for those resources within a Connect Instance. + +## __Netty NIO HTTP Client__ + - ### Bugfixes + - By default, Netty threads are blocked during dns resolution, namely InetAddress.getByName is used under the hood. Now, there's an option to configure the NettyNioAsyncHttpClient in order to use a non blocking dns resolution strategy. 
+ - Contributed by: [@martinKindall](https://github.com/martinKindall) + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@martinKindall](https://github.com/martinKindall) +# __2.20.82__ __2023-06-08__ +## __AWS Comprehend Medical__ + - ### Features + - This release supports a new set of entities and traits. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __AWS STS__ + - ### Features + - Updates the core STS credential provider logic to return AwsSessionCredentials instead of an STS-specific class, and adds expirationTime to AwsSessionCredentials + +## __AWS Service Catalog__ + - ### Features + - New parameter added in ServiceCatalog DescribeProvisioningArtifact api - IncludeProvisioningArtifactParameters. This parameter can be used to return information about the parameters used to provision the product + +## __Amazon Athena__ + - ### Features + - You can now define custom spark properties at start of the session for use cases like cluster encryption, table formats, and general Spark tuning. + +## __Amazon Timestream Write__ + - ### Features + - This release adds the capability for customers to define how their data should be partitioned, optimizing for certain access patterns. This definition will take place as a part of the table creation. + +## __Payment Cryptography Control Plane__ + - ### Features + - Initial release of AWS Payment Cryptography Control Plane service for creating and managing cryptographic keys used during card payment processing. + +## __Payment Cryptography Data Plane__ + - ### Features + - Initial release of AWS Payment Cryptography DataPlane Plane service for performing cryptographic operations typically used during card payment processing. 
+ +# __2.20.81__ __2023-06-07__ +## __AWS CloudFormation__ + - ### Features + - AWS CloudFormation StackSets is updating the deployment experience for all stackset operations to skip suspended AWS accounts during deployments. StackSets will skip target AWS accounts that are suspended and set the Detailed Status of the corresponding stack instances as SKIPPED_SUSPENDED_ACCOUNT + +## __AWS Direct Connect__ + - ### Features + - This update corrects the jumbo frames mtu values from 9100 to 8500 for transit virtual interfaces. + +## __AWS IoT Core Device Advisor__ + - ### Features + - AWS IoT Core Device Advisor now supports new Qualification Suite test case list. With this update, customers can more easily create new qualification test suite with an empty rootGroup input. + +## __Amazon CloudWatch Logs__ + - ### Features + - This change adds support for account level data protection policies using 3 new APIs, PutAccountPolicy, DeleteAccountPolicy and DescribeAccountPolicy. DescribeLogGroup API has been modified to indicate if account level policy is applied to the LogGroup via "inheritedProperties" list in the response. + +## __Amazon Connect Customer Profiles__ + - ### Features + - This release introduces event stream related APIs. + +## __Amazon EMR Containers__ + - ### Features + - EMR on EKS adds support for log rotation of Spark container logs with EMR-6.11.0 onwards, to the StartJobRun API. + +# __2.20.80__ __2023-06-06__ +## __AWS Identity and Access Management__ + - ### Features + - This release updates the AccountAlias regex pattern with the same length restrictions enforced by the length constraint. + +## __AWS IoT__ + - ### Features + - Adding IoT Device Management Software Package Catalog APIs to register, store, and report system software packages, along with their versions and metadata in a centralized location. 
+ +## __AWS IoT Data Plane__ + - ### Features + - Update thing shadow name regex to allow '$' character + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __AWS Signer__ + - ### Features + - AWS Signer is launching Container Image Signing, a new feature that enables you to sign and verify container images. This feature enables you to validate that only container images you approve are used in your enterprise. + +## __Amazon Connect Service__ + - ### Features + - GetMetricDataV2 API is now available in AWS GovCloud(US) region. + +## __Amazon EMR__ + - ### Features + - This release provides customers the ability to specify an allocation strategy amongst PRICE_CAPACITY_OPTIMIZED, CAPACITY_OPTIMIZED, LOWEST_PRICE, DIVERSIFIED for Spot instances in Instance Fleet cluster. This enables customers to choose an allocation strategy best suited for their workload. + +## __Amazon Lex Model Building V2__ + - ### Features + - This release adds support for Lex Developers to create test sets and to execute those test-sets against their bots. + +## __Amazon QuickSight__ + - ### Features + - QuickSight support for pivot table field collapse state, radar chart range scale and multiple scope options in conditional formatting. + +## __Amazon Simple Queue Service__ + - ### Features + - Amazon SQS adds three new APIs - StartMessageMoveTask, CancelMessageMoveTask, and ListMessageMoveTasks to automate redriving messages from dead-letter queues to source queues or a custom destination. + +## __Inspector2__ + - ### Features + - Adds new response properties and request parameters for 'last scanned at' on the ListCoverage operation. This feature allows you to search and view the date of which your resources were last scanned by Inspector. 
+ +# __2.20.79__ __2023-06-05__ +## __AWS CloudFormation__ + - ### Features + - AWS CloudFormation StackSets provides customers with three new APIs to activate, deactivate, and describe AWS Organizations trusted access which is needed to get started with service-managed StackSets. + +## __AWS Key Management Service__ + - ### Features + - This release includes feature to import customer's asymmetric (RSA and ECC) and HMAC keys into KMS. It also includes feature to allow customers to specify number of days to schedule a KMS key deletion as a policy condition key. + +## __AWS Lambda__ + - ### Features + - Add Ruby 3.2 (ruby3.2) Runtime support to AWS Lambda. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + + - ### Bugfixes + - Upgrading AWS CRT dependency to v0.21.17. This version contains minor fixes and updates + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Making InstanceTagAttribute as the required parameter for the DeregisterInstanceEventNotificationAttributes and RegisterInstanceEventNotificationAttributes APIs. + +## __Amazon Fraud Detector__ + - ### Features + - Added new variable types, new DateTime data type, and new rules engine functions for interacting and working with DateTime data types. + +## __Amazon Keyspaces__ + - ### Features + - This release adds support for MRR GA launch, and includes multiregion support in create-keyspace, get-keyspace, and list-keyspace. + +## __AmazonMWAA__ + - ### Features + - This release adds ROLLING_BACK and CREATING_SNAPSHOT environment statuses for Amazon MWAA environments. + +## __FinSpace User Environment Management service__ + - ### Features + - Releasing new Managed kdb Insights APIs + +# __2.20.78__ __2023-06-02__ +## __AWS CloudTrail__ + - ### Features + - This feature allows users to start and stop event ingestion on a CloudTrail Lake event data store. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. 
+ +## __AWS WAFV2__ + - ### Features + - Added APIs to describe managed products. The APIs retrieve information about rule groups that are managed by AWS and by AWS Marketplace sellers. + +## __Amazon Athena__ + - ### Features + - This release introduces the DeleteCapacityReservation API and the ability to manage capacity reservations using CloudFormation + +## __Amazon SageMaker Service__ + - ### Features + - This release adds Selective Execution feature that allows SageMaker Pipelines users to run selected steps in a pipeline. + +# __2.20.77__ __2023-06-01__ +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __AWS WAFV2__ + - ### Features + - Corrected the information for the header order FieldToMatch setting + +## __Alexa For Business__ + - ### Features + - Alexa for Business has been deprecated and is no longer supported. + +## __Amazon Appflow__ + - ### Features + - Added ability to select DataTransferApiType for DescribeConnector and CreateFlow requests when using Async supported connectors. Added supportedDataTransferType to DescribeConnector/DescribeConnectors/ListConnector response. + +## __Amazon Connect Customer Profiles__ + - ### Features + - This release introduces calculated attribute related APIs. + +## __Amazon Interactive Video Service__ + - ### Features + - API Update for IVS Advanced Channel type + +## __Amazon SageMaker Service__ + - ### Features + - Amazon Sagemaker Autopilot adds support for Parquet file input to NLP text classification jobs. + +# __2.20.76__ __2023-05-31__ +## __AWS Config__ + - ### Features + - Resource Types Exclusion feature launch by AWS Config + +## __AWS SDK for Java v2__ + - ### Bugfixes + - Fix an issue where the optimal number of parts calculated could be higher than 10,000 + +## __AWS Service Catalog__ + - ### Features + - Documentation updates for ServiceCatalog. 
+ +## __AWSMainframeModernization__ + - ### Features + - Adds an optional create-only 'roleArn' property to Application resources. Enables PS and PO data set org types. + +## __Amazon Fraud Detector__ + - ### Features + - This release enables publishing event predictions from Amazon Fraud Detector (AFD) to Amazon EventBridge. For example, after getting predictions from AFD, Amazon EventBridge rules can be configured to trigger notification through an SNS topic, send a message with SES, or trigger Lambda workflows. + +## __Amazon HealthLake__ + - ### Features + - This release adds a new request parameter to the CreateFHIRDatastore API operation. IdentityProviderConfiguration specifies how you want to authenticate incoming requests to your Healthlake Data Store. + +## __Amazon Relational Database Service__ + - ### Features + - This release adds support for changing the engine for Oracle using the ModifyDbInstance API + +## __Amazon WorkSpaces Web__ + - ### Features + - WorkSpaces Web now allows you to control which IP addresses your WorkSpaces Web portal may be accessed from. + +# __2.20.75__ __2023-05-30__ +## __AWS Glue__ + - ### Features + - Added Runtime parameter to allow selection of Ray Runtime + +## __AWS Ground Station__ + - ### Features + - Updating description of GetMinuteUsage to be clearer. + +## __AWS IoT FleetWise__ + - ### Features + - Campaigns now support selecting Timestream or S3 as the data destination, Signal catalogs now support "Deprecation" keyword released in VSS v2.1 and "Comment" keyword released in VSS v3.0 + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __AWS SecurityHub__ + - ### Features + - Added new resource detail objects to ASFF, including resources for AwsGuardDutyDetector, AwsAmazonMqBroker, AwsEventSchemasRegistry, AwsAppSyncGraphQlApi and AwsStepFunctionStateMachine. 
+ +## __AWS WAFV2__ + - ### Features + - This SDK release provides customers the ability to use Header Order as a field to match. + +## __Amazon Chime SDK Voice__ + - ### Features + - Added optional CallLeg field to StartSpeakerSearchTask API request + +## __Amazon Location Service__ + - ### Features + - This release adds API support for political views for the maps service APIs: CreateMap, UpdateMap, DescribeMap. + +## __Amazon MemoryDB__ + - ### Features + - Amazon MemoryDB for Redis now supports AWS Identity and Access Management authentication access to Redis clusters starting with redis-engine version 7.0 + +## __Amazon Personalize__ + - ### Features + - This release provides support for the exclusion of certain columns for training when creating a solution and creating or updating a recommender with Amazon Personalize. + +## __Amazon Polly__ + - ### Features + - Amazon Polly adds 2 new voices - Sofie (da-DK) and Niamh (en-IE) + +## __Amazon Security Lake__ + - ### Features + - Log sources are now versioned. AWS log sources and custom sources will now come with a version identifier that enables producers to vend multiple schema versions to subscribers. Security Lake API have been refactored to more closely align with AWS API conventions. + +# __2.20.74__ __2023-05-26__ +## __AWS IoT Wireless__ + - ### Features + - Add Multicast Group support in Network Analyzer Configuration. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Connect Service__ + - ### Features + - Documentation update for a new Initiation Method value in DescribeContact API + +## __Amazon SageMaker Service__ + - ### Features + - Added ml.p4d and ml.inf1 as supported instance type families for SageMaker Notebook Instances. + +# __2.20.73__ __2023-05-25__ +## __AWS CodePipeline__ + - ### Features + - Add PollingDisabledAt time information in PipelineMetadata object of GetPipeline API. 
+ +## __AWS Glue__ + - ### Features + - Added ability to create data quality rulesets for shared, cross-account Glue Data Catalog tables. Added support for dataset comparison rules through a new parameter called AdditionalDataSources. Enhanced the data quality results with a map containing profiled metric values. + +## __AWS Migration Hub Refactor Spaces__ + - ### Features + - This SDK update allows for path parameter syntax to be passed to the CreateRoute API. Path parameter syntax require parameters to be enclosed in {} characters. This update also includes a new AppendSourcePath field which lets users forward the source path to the Service URL endpoint. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon GameLift__ + - ### Features + - GameLift FleetIQ users can now filter game server claim requests to exclude servers on instances that are draining. + +## __Amazon SageMaker Service__ + - ### Features + - Amazon SageMaker Automatic Model Tuning now supports enabling Autotune for tuning jobs which can choose tuning job configurations. + +## __Application Auto Scaling__ + - ### Features + - With this release, ElastiCache customers will be able to use predefined metricType "ElastiCacheDatabaseCapacityUsageCountedForEvictPercentage" for their ElastiCache instances. + +# __2.20.72__ __2023-05-24__ +## __AWS AppSync__ + - ### Features + - This release introduces AppSync Merged APIs, which provide the ability to compose multiple source APIs into a single federated/merged API. + +## __AWS Cost and Usage Report Service__ + - ### Features + - Add support for split cost allocation data on a report. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. 
+ +## __Amazon Connect Service__ + - ### Features + - Amazon Connect Evaluation Capabilities: validation improvements + +## __Amazon SageMaker Service__ + - ### Features + - SageMaker now provides an instantaneous deployment recommendation through the DescribeModel API + +# __2.20.71__ __2023-05-23__ +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon SageMaker Service__ + - ### Features + - Added ModelNameEquals, ModelPackageVersionArnEquals in request and ModelName, SamplePayloadUrl, ModelPackageVersionArn in response of ListInferenceRecommendationsJobs API. Added Invocation timestamps in response of DescribeInferenceRecommendationsJob API & ListInferenceRecommendationsJobSteps API. + +## __Amazon Translate__ + - ### Features + - Added support for calling TranslateDocument API. + +## __Firewall Management Service__ + - ### Features + - Fixes issue that could cause calls to GetAdminScope and ListAdminAccountsForOrganization to return a 500 Internal Server error. + +# __2.20.70__ __2023-05-22__ +## __AWS Backup__ + - ### Features + - Added support for tags on restore. + +## __AWS SDK for Java v2__ + - ### Features + - Add client configuration overriding of SCHEDULED_EXECUTOR_SERVICE option + - Contributed by: [@scrocquesel](https://github.com/scrocquesel) + - This commit adds a new ErrorType core metric that is recorded for all failed API call attempts. The ErrorType records the general category of error that occurred for a failed API call attempt. Those categories are: + + - Throttling errors + - Service errors other than throttling + - I/O errors + - API call or API call attempt timeouts + + The intent of this metric is to help locate possible issues at a glance and help direct further debugging or investigation. 
+ +## __Amazon Pinpoint__ + - ### Features + - Amazon Pinpoint is deprecating the tags parameter in the UpdateSegment, UpdateCampaign, UpdateEmailTemplate, UpdateSmsTemplate, UpdatePushTemplate, UpdateInAppTemplate and UpdateVoiceTemplate. Amazon Pinpoint will end support for the tags parameter by May 22, 2023. + +## __Amazon QuickSight__ + - ### Features + - Add support for Asset Bundle, Geospatial Heatmaps. + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@scrocquesel](https://github.com/scrocquesel) +# __2.20.69__ __2023-05-19__ +## __AWS Backup__ + - ### Features + - Add ResourceArn, ResourceType, and BackupVaultName to ListRecoveryPointsByLegalHold API response. + +## __AWS Elemental MediaPackage v2__ + - ### Features + - Adds support for the MediaPackage Live v2 API + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Connect Cases__ + - ### Features + - This release adds the ability to create fields with type Url through the CreateField API. For more information see https://docs.aws.amazon.com/cases/latest/APIReference/Welcome.html + +## __Amazon Simple Email Service__ + - ### Features + - This release allows customers to update scaling mode property of dedicated IP pools with PutDedicatedIpPoolScalingAttributes call. + +# __2.20.68__ __2023-05-18__ +## __AWS CloudTrail__ + - ### Features + - Add ConflictException to PutEventSelectors, add (Channel/EDS)ARNInvalidException to Tag APIs. These exceptions provide customers with more specific error messages instead of internal errors. 
+ +## __AWS Compute Optimizer__ + - ### Features + - In this launch, we add support for showing integration status with external metric providers such as Instana, Datadog ...etc in GetEC2InstanceRecommendations and ExportEC2InstanceRecommendations apis + +## __AWS Elemental MediaConvert__ + - ### Features + - This release introduces a new MXF Profile for XDCAM which is strictly compliant with the SMPTE RDD 9 standard and improved handling of output name modifiers. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + + - ### Bugfixes + - This update fixes an issue where CompletableFutures are leaked/never completed when the submission to the FUTURE_COMPLETE_EXECUTOR is rejected. + + By default, the SDK uses `2 * number of cores` (with a maximum of 64), and uses bounded queue of size 1000. In cases where the throughput to the client exceeds the executor's ability to keep up, it would reject executions. Before this change this would lead to leaked futures. + +## __AWS Security Token Service__ + - ### Features + - API updates for the AWS Security Token Service + +## __Amazon Athena__ + - ### Features + - Removing SparkProperties from EngineConfiguration object for StartSession API call + +## __Amazon Connect Service__ + - ### Features + - You can programmatically create and manage prompts using APIs, for example, to extract prompts stored within Amazon Connect and add them to your Amazon S3 bucket. AWS CloudTrail, AWS CloudFormation and tagging are supported. + +## __Amazon EC2 Container Service__ + - ### Features + - Documentation only release to address various tickets. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Add support for i4g.large, i4g.xlarge, i4g.2xlarge, i4g.4xlarge, i4g.8xlarge and i4g.16xlarge instances powered by AWS Graviton2 processors that deliver up to 15% better compute performance than our other storage-optimized instances. 
+ +## __Amazon Relational Database Service__ + - ### Features + - RDS documentation update for the EngineVersion parameter of ModifyDBSnapshot + +## __Amazon SageMaker geospatial capabilities__ + - ### Features + - This release makes ExecutionRoleArn a required field in the StartEarthObservationJob API. + +## __S3 Transfer Manager__ + - ### Bugfixes + - Fixed the issue where S3 Transfer Manager attempted to load AWS CRT classes when Java based S3 client was used. See [#3936](https://github.com/aws/aws-sdk-java-v2/issues/3936). + # __2.20.67__ __2023-05-16__ ## __AWS Direct Connect__ - ### Features diff --git a/README.md b/README.md index 7cad11c9dbac..1635c0cbb96c 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ [![Gitter](https://badges.gitter.im/aws/aws-sdk-java-v2.svg)](https://gitter.im/aws/aws-sdk-java-v2?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![codecov](https://codecov.io/gh/aws/aws-sdk-java-v2/branch/master/graph/badge.svg)](https://codecov.io/gh/aws/aws-sdk-java-v2) -[![All Contributors](https://img.shields.io/badge/all_contributors-88-orange.svg?style=flat-square)](#contributors-) +[![All Contributors](https://img.shields.io/badge/all_contributors-89-orange.svg?style=flat-square)](#contributors-) The **AWS SDK for Java 2.0** is a rewrite of 1.0 with some great new features. As with version 1.0, @@ -52,7 +52,7 @@ To automatically manage module versions (currently all modules have the same ver software.amazon.awssdk bom - 2.20.67 + 2.20.92 pom import @@ -86,12 +86,12 @@ Alternatively you can add dependencies for the specific services you use only: software.amazon.awssdk ec2 - 2.20.67 + 2.20.92 software.amazon.awssdk s3 - 2.20.67 + 2.20.92 ``` @@ -103,7 +103,7 @@ You can import the whole SDK into your project (includes *ALL* services). Please software.amazon.awssdk aws-sdk-java - 2.20.67 + 2.20.92 ``` @@ -304,6 +304,8 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d Andy Kiesler
Andy Kiesler

💻 Martin
Martin

💻 Paulo Lieuthier
Paulo Lieuthier

💻 + Sébastien Crocquesel
Sébastien Crocquesel

💻 + David Negrete
David Negrete

💻 diff --git a/archetypes/archetype-app-quickstart/pom.xml b/archetypes/archetype-app-quickstart/pom.xml index 88294e74dfbc..aeba019c29b0 100644 --- a/archetypes/archetype-app-quickstart/pom.xml +++ b/archetypes/archetype-app-quickstart/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 diff --git a/archetypes/archetype-lambda/pom.xml b/archetypes/archetype-lambda/pom.xml index 82fed8b7c5fb..4c713738fbba 100644 --- a/archetypes/archetype-lambda/pom.xml +++ b/archetypes/archetype-lambda/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 archetype-lambda diff --git a/archetypes/archetype-tools/pom.xml b/archetypes/archetype-tools/pom.xml index 5608d0e3f6d4..a5fcd4de62be 100644 --- a/archetypes/archetype-tools/pom.xml +++ b/archetypes/archetype-tools/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 diff --git a/archetypes/pom.xml b/archetypes/pom.xml index f9a82189cd41..045b2726a5f6 100644 --- a/archetypes/pom.xml +++ b/archetypes/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 archetypes diff --git a/aws-sdk-java/pom.xml b/aws-sdk-java/pom.xml index 4584cc830b6a..27465ba8b86a 100644 --- a/aws-sdk-java/pom.xml +++ b/aws-sdk-java/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ../pom.xml aws-sdk-java @@ -1738,6 +1738,31 @@ Amazon AutoScaling, etc). 
osis ${awsjavasdk.version} + + software.amazon.awssdk + mediapackagev2 + ${awsjavasdk.version} + + + software.amazon.awssdk + paymentcryptographydata + ${awsjavasdk.version} + + + software.amazon.awssdk + paymentcryptography + ${awsjavasdk.version} + + + software.amazon.awssdk + codegurusecurity + ${awsjavasdk.version} + + + software.amazon.awssdk + verifiedpermissions + ${awsjavasdk.version} + ${project.artifactId}-${project.version} diff --git a/bom-internal/pom.xml b/bom-internal/pom.xml index 78db2d4d7298..b08dff91d69d 100644 --- a/bom-internal/pom.xml +++ b/bom-internal/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 @@ -134,6 +134,16 @@ netty-buffer ${netty.version} + + io.netty + netty-resolver + ${netty.version} + + + io.netty + netty-resolver-dns + ${netty.version} + org.reactivestreams reactive-streams diff --git a/bom/pom.xml b/bom/pom.xml index 40520dab3de2..b256978d1ff8 100644 --- a/bom/pom.xml +++ b/bom/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ../pom.xml bom @@ -1888,6 +1888,31 @@ osis ${awsjavasdk.version} + + software.amazon.awssdk + mediapackagev2 + ${awsjavasdk.version} + + + software.amazon.awssdk + paymentcryptographydata + ${awsjavasdk.version} + + + software.amazon.awssdk + paymentcryptography + ${awsjavasdk.version} + + + software.amazon.awssdk + codegurusecurity + ${awsjavasdk.version} + + + software.amazon.awssdk + verifiedpermissions + ${awsjavasdk.version} + diff --git a/bundle/pom.xml b/bundle/pom.xml index 7e245d7b1694..a4e78be89e8e 100644 --- a/bundle/pom.xml +++ b/bundle/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT bundle jar diff --git a/codegen-lite-maven-plugin/pom.xml b/codegen-lite-maven-plugin/pom.xml index 3717885c3cdd..143af638c99f 100644 --- a/codegen-lite-maven-plugin/pom.xml +++ b/codegen-lite-maven-plugin/pom.xml @@ -22,7 +22,7 @@ 
software.amazon.awssdk aws-sdk-java-pom - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ../pom.xml codegen-lite-maven-plugin diff --git a/codegen-lite/pom.xml b/codegen-lite/pom.xml index c69a57ecc043..1dce75dc7b1d 100644 --- a/codegen-lite/pom.xml +++ b/codegen-lite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT codegen-lite AWS Java SDK :: Code Generator Lite diff --git a/codegen-maven-plugin/pom.xml b/codegen-maven-plugin/pom.xml index 9315c59897f8..151355be6d21 100644 --- a/codegen-maven-plugin/pom.xml +++ b/codegen-maven-plugin/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ../pom.xml codegen-maven-plugin diff --git a/codegen/pom.xml b/codegen/pom.xml index 44b44f70c55d..d374e8618f34 100644 --- a/codegen/pom.xml +++ b/codegen/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT codegen AWS Java SDK :: Code Generator diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/EndpointProviderTasks.java b/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/EndpointProviderTasks.java index 2205754fe02e..d6a91a967380 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/EndpointProviderTasks.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/EndpointProviderTasks.java @@ -52,9 +52,11 @@ protected List createTasks() throws Exception { tasks.add(generateDefaultProvider()); tasks.addAll(generateInterceptors()); if (shouldGenerateEndpointTests()) { - tasks.add(generateClientTests()); tasks.add(generateProviderTests()); } + if (shouldGenerateEndpointTests() && shouldGenerateClientEndpointTests()) { + tasks.add(generateClientTests()); + } if (hasClientContextParams()) { tasks.add(generateClientContextParams()); } @@ -118,6 +120,13 @@ private boolean shouldGenerateEndpointTests() { 
!generatorTaskParams.getModel().getEndpointTestSuiteModel().getTestCases().isEmpty(); } + private boolean shouldGenerateClientEndpointTests() { + CustomizationConfig customizationConfig = generatorTaskParams.getModel().getCustomizationConfig(); + boolean noTestCasesHaveOperationInputs = model.getEndpointTestSuiteModel().getTestCases().stream() + .noneMatch(t -> t.getOperationInputs() != null); + return noTestCasesHaveOperationInputs && Boolean.TRUE.equals(customizationConfig.isGenerateEndpointClientTests()); + } + private boolean hasClientContextParams() { Map clientContextParams = model.getClientContextParams(); return clientContextParams != null && !clientContextParams.isEmpty(); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java index d5aff3419b33..6913bc6e83f8 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java @@ -217,6 +217,11 @@ public class CustomizationConfig { */ private boolean skipEndpointTestGeneration; + /** + * Whether to generate client-level endpoint tests; overrides test case criteria such as operation inputs. + */ + private boolean generateEndpointClientTests; + /** * A mapping from the skipped test's description to the reason why it's being skipped. 
*/ @@ -577,6 +582,14 @@ public void setSkipEndpointTestGeneration(boolean skipEndpointTestGeneration) { this.skipEndpointTestGeneration = skipEndpointTestGeneration; } + public boolean isGenerateEndpointClientTests() { + return generateEndpointClientTests; + } + + public void setGenerateEndpointClientTests(boolean generateEndpointClientTests) { + this.generateEndpointClientTests = generateEndpointClientTests; + } + public boolean useGlobalEndpoint() { return useGlobalEndpoint; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/AsyncClientBuilderClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/AsyncClientBuilderClass.java index f8731d7dad07..bd0e6023d53e 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/AsyncClientBuilderClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/AsyncClientBuilderClass.java @@ -18,8 +18,12 @@ import com.squareup.javapoet.ClassName; import com.squareup.javapoet.MethodSpec; import com.squareup.javapoet.ParameterizedTypeName; +import com.squareup.javapoet.TypeName; import com.squareup.javapoet.TypeSpec; import java.net.URI; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; import javax.lang.model.element.Modifier; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.token.credentials.SdkTokenProvider; @@ -32,6 +36,9 @@ import software.amazon.awssdk.codegen.utils.AuthUtils; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.protocols.query.interceptor.QueryParametersToBodyInterceptor; +import software.amazon.awssdk.utils.CollectionUtils; public class AsyncClientBuilderClass implements ClassSpec { private final IntermediateModel model; @@ -119,26 +126,53 @@ private 
MethodSpec endpointProviderMethod() { } private MethodSpec buildClientMethod() { - return MethodSpec.methodBuilder("buildClient") - .addAnnotation(Override.class) - .addModifiers(Modifier.PROTECTED, Modifier.FINAL) - .returns(clientInterfaceName) - .addStatement("$T clientConfiguration = super.asyncClientConfiguration()", SdkClientConfiguration.class) - .addStatement("this.validateClientOptions(clientConfiguration)") - .addStatement("$T endpointOverride = null", URI.class) - .addCode("if (clientConfiguration.option($T.ENDPOINT_OVERRIDDEN) != null" - + "&& $T.TRUE.equals(clientConfiguration.option($T.ENDPOINT_OVERRIDDEN))) {" - + "endpointOverride = clientConfiguration.option($T.ENDPOINT);" - + "}", - SdkClientOption.class, Boolean.class, SdkClientOption.class, SdkClientOption.class) - .addStatement("$T serviceClientConfiguration = $T.builder()" - + ".overrideConfiguration(overrideConfiguration())" - + ".region(clientConfiguration.option($T.AWS_REGION))" - + ".endpointOverride(endpointOverride)" - + ".build()", - serviceConfigClassName, serviceConfigClassName, AwsClientOption.class) - .addStatement("return new $T(serviceClientConfiguration, clientConfiguration)", clientClassName) - .build(); + MethodSpec.Builder b = MethodSpec.methodBuilder("buildClient") + .addAnnotation(Override.class) + .addModifiers(Modifier.PROTECTED, Modifier.FINAL) + .returns(clientInterfaceName) + .addStatement("$T clientConfiguration = super.asyncClientConfiguration()", + SdkClientConfiguration.class); + + addQueryProtocolInterceptors(b); + + return b.addStatement("this.validateClientOptions(clientConfiguration)") + .addStatement("$T endpointOverride = null", URI.class) + .addCode("if (clientConfiguration.option($T.ENDPOINT_OVERRIDDEN) != null" + + "&& $T.TRUE.equals(clientConfiguration.option($T.ENDPOINT_OVERRIDDEN))) {" + + "endpointOverride = clientConfiguration.option($T.ENDPOINT);" + + "}", + SdkClientOption.class, Boolean.class, SdkClientOption.class, SdkClientOption.class) + 
.addStatement("$T serviceClientConfiguration = $T.builder()" + + ".overrideConfiguration(overrideConfiguration())" + + ".region(clientConfiguration.option($T.AWS_REGION))" + + ".endpointOverride(endpointOverride)" + + ".build()", + serviceConfigClassName, serviceConfigClassName, AwsClientOption.class) + .addStatement("return new $T(serviceClientConfiguration, clientConfiguration)", clientClassName) + .build(); + } + + private MethodSpec.Builder addQueryProtocolInterceptors(MethodSpec.Builder b) { + if (!model.getMetadata().isQueryProtocol()) { + return b; + } + + TypeName listType = ParameterizedTypeName.get(List.class, ExecutionInterceptor.class); + + b.addStatement("$T interceptors = clientConfiguration.option($T.EXECUTION_INTERCEPTORS)", + listType, SdkClientOption.class) + .addStatement("$T queryParamsToBodyInterceptor = $T.singletonList(new $T())", + listType, Collections.class, QueryParametersToBodyInterceptor.class) + .addStatement("$T customizationInterceptors = new $T<>()", listType, ArrayList.class); + + List customInterceptors = model.getCustomizationConfig().getInterceptors(); + customInterceptors.forEach(i -> b.addStatement("customizationInterceptors.add(new $T())", ClassName.bestGuess(i))); + + b.addStatement("interceptors = $T.mergeLists(queryParamsToBodyInterceptor, interceptors)", CollectionUtils.class) + .addStatement("interceptors = $T.mergeLists(customizationInterceptors, interceptors)", CollectionUtils.class); + + return b.addStatement("clientConfiguration = clientConfiguration.toBuilder().option($T.EXECUTION_INTERCEPTORS, " + + "interceptors).build()", SdkClientOption.class); } private MethodSpec bearerTokenProviderMethod() { diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java index 1be4d730040e..72d534d5ab99 100644 --- 
a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java @@ -28,7 +28,6 @@ import com.squareup.javapoet.TypeSpec; import com.squareup.javapoet.TypeVariableName; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; @@ -59,7 +58,6 @@ import software.amazon.awssdk.core.signer.Signer; import software.amazon.awssdk.http.Protocol; import software.amazon.awssdk.http.SdkHttpConfigurationOption; -import software.amazon.awssdk.protocols.query.interceptor.QueryParametersToBodyInterceptor; import software.amazon.awssdk.utils.AttributeMap; import software.amazon.awssdk.utils.CollectionUtils; import software.amazon.awssdk.utils.StringUtils; @@ -262,8 +260,10 @@ private MethodSpec finalizeServiceConfigurationMethod() { builtInInterceptors.add(endpointRulesSpecUtils.authSchemesInterceptorName()); builtInInterceptors.add(endpointRulesSpecUtils.requestModifierInterceptorName()); - for (String interceptor : model.getCustomizationConfig().getInterceptors()) { - builtInInterceptors.add(ClassName.bestGuess(interceptor)); + if (!model.getMetadata().isQueryProtocol()) { + for (String interceptor : model.getCustomizationConfig().getInterceptors()) { + builtInInterceptors.add(ClassName.bestGuess(interceptor)); + } } for (ClassName interceptor : builtInInterceptors) { @@ -288,16 +288,6 @@ private MethodSpec finalizeServiceConfigurationMethod() { builder.addCode("interceptors = $T.mergeLists(interceptors, config.option($T.EXECUTION_INTERCEPTORS));\n", CollectionUtils.class, SdkClientOption.class); - if (model.getMetadata().isQueryProtocol()) { - TypeName listType = ParameterizedTypeName.get(List.class, ExecutionInterceptor.class); - builder.addStatement("$T protocolInterceptors = $T.singletonList(new $T())", - listType, - Collections.class, - 
QueryParametersToBodyInterceptor.class); - builder.addStatement("interceptors = $T.mergeLists(interceptors, protocolInterceptors)", - CollectionUtils.class); - } - if (model.getEndpointOperation().isPresent()) { builder.beginControlFlow("if (!endpointDiscoveryEnabled)") .addStatement("$1T chain = new $1T(config)", DefaultEndpointDiscoveryProviderChain.class) diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/SyncClientBuilderClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/SyncClientBuilderClass.java index 036589de04e8..8b330e76ce1b 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/SyncClientBuilderClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/SyncClientBuilderClass.java @@ -18,8 +18,12 @@ import com.squareup.javapoet.ClassName; import com.squareup.javapoet.MethodSpec; import com.squareup.javapoet.ParameterizedTypeName; +import com.squareup.javapoet.TypeName; import com.squareup.javapoet.TypeSpec; import java.net.URI; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; import javax.lang.model.element.Modifier; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.token.credentials.SdkTokenProvider; @@ -32,6 +36,9 @@ import software.amazon.awssdk.codegen.utils.AuthUtils; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.protocols.query.interceptor.QueryParametersToBodyInterceptor; +import software.amazon.awssdk.utils.CollectionUtils; public class SyncClientBuilderClass implements ClassSpec { private final IntermediateModel model; @@ -119,26 +126,53 @@ private MethodSpec endpointProviderMethod() { private MethodSpec buildClientMethod() { - return MethodSpec.methodBuilder("buildClient") - 
.addAnnotation(Override.class) - .addModifiers(Modifier.PROTECTED, Modifier.FINAL) - .returns(clientInterfaceName) - .addStatement("$T clientConfiguration = super.syncClientConfiguration()", SdkClientConfiguration.class) - .addStatement("this.validateClientOptions(clientConfiguration)") - .addStatement("$T endpointOverride = null", URI.class) - .addCode("if (clientConfiguration.option($T.ENDPOINT_OVERRIDDEN) != null" - + "&& $T.TRUE.equals(clientConfiguration.option($T.ENDPOINT_OVERRIDDEN))) {" - + "endpointOverride = clientConfiguration.option($T.ENDPOINT);" - + "}", - SdkClientOption.class, Boolean.class, SdkClientOption.class, SdkClientOption.class) - .addStatement("$T serviceClientConfiguration = $T.builder()" - + ".overrideConfiguration(overrideConfiguration())" - + ".region(clientConfiguration.option($T.AWS_REGION))" - + ".endpointOverride(endpointOverride)" - + ".build()", - serviceConfigClassName, serviceConfigClassName, AwsClientOption.class) - .addStatement("return new $T(serviceClientConfiguration, clientConfiguration)", clientClassName) - .build(); + MethodSpec.Builder b = MethodSpec.methodBuilder("buildClient") + .addAnnotation(Override.class) + .addModifiers(Modifier.PROTECTED, Modifier.FINAL) + .returns(clientInterfaceName) + .addStatement("$T clientConfiguration = super.syncClientConfiguration()", + SdkClientConfiguration.class); + + addQueryProtocolInterceptors(b); + + return b.addStatement("this.validateClientOptions(clientConfiguration)") + .addStatement("$T endpointOverride = null", URI.class) + .addCode("if (clientConfiguration.option($T.ENDPOINT_OVERRIDDEN) != null" + + "&& $T.TRUE.equals(clientConfiguration.option($T.ENDPOINT_OVERRIDDEN))) {" + + "endpointOverride = clientConfiguration.option($T.ENDPOINT);" + + "}", + SdkClientOption.class, Boolean.class, SdkClientOption.class, SdkClientOption.class) + .addStatement("$T serviceClientConfiguration = $T.builder()" + + ".overrideConfiguration(overrideConfiguration())" + + 
".region(clientConfiguration.option($T.AWS_REGION))" + + ".endpointOverride(endpointOverride)" + + ".build()", + serviceConfigClassName, serviceConfigClassName, AwsClientOption.class) + .addStatement("return new $T(serviceClientConfiguration, clientConfiguration)", clientClassName) + .build(); + } + + private MethodSpec.Builder addQueryProtocolInterceptors(MethodSpec.Builder b) { + if (!model.getMetadata().isQueryProtocol()) { + return b; + } + + TypeName listType = ParameterizedTypeName.get(List.class, ExecutionInterceptor.class); + + b.addStatement("$T interceptors = clientConfiguration.option($T.EXECUTION_INTERCEPTORS)", + listType, SdkClientOption.class) + .addStatement("$T queryParamsToBodyInterceptor = $T.singletonList(new $T())", + listType, Collections.class, QueryParametersToBodyInterceptor.class) + .addStatement("$T customizationInterceptors = new $T<>()", listType, ArrayList.class); + + List customInterceptors = model.getCustomizationConfig().getInterceptors(); + customInterceptors.forEach(i -> b.addStatement("customizationInterceptors.add(new $T())", ClassName.bestGuess(i))); + + b.addStatement("interceptors = $T.mergeLists(queryParamsToBodyInterceptor, interceptors)", CollectionUtils.class) + .addStatement("interceptors = $T.mergeLists(customizationInterceptors, interceptors)", CollectionUtils.class); + + return b.addStatement("clientConfiguration = clientConfiguration.toBuilder().option($T.EXECUTION_INTERCEPTORS, " + + "interceptors).build()", SdkClientOption.class); } private MethodSpec tokenProviderMethodImpl() { diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointRulesClientTestSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointRulesClientTestSpec.java index bdea3ebb52ff..4a0c69f04d71 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointRulesClientTestSpec.java +++ 
b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointRulesClientTestSpec.java @@ -111,6 +111,10 @@ public TypeSpec poetSpec() { b.addField(s3RegionEndpointSystemPropertySaveValueField()); } + if (serviceHasNoMatchingTestCases()) { + return b.build(); + } + b.addMethod(methodSetupMethod()); b.addMethod(teardownMethod()); @@ -215,16 +219,22 @@ private MethodSpec syncTestsSourceMethod() { .addModifiers(Modifier.PRIVATE, Modifier.STATIC) .returns(ParameterizedTypeName.get(List.class, SyncTestCase.class)); + + + b.addCode("return $T.asList(", Arrays.class); EndpointTestSuiteModel endpointTestSuiteModel = model.getEndpointTestSuiteModel(); Iterator testIter = endpointTestSuiteModel.getTestCases().iterator(); - + boolean isFirst = true; while (testIter.hasNext()) { EndpointTestModel test = testIter.next(); - - if (test.getOperationInputs() != null) { + if (testCaseHasOperationInputs(test)) { Iterator operationInputsIter = test.getOperationInputs().iterator(); + if (!isFirst) { + b.addCode(", "); + } + isFirst = false; while (operationInputsIter.hasNext()) { OperationInput opInput = operationInputsIter.next(); OperationModel opModel = model.getOperation(opInput.getOperationName()); @@ -240,7 +250,11 @@ private MethodSpec syncTestsSourceMethod() { b.addCode(","); } } - } else { + } else if (shouldGenerateClientTestsOverride()) { + if (!isFirst) { + b.addCode(", "); + } + isFirst = false; b.addCode("new $T($S, $L, $L$L)", SyncTestCase.class, test.getDocumentation(), @@ -248,10 +262,6 @@ private MethodSpec syncTestsSourceMethod() { TestGeneratorUtils.createExpect(test.getExpect(), defaultOpModel, null), getSkipReasonBlock(test.getDocumentation())); } - - if (testIter.hasNext()) { - b.addCode(","); - } } b.addStatement(")"); @@ -365,12 +375,16 @@ private MethodSpec asyncTestsSourceMethod() { EndpointTestSuiteModel endpointTestSuiteModel = model.getEndpointTestSuiteModel(); Iterator testIter = endpointTestSuiteModel.getTestCases().iterator(); - + boolean 
isFirst = true; while (testIter.hasNext()) { EndpointTestModel test = testIter.next(); - if (test.getOperationInputs() != null) { + if (testCaseHasOperationInputs(test)) { Iterator operationInputsIter = test.getOperationInputs().iterator(); + if (!isFirst) { + b.addCode(", "); + } + isFirst = false; while (operationInputsIter.hasNext()) { OperationInput opInput = operationInputsIter.next(); OperationModel opModel = model.getOperation(opInput.getOperationName()); @@ -386,7 +400,11 @@ private MethodSpec asyncTestsSourceMethod() { b.addCode(","); } } - } else { + } else if (shouldGenerateClientTestsOverride()) { + if (!isFirst) { + b.addCode(", "); + } + isFirst = false; b.addCode("new $T($S, $L, $L$L)", AsyncTestCase.class, test.getDocumentation(), @@ -394,10 +412,6 @@ private MethodSpec asyncTestsSourceMethod() { TestGeneratorUtils.createExpect(test.getExpect(), defaultOpModel, null), getSkipReasonBlock(test.getDocumentation())); } - - if (testIter.hasNext()) { - b.addCode(","); - } } b.addStatement(")"); @@ -649,6 +663,27 @@ private Map getSkippedTests() { return skippedTests; } + private boolean serviceHasNoMatchingTestCases() { + boolean noTestCasesHaveOperationInputs = model.getEndpointTestSuiteModel().getTestCases().stream() + .noneMatch(EndpointRulesClientTestSpec::testCaseHasOperationInputs); + return noTestCasesHaveOperationInputs && !shouldGenerateClientTestsOverride(); + } + + /** + * Always generate client endpoint tests if the test case has operation inputs + */ + private static boolean testCaseHasOperationInputs(EndpointTestModel test) { + return test.getOperationInputs() != null; + } + + /** + * Some services can run tests without operation inputs if there are other conditions that allow + * codegen to create a functioning test case + */ + private boolean shouldGenerateClientTestsOverride() { + return model.getCustomizationConfig().isGenerateEndpointClientTests(); + } + private CodeBlock getSkipReasonBlock(String testName) { if 
(getSkippedTests().containsKey(testName)) { Validate.notNull(getSkippedTests().get(testName), "Test %s must have a reason for skipping", testName); diff --git a/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/AwsEndpointProviderUtils.java.resource b/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/AwsEndpointProviderUtils.java.resource index ee11d604d2a2..db40f3f672e7 100644 --- a/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/AwsEndpointProviderUtils.java.resource +++ b/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/AwsEndpointProviderUtils.java.resource @@ -150,19 +150,33 @@ public final class AwsEndpointProviderUtils { String clientEndpointPath = clientEndpoint.getRawPath(); // [client endpoint path]/[request path] - String requestPath = request.getUri().getRawPath(); + String requestPath = request.encodedPath(); // [client endpoint path]/[additional path added by resolver] String resolvedUriPath = resolvedUri.getRawPath(); - // our goal is to construct [client endpoint path]/[additional path added by resolver]/[request path], so we - // just need to strip the client endpoint path from the marshalled request path to isolate just the part added - // by the marshaller - String requestPathWithClientPathRemoved = StringUtils.replaceOnce(requestPath, clientEndpointPath, ""); - String finalPath = SdkHttpUtils.appendUri(resolvedUriPath, requestPathWithClientPathRemoved); + String finalPath = requestPath; + + // If there is an additional path added by resolver, i.e., [additional path added by resolver] not null, + // we need to combine the path + if (!resolvedUriPath.equals(clientEndpointPath)) { + finalPath = combinePath(clientEndpointPath, requestPath, resolvedUriPath); + } return request.toBuilder().protocol(resolvedUri.getScheme()).host(resolvedUri.getHost()).port(resolvedUri.getPort()) - .encodedPath(finalPath).build(); + .encodedPath(finalPath).build(); + } + + /** + * Our goal is to construct 
[client endpoint path]/[additional path added by resolver]/[request path], so we just need to + * strip the client endpoint path from the marshalled request path to isolate just the part added by the marshaller. Trailing + * slash is removed from client endpoint path before stripping because it could cause the leading slash to be removed from the + * request path: e.g., StringUtils.replaceOnce("/", "//test", "") generates "/test" and the expected result is "//test" + */ + private static String combinePath(String clientEndpointPath, String requestPath, String resolvedUriPath) { + String requestPathWithClientPathRemoved = StringUtils.replaceOnce(requestPath, clientEndpointPath, ""); + String finalPath = SdkHttpUtils.appendUri(resolvedUriPath, requestPathWithClientPathRemoved); + return finalPath; } public static AwsRequest addHeaders(AwsRequest request, Map> headers) { diff --git a/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource b/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource index 5d5d39a6c9d5..2018b804f3d7 100644 --- a/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource +++ b/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource @@ -187,6 +187,17 @@ }, "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", "regions" : { } + }, { + "id" : "aws-iso-f", + "outputs" : { + "dnsSuffix" : "csp.hci.ic.gov", + "dualStackDnsSuffix" : "csp.hci.ic.gov", + "name" : "aws-iso-f", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$", + "regions" : { } } ], "version" : "1.1" } \ No newline at end of file diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/internal/QueryProtocolCustomTestInterceptor.java b/codegen/src/test/java/software/amazon/awssdk/codegen/internal/QueryProtocolCustomTestInterceptor.java new file mode 100644 index 000000000000..6bd5206d9b11 --- /dev/null 
+++ b/codegen/src/test/java/software/amazon/awssdk/codegen/internal/QueryProtocolCustomTestInterceptor.java @@ -0,0 +1,12 @@ +package software.amazon.awssdk.codegen.internal; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.codegen.poet.builder.BuilderClassTest; + +/** + * Empty no-op test interceptor for query protocols to view generated code in test-query-sync-client-builder-class.java and + * test-query-async-client-builder-class.java and validate in {@link BuilderClassTest}. + */ +@SdkInternalApi +public class QueryProtocolCustomTestInterceptor { +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BuilderClassTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BuilderClassTest.java index b111e47bf3c0..3edafd55dab3 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BuilderClassTest.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BuilderClassTest.java @@ -58,6 +58,16 @@ public void baseQueryClientBuilderClass() throws Exception { validateQueryGeneration(BaseClientBuilderClass::new, "test-query-client-builder-class.java"); } + @Test + public void syncQueryClientBuilderClass() throws Exception { + validateQueryGeneration(SyncClientBuilderClass::new, "test-query-sync-client-builder-class.java"); + } + + @Test + public void asyncQueryClientBuilderClass() throws Exception { + validateQueryGeneration(AsyncClientBuilderClass::new, "test-query-async-client-builder-class.java"); + } + @Test public void syncClientBuilderInterface() throws Exception { validateGeneration(SyncClientBuilderInterface::new, "test-sync-client-builder-interface.java"); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-async-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-async-client-builder-class.java new file mode 100644 index 
000000000000..f71429db299c --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-async-client-builder-class.java @@ -0,0 +1,61 @@ +package software.amazon.awssdk.services.query; + +import java.net.URI; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.auth.token.credentials.SdkTokenProvider; +import software.amazon.awssdk.awscore.client.config.AwsClientOption; +import software.amazon.awssdk.codegen.internal.QueryProtocolCustomTestInterceptor; +import software.amazon.awssdk.core.client.config.SdkClientConfiguration; +import software.amazon.awssdk.core.client.config.SdkClientOption; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.protocols.query.interceptor.QueryParametersToBodyInterceptor; +import software.amazon.awssdk.services.query.endpoints.QueryEndpointProvider; +import software.amazon.awssdk.utils.CollectionUtils; + +/** + * Internal implementation of {@link QueryAsyncClientBuilder}. 
+ */ +@Generated("software.amazon.awssdk:codegen") +@SdkInternalApi +final class DefaultQueryAsyncClientBuilder extends DefaultQueryBaseClientBuilder + implements QueryAsyncClientBuilder { + @Override + public DefaultQueryAsyncClientBuilder endpointProvider(QueryEndpointProvider endpointProvider) { + clientConfiguration.option(SdkClientOption.ENDPOINT_PROVIDER, endpointProvider); + return this; + } + + @Override + public DefaultQueryAsyncClientBuilder tokenProvider(SdkTokenProvider tokenProvider) { + clientConfiguration.option(AwsClientOption.TOKEN_PROVIDER, tokenProvider); + return this; + } + + @Override + protected final QueryAsyncClient buildClient() { + SdkClientConfiguration clientConfiguration = super.asyncClientConfiguration(); + List interceptors = clientConfiguration.option(SdkClientOption.EXECUTION_INTERCEPTORS); + List queryParamsToBodyInterceptor = Collections + .singletonList(new QueryParametersToBodyInterceptor()); + List customizationInterceptors = new ArrayList<>(); + customizationInterceptors.add(new QueryProtocolCustomTestInterceptor()); + interceptors = CollectionUtils.mergeLists(queryParamsToBodyInterceptor, interceptors); + interceptors = CollectionUtils.mergeLists(customizationInterceptors, interceptors); + clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors) + .build(); + this.validateClientOptions(clientConfiguration); + URI endpointOverride = null; + if (clientConfiguration.option(SdkClientOption.ENDPOINT_OVERRIDDEN) != null + && Boolean.TRUE.equals(clientConfiguration.option(SdkClientOption.ENDPOINT_OVERRIDDEN))) { + endpointOverride = clientConfiguration.option(SdkClientOption.ENDPOINT); + } + QueryServiceClientConfiguration serviceClientConfiguration = QueryServiceClientConfiguration.builder() + .overrideConfiguration(overrideConfiguration()).region(clientConfiguration.option(AwsClientOption.AWS_REGION)) + .endpointOverride(endpointOverride).build(); + return new 
DefaultQueryAsyncClient(serviceClientConfiguration, clientConfiguration); + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-client-builder-class.java index 141b27f6cfe0..e1b5cf7bf055 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-client-builder-class.java @@ -1,7 +1,6 @@ package software.amazon.awssdk.services.query; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; @@ -17,7 +16,6 @@ import software.amazon.awssdk.core.interceptor.ClasspathInterceptorChainFactory; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; import software.amazon.awssdk.core.signer.Signer; -import software.amazon.awssdk.protocols.query.interceptor.QueryParametersToBodyInterceptor; import software.amazon.awssdk.services.query.endpoints.QueryClientContextParams; import software.amazon.awssdk.services.query.endpoints.QueryEndpointProvider; import software.amazon.awssdk.services.query.endpoints.internal.QueryEndpointAuthSchemeInterceptor; @@ -64,8 +62,6 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); interceptors = CollectionUtils.mergeLists(interceptors, config.option(SdkClientOption.EXECUTION_INTERCEPTORS)); - List protocolInterceptors = Collections.singletonList(new QueryParametersToBodyInterceptor()); - interceptors = CollectionUtils.mergeLists(interceptors, protocolInterceptors); return 
config.toBuilder().option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors) .option(SdkClientOption.CLIENT_CONTEXT_PARAMS, clientContextParams.build()).build(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-sync-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-sync-client-builder-class.java new file mode 100644 index 000000000000..56b94d1d3189 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-sync-client-builder-class.java @@ -0,0 +1,61 @@ +package software.amazon.awssdk.services.query; + +import java.net.URI; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.auth.token.credentials.SdkTokenProvider; +import software.amazon.awssdk.awscore.client.config.AwsClientOption; +import software.amazon.awssdk.codegen.internal.QueryProtocolCustomTestInterceptor; +import software.amazon.awssdk.core.client.config.SdkClientConfiguration; +import software.amazon.awssdk.core.client.config.SdkClientOption; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.protocols.query.interceptor.QueryParametersToBodyInterceptor; +import software.amazon.awssdk.services.query.endpoints.QueryEndpointProvider; +import software.amazon.awssdk.utils.CollectionUtils; + +/** + * Internal implementation of {@link QueryClientBuilder}. 
+ */ +@Generated("software.amazon.awssdk:codegen") +@SdkInternalApi +final class DefaultQueryClientBuilder extends DefaultQueryBaseClientBuilder implements + QueryClientBuilder { + @Override + public DefaultQueryClientBuilder endpointProvider(QueryEndpointProvider endpointProvider) { + clientConfiguration.option(SdkClientOption.ENDPOINT_PROVIDER, endpointProvider); + return this; + } + + @Override + public DefaultQueryClientBuilder tokenProvider(SdkTokenProvider tokenProvider) { + clientConfiguration.option(AwsClientOption.TOKEN_PROVIDER, tokenProvider); + return this; + } + + @Override + protected final QueryClient buildClient() { + SdkClientConfiguration clientConfiguration = super.syncClientConfiguration(); + List interceptors = clientConfiguration.option(SdkClientOption.EXECUTION_INTERCEPTORS); + List queryParamsToBodyInterceptor = Collections + .singletonList(new QueryParametersToBodyInterceptor()); + List customizationInterceptors = new ArrayList<>(); + customizationInterceptors.add(new QueryProtocolCustomTestInterceptor()); + interceptors = CollectionUtils.mergeLists(queryParamsToBodyInterceptor, interceptors); + interceptors = CollectionUtils.mergeLists(customizationInterceptors, interceptors); + clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors) + .build(); + this.validateClientOptions(clientConfiguration); + URI endpointOverride = null; + if (clientConfiguration.option(SdkClientOption.ENDPOINT_OVERRIDDEN) != null + && Boolean.TRUE.equals(clientConfiguration.option(SdkClientOption.ENDPOINT_OVERRIDDEN))) { + endpointOverride = clientConfiguration.option(SdkClientOption.ENDPOINT); + } + QueryServiceClientConfiguration serviceClientConfiguration = QueryServiceClientConfiguration.builder() + .overrideConfiguration(overrideConfiguration()).region(clientConfiguration.option(AwsClientOption.AWS_REGION)) + .endpointOverride(endpointOverride).build(); + return new 
DefaultQueryClient(serviceClientConfiguration, clientConfiguration); + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/customization.config index c95b6d2e5f63..18824fa00a30 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/customization.config @@ -2,7 +2,10 @@ "authPolicyActions" : { "skip" : true }, - "skipEndpointTests": { + "skipEndpointTests": { "test case 4": "Does not work" - } + }, + "interceptors": [ + "software.amazon.awssdk.codegen.internal.QueryProtocolCustomTestInterceptor" + ] } \ No newline at end of file diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules/endpoint-rules-test-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules/endpoint-rules-test-class.java index fdcaad2a56a0..8872858320db 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules/endpoint-rules-test-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules/endpoint-rules-test-class.java @@ -18,7 +18,6 @@ import software.amazon.awssdk.services.query.QueryAsyncClientBuilder; import software.amazon.awssdk.services.query.QueryClient; import software.amazon.awssdk.services.query.QueryClientBuilder; -import software.amazon.awssdk.services.query.model.APostOperationRequest; import software.amazon.awssdk.services.query.model.ChecksumStructure; import software.amazon.awssdk.services.query.model.OperationWithContextParamRequest; @@ -47,26 +46,6 @@ public void asyncClient_usesCorrectEndpoint(AsyncTestCase tc) { private static List syncTestCases() { return Arrays.asList( - new SyncTestCase("test case 1", () -> { - QueryClientBuilder builder = QueryClient.builder(); - 
builder.credentialsProvider(BaseRuleSetClientTest.CREDENTIALS_PROVIDER); - builder.tokenProvider(BaseRuleSetClientTest.TOKEN_PROVIDER); - builder.httpClient(getSyncHttpClient()); - builder.region(Region.of("us-east-1")); - APostOperationRequest request = APostOperationRequest.builder().build(); - builder.build().aPostOperation(request); - }, Expect.builder().endpoint(Endpoint.builder().url(URI.create("https://foo-myservice.aws")).build()).build()), - new SyncTestCase("test case 2", () -> { - QueryClientBuilder builder = QueryClient.builder(); - builder.credentialsProvider(BaseRuleSetClientTest.CREDENTIALS_PROVIDER); - builder.tokenProvider(BaseRuleSetClientTest.TOKEN_PROVIDER); - builder.httpClient(getSyncHttpClient()); - builder.region(Region.of("us-east-1")); - builder.booleanContextParam(true); - builder.stringContextParam("this is a test"); - APostOperationRequest request = APostOperationRequest.builder().build(); - builder.build().aPostOperation(request); - }, Expect.builder().endpoint(Endpoint.builder().url(URI.create("https://foo-myservice.aws")).build()).build()), new SyncTestCase("test case 3", () -> { QueryClientBuilder builder = QueryClient.builder(); builder.credentialsProvider(BaseRuleSetClientTest.CREDENTIALS_PROVIDER); @@ -88,14 +67,6 @@ private static List syncTestCases() { builder.build().operationWithContextParam(request); }, Expect.builder().endpoint(Endpoint.builder().url(URI.create("https://myservice.aws")).build()).build(), "Does not work"), - new SyncTestCase("For region us-iso-west-1 with FIPS enabled and DualStack enabled", () -> { - QueryClientBuilder builder = QueryClient.builder(); - builder.credentialsProvider(BaseRuleSetClientTest.CREDENTIALS_PROVIDER); - builder.tokenProvider(BaseRuleSetClientTest.TOKEN_PROVIDER); - builder.httpClient(getSyncHttpClient()); - APostOperationRequest request = APostOperationRequest.builder().build(); - builder.build().aPostOperation(request); - }, Expect.builder().error("Should have been 
skipped!").build(), "Client builder does the validation"), new SyncTestCase("Has complex operation input", () -> { QueryClientBuilder builder = QueryClient.builder(); builder.credentialsProvider(BaseRuleSetClientTest.CREDENTIALS_PROVIDER); @@ -104,39 +75,11 @@ private static List syncTestCases() { OperationWithContextParamRequest request = OperationWithContextParamRequest.builder() .nestedMember(ChecksumStructure.builder().checksumMode("foo").build()).build(); builder.build().operationWithContextParam(request); - }, Expect.builder().error("Missing info").build()), new SyncTestCase("Has has undeclared input parameter", - () -> { - QueryClientBuilder builder = QueryClient.builder(); - builder.credentialsProvider(BaseRuleSetClientTest.CREDENTIALS_PROVIDER); - builder.tokenProvider(BaseRuleSetClientTest.TOKEN_PROVIDER); - builder.httpClient(getSyncHttpClient()); - APostOperationRequest request = APostOperationRequest.builder().build(); - builder.build().aPostOperation(request); - }, Expect.builder().error("Missing info").build())); + }, Expect.builder().error("Missing info").build())); } private static List asyncTestCases() { return Arrays.asList( - new AsyncTestCase("test case 1", () -> { - QueryAsyncClientBuilder builder = QueryAsyncClient.builder(); - builder.credentialsProvider(BaseRuleSetClientTest.CREDENTIALS_PROVIDER); - builder.tokenProvider(BaseRuleSetClientTest.TOKEN_PROVIDER); - builder.httpClient(getAsyncHttpClient()); - builder.region(Region.of("us-east-1")); - APostOperationRequest request = APostOperationRequest.builder().build(); - return builder.build().aPostOperation(request); - }, Expect.builder().endpoint(Endpoint.builder().url(URI.create("https://foo-myservice.aws")).build()).build()), - new AsyncTestCase("test case 2", () -> { - QueryAsyncClientBuilder builder = QueryAsyncClient.builder(); - builder.credentialsProvider(BaseRuleSetClientTest.CREDENTIALS_PROVIDER); - builder.tokenProvider(BaseRuleSetClientTest.TOKEN_PROVIDER); - 
builder.httpClient(getAsyncHttpClient()); - builder.region(Region.of("us-east-1")); - builder.booleanContextParam(true); - builder.stringContextParam("this is a test"); - APostOperationRequest request = APostOperationRequest.builder().build(); - return builder.build().aPostOperation(request); - }, Expect.builder().endpoint(Endpoint.builder().url(URI.create("https://foo-myservice.aws")).build()).build()), new AsyncTestCase("test case 3", () -> { QueryAsyncClientBuilder builder = QueryAsyncClient.builder(); builder.credentialsProvider(BaseRuleSetClientTest.CREDENTIALS_PROVIDER); @@ -158,14 +101,6 @@ private static List asyncTestCases() { return builder.build().operationWithContextParam(request); }, Expect.builder().endpoint(Endpoint.builder().url(URI.create("https://myservice.aws")).build()).build(), "Does not work"), - new AsyncTestCase("For region us-iso-west-1 with FIPS enabled and DualStack enabled", () -> { - QueryAsyncClientBuilder builder = QueryAsyncClient.builder(); - builder.credentialsProvider(BaseRuleSetClientTest.CREDENTIALS_PROVIDER); - builder.tokenProvider(BaseRuleSetClientTest.TOKEN_PROVIDER); - builder.httpClient(getAsyncHttpClient()); - APostOperationRequest request = APostOperationRequest.builder().build(); - return builder.build().aPostOperation(request); - }, Expect.builder().error("Should have been skipped!").build(), "Client builder does the validation"), new AsyncTestCase("Has complex operation input", () -> { QueryAsyncClientBuilder builder = QueryAsyncClient.builder(); builder.credentialsProvider(BaseRuleSetClientTest.CREDENTIALS_PROVIDER); @@ -174,14 +109,6 @@ private static List asyncTestCases() { OperationWithContextParamRequest request = OperationWithContextParamRequest.builder() .nestedMember(ChecksumStructure.builder().checksumMode("foo").build()).build(); return builder.build().operationWithContextParam(request); - }, Expect.builder().error("Missing info").build()), new AsyncTestCase("Has has undeclared input parameter", - () -> { - 
QueryAsyncClientBuilder builder = QueryAsyncClient.builder(); - builder.credentialsProvider(BaseRuleSetClientTest.CREDENTIALS_PROVIDER); - builder.tokenProvider(BaseRuleSetClientTest.TOKEN_PROVIDER); - builder.httpClient(getAsyncHttpClient()); - APostOperationRequest request = APostOperationRequest.builder().build(); - return builder.build().aPostOperation(request); - }, Expect.builder().error("Missing info").build())); + }, Expect.builder().error("Missing info").build())); } } diff --git a/core/annotations/pom.xml b/core/annotations/pom.xml index 0511bf3ce9f9..f0ceb1412bd8 100644 --- a/core/annotations/pom.xml +++ b/core/annotations/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 diff --git a/core/arns/pom.xml b/core/arns/pom.xml index 81fd186432fe..c9fe1f92c5df 100644 --- a/core/arns/pom.xml +++ b/core/arns/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 diff --git a/core/auth-crt/pom.xml b/core/auth-crt/pom.xml index 94ee56fe165c..bc9fd1f4c9bd 100644 --- a/core/auth-crt/pom.xml +++ b/core/auth-crt/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT auth-crt diff --git a/core/auth/pom.xml b/core/auth/pom.xml index 96d131db7f10..4407ae40b2b5 100644 --- a/core/auth/pom.xml +++ b/core/auth/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT auth diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsSessionCredentials.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsSessionCredentials.java index 8acd9efd02b9..de90e393708f 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsSessionCredentials.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsSessionCredentials.java @@ -15,7 +15,9 @@ package software.amazon.awssdk.auth.credentials; +import java.time.Instant; import java.util.Objects; 
+import java.util.Optional; import software.amazon.awssdk.annotations.Immutable; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.utils.ToString; @@ -34,10 +36,20 @@ public final class AwsSessionCredentials implements AwsCredentials { private final String secretAccessKey; private final String sessionToken; - private AwsSessionCredentials(String accessKey, String secretKey, String sessionToken) { - this.accessKeyId = Validate.paramNotNull(accessKey, "accessKey"); - this.secretAccessKey = Validate.paramNotNull(secretKey, "secretKey"); - this.sessionToken = Validate.paramNotNull(sessionToken, "sessionToken"); + private final Instant expirationTime; + + private AwsSessionCredentials(Builder builder) { + this.accessKeyId = Validate.paramNotNull(builder.accessKeyId, "accessKey"); + this.secretAccessKey = Validate.paramNotNull(builder.secretAccessKey, "secretKey"); + this.sessionToken = Validate.paramNotNull(builder.sessionToken, "sessionToken"); + this.expirationTime = builder.expirationTime; + } + + /** + * Returns a builder for this object. + */ + public static Builder builder() { + return new Builder(); } /** @@ -49,7 +61,7 @@ private AwsSessionCredentials(String accessKey, String secretKey, String session * received temporary permission to access some resource. */ public static AwsSessionCredentials create(String accessKey, String secretKey, String sessionToken) { - return new AwsSessionCredentials(accessKey, secretKey, sessionToken); + return builder().accessKeyId(accessKey).secretAccessKey(secretKey).sessionToken(sessionToken).build(); } /** @@ -68,6 +80,13 @@ public String secretAccessKey() { return secretAccessKey; } + /** + * Retrieve the expiration time of these credentials, if it exists. + */ + public Optional expirationTime() { + return Optional.ofNullable(expirationTime); + } + /** * Retrieve the AWS session token. 
This token is retrieved from an AWS token service, and is used for authenticating that this * user has received temporary permission to access some resource. @@ -95,7 +114,8 @@ public boolean equals(Object o) { AwsSessionCredentials that = (AwsSessionCredentials) o; return Objects.equals(accessKeyId, that.accessKeyId) && Objects.equals(secretAccessKey, that.secretAccessKey) && - Objects.equals(sessionToken, that.sessionToken); + Objects.equals(sessionToken, that.sessionToken) && + Objects.equals(expirationTime, that.expirationTime().orElse(null)); } @Override @@ -104,6 +124,57 @@ public int hashCode() { hashCode = 31 * hashCode + Objects.hashCode(accessKeyId()); hashCode = 31 * hashCode + Objects.hashCode(secretAccessKey()); hashCode = 31 * hashCode + Objects.hashCode(sessionToken()); + hashCode = 31 * hashCode + Objects.hashCode(expirationTime); return hashCode; } + + /** + * A builder for creating an instance of {@link AwsSessionCredentials}. This can be created with the static + * {@link #builder()} method. + */ + public static final class Builder { + private String accessKeyId; + private String secretAccessKey; + private String sessionToken; + private Instant expirationTime; + + /** + * The AWS access key, used to identify the user interacting with services. Required. + */ + public Builder accessKeyId(String accessKeyId) { + this.accessKeyId = accessKeyId; + return this; + } + + /** + * The AWS secret access key, used to authenticate the user interacting with services. Required + */ + public Builder secretAccessKey(String secretAccessKey) { + this.secretAccessKey = secretAccessKey; + return this; + } + + /** + * The AWS session token, retrieved from an AWS token service, used for authenticating that this user has + * received temporary permission to access some resource. Required + */ + public Builder sessionToken(String sessionToken) { + this.sessionToken = sessionToken; + return this; + } + + /** + * The time after which this identity will no longer be valid. 
If this is empty, + * an expiration time is not known (but the identity may still expire at some + * time in the future). + */ + public Builder expirationTime(Instant expirationTime) { + this.expirationTime = expirationTime; + return this; + } + + public AwsSessionCredentials build() { + return new AwsSessionCredentials(this); + } + } } diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/AwsSessionCredentialsTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/AwsSessionCredentialsTest.java index 8e9c6a9ee975..e0ccc19c5954 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/AwsSessionCredentialsTest.java +++ b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/AwsSessionCredentialsTest.java @@ -15,24 +15,64 @@ package software.amazon.awssdk.auth.credentials.internal; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.assertj.core.api.Assertions.assertThat; - +import nl.jqno.equalsverifier.EqualsVerifier; import org.junit.jupiter.api.Test; import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; public class AwsSessionCredentialsTest { + private static final String ACCESS_KEY_ID = "accessKeyId"; + private static final String SECRET_ACCESS_KEY = "secretAccessKey"; + private static final String SESSION_TOKEN = "sessionToken"; + + public void equalsHashcode() { + EqualsVerifier.forClass(AwsSessionCredentials.class) + .verify(); + } + + @Test + public void emptyBuilder_ThrowsException() { + assertThrows(NullPointerException.class, () -> AwsSessionCredentials.builder().build()); + } + + @Test + public void builderMissingSessionToken_ThrowsException() { + assertThrows(NullPointerException.class, () -> AwsSessionCredentials.builder() + .accessKeyId(ACCESS_KEY_ID) + .secretAccessKey(SECRET_ACCESS_KEY) + .build()); + } @Test - public void 
equalsHashCode() { - AwsSessionCredentials credentials = - AwsSessionCredentials.create("test", "key", "sessionToken"); - - AwsSessionCredentials anotherCredentials = - AwsSessionCredentials.create("test", "key", "sessionToken"); - assertThat(credentials).isEqualTo(anotherCredentials); - assertThat(credentials.hashCode()).isEqualTo(anotherCredentials.hashCode()); + public void builderMissingAccessKeyId_ThrowsException() { + assertThrows(NullPointerException.class, () -> AwsSessionCredentials.builder() + .secretAccessKey(SECRET_ACCESS_KEY) + .sessionToken(SESSION_TOKEN) + .build()); } + @Test + public void create_isSuccessful() { + AwsSessionCredentials identity = AwsSessionCredentials.create(ACCESS_KEY_ID, + SECRET_ACCESS_KEY, + SESSION_TOKEN); + assertEquals(ACCESS_KEY_ID, identity.accessKeyId()); + assertEquals(SECRET_ACCESS_KEY, identity.secretAccessKey()); + assertEquals(SESSION_TOKEN, identity.sessionToken()); + } + + @Test + public void build_isSuccessful() { + AwsSessionCredentials identity = AwsSessionCredentials.builder() + .accessKeyId(ACCESS_KEY_ID) + .secretAccessKey(SECRET_ACCESS_KEY) + .sessionToken(SESSION_TOKEN) + .build(); + assertEquals(ACCESS_KEY_ID, identity.accessKeyId()); + assertEquals(SECRET_ACCESS_KEY, identity.secretAccessKey()); + assertEquals(SESSION_TOKEN, identity.sessionToken()); + } } diff --git a/core/aws-core/pom.xml b/core/aws-core/pom.xml index 2501a9197b12..8669fb99527d 100644 --- a/core/aws-core/pom.xml +++ b/core/aws-core/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT aws-core diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/interceptor/TraceIdExecutionInterceptor.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/interceptor/TraceIdExecutionInterceptor.java index 00301a89b2f8..f8091c3b4e94 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/interceptor/TraceIdExecutionInterceptor.java +++ 
b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/interceptor/TraceIdExecutionInterceptor.java @@ -17,6 +17,7 @@ import java.util.Optional; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.awscore.internal.interceptor.TracingSystemSetting; import software.amazon.awssdk.core.interceptor.Context; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; @@ -24,13 +25,12 @@ import software.amazon.awssdk.utils.SystemSetting; /** - * The {@code TraceIdExecutionInterceptor} copies the {@link #TRACE_ID_ENVIRONMENT_VARIABLE} value to the - * {@link #TRACE_ID_HEADER} header, assuming we seem to be running in a lambda environment. + * The {@code TraceIdExecutionInterceptor} copies the trace details to the {@link #TRACE_ID_HEADER} header, assuming we seem to + * be running in a lambda environment. */ @SdkInternalApi public class TraceIdExecutionInterceptor implements ExecutionInterceptor { private static final String TRACE_ID_HEADER = "X-Amzn-Trace-Id"; - private static final String TRACE_ID_ENVIRONMENT_VARIABLE = "_X_AMZN_TRACE_ID"; private static final String LAMBDA_FUNCTION_NAME_ENVIRONMENT_VARIABLE = "AWS_LAMBDA_FUNCTION_NAME"; @Override @@ -38,7 +38,7 @@ public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, Execu Optional traceIdHeader = traceIdHeader(context); if (!traceIdHeader.isPresent()) { Optional lambdafunctionName = lambdaFunctionNameEnvironmentVariable(); - Optional traceId = traceIdEnvironmentVariable(); + Optional traceId = traceId(); if (lambdafunctionName.isPresent() && traceId.isPresent()) { return context.httpRequest().copy(r -> r.putHeader(TRACE_ID_HEADER, traceId.get())); @@ -52,10 +52,8 @@ private Optional traceIdHeader(Context.ModifyHttpRequest context) { return context.httpRequest().firstMatchingHeader(TRACE_ID_HEADER); } - private Optional traceIdEnvironmentVariable() { - // CHECKSTYLE:OFF - This is 
not configured by the customer, so it should not be configurable by system property - return SystemSetting.getStringValueFromEnvironmentVariable(TRACE_ID_ENVIRONMENT_VARIABLE); - // CHECKSTYLE:ON + private Optional traceId() { + return TracingSystemSetting._X_AMZN_TRACE_ID.getStringValue(); } private Optional lambdaFunctionNameEnvironmentVariable() { diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/interceptor/TracingSystemSetting.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/interceptor/TracingSystemSetting.java new file mode 100644 index 000000000000..6f412e9a83a5 --- /dev/null +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/interceptor/TracingSystemSetting.java @@ -0,0 +1,51 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.awscore.internal.interceptor; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.SystemSetting; + +/** + * Tracing specific System Setting. 
+ */ +@SdkInternalApi +public enum TracingSystemSetting implements SystemSetting { + // See: https://github.com/aws/aws-xray-sdk-java/issues/251 + _X_AMZN_TRACE_ID("com.amazonaws.xray.traceHeader", null); + + private final String systemProperty; + private final String defaultValue; + + TracingSystemSetting(String systemProperty, String defaultValue) { + this.systemProperty = systemProperty; + this.defaultValue = defaultValue; + } + + @Override + public String property() { + return systemProperty; + } + + @Override + public String environmentVariable() { + return name(); + } + + @Override + public String defaultValue() { + return defaultValue; + } +} diff --git a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/interceptor/TraceIdExecutionInterceptorTest.java b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/interceptor/TraceIdExecutionInterceptorTest.java index 408108ab22b6..b3f965a490fc 100644 --- a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/interceptor/TraceIdExecutionInterceptorTest.java +++ b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/interceptor/TraceIdExecutionInterceptorTest.java @@ -18,6 +18,7 @@ import static org.assertj.core.api.Assertions.assertThat; import java.net.URI; +import java.util.Properties; import org.junit.jupiter.api.Test; import org.mockito.Mockito; import software.amazon.awssdk.core.SdkRequest; @@ -49,6 +50,32 @@ public void headerAddedWithEnvSettings() { }); } + + @Test + public void headerAddedWithSysPropWhenNoEnvSettings() { + EnvironmentVariableHelper.run(env -> { + resetRelevantEnvVars(env); + env.set("AWS_LAMBDA_FUNCTION_NAME", "foo"); + Properties props = System.getProperties(); + props.setProperty("com.amazonaws.xray.traceHeader", "sys-prop"); + Context.ModifyHttpRequest context = context(); + assertThat(modifyHttpRequest(context).firstMatchingHeader("X-Amzn-Trace-Id")).hasValue("sys-prop"); + }); + } + + @Test + public void 
headerAddedWithEnvVariableValueWhenBothEnvAndSysPropAreSet() { + EnvironmentVariableHelper.run(env -> { + resetRelevantEnvVars(env); + env.set("AWS_LAMBDA_FUNCTION_NAME", "foo"); + env.set("_X_AMZN_TRACE_ID", "bar"); + Properties props = System.getProperties(); + props.setProperty("com.amazonaws.xray.traceHeader", "sys-prop"); + Context.ModifyHttpRequest context = context(); + assertThat(modifyHttpRequest(context).firstMatchingHeader("X-Amzn-Trace-Id")).hasValue("sys-prop"); + }); + } + @Test public void headerNotAddedIfHeaderAlreadyExists() { EnvironmentVariableHelper.run(env -> { diff --git a/core/crt-core/pom.xml b/core/crt-core/pom.xml index ca16db949f29..9789a22ba291 100644 --- a/core/crt-core/pom.xml +++ b/core/crt-core/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk core - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT crt-core diff --git a/core/endpoints-spi/pom.xml b/core/endpoints-spi/pom.xml index 7f634278913e..691b6e750104 100644 --- a/core/endpoints-spi/pom.xml +++ b/core/endpoints-spi/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 diff --git a/core/imds/pom.xml b/core/imds/pom.xml index cc7593a38c21..7184e284e49c 100644 --- a/core/imds/pom.xml +++ b/core/imds/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 imds diff --git a/core/json-utils/pom.xml b/core/json-utils/pom.xml index d8cc787f39f6..e40e2197b535 100644 --- a/core/json-utils/pom.xml +++ b/core/json-utils/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 diff --git a/core/metrics-spi/pom.xml b/core/metrics-spi/pom.xml index 08a64f3a754b..3c8981505c06 100644 --- a/core/metrics-spi/pom.xml +++ b/core/metrics-spi/pom.xml @@ -5,7 +5,7 @@ core software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 diff --git a/core/pom.xml b/core/pom.xml index 65368acaf0b2..a1f005cca382 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom 
software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT core diff --git a/core/profiles/pom.xml b/core/profiles/pom.xml index 5e6b7c1f0bbf..6c24b03da2bd 100644 --- a/core/profiles/pom.xml +++ b/core/profiles/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT profiles diff --git a/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileSupplierTest.java b/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileSupplierTest.java index 50e4c80ecc58..6fa45ecbe4fb 100644 --- a/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileSupplierTest.java +++ b/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileSupplierTest.java @@ -433,8 +433,9 @@ void aggregate_duplicateOptionsGivenFixedProfileFirst_preservesPrecedence() { } @Test - void aggregate_duplicateOptionsGivenReloadingProfileFirst_preservesPrecedence() { - AdjustableClock clock = new AdjustableClock(); + void aggregate_duplicateOptionsGivenReloadingProfileFirst_preservesPrecedence() throws IOException { + Instant startTime = Instant.now(); + AdjustableClock clock = new AdjustableClock(startTime); ProfileFile configFile1 = configFile("profile default", Pair.of("aws_access_key_id", "config-key")); Path credentialsFilePath = generateTestCredentialsFile("defaultAccessKey", "defaultSecretAccessKey"); @@ -452,7 +453,14 @@ void aggregate_duplicateOptionsGivenReloadingProfileFirst_preservesPrecedence() generateTestCredentialsFile("defaultAccessKey2", "defaultSecretAccessKey2"); - clock.tickForward(Duration.ofMillis(1_000)); + Duration tick = Duration.ofMillis(1_000); + + // The refresh logic uses the last modified attribute of the profile file to determine if it's changed and should be + // reloaded; unfortunately that means that if things happen quickly enough, the last modified time of the first version + // of the file, and the new version will be the same. 
Ensure that there is a change in the last modified time for the + // test file. + Files.setLastModifiedTime(getTestCredentialsFilePath(), FileTime.from(startTime.plus(tick))); + clock.tickForward(tick); profileFile = supplier.get(); accessKeyId = profileFile.profile("default").get().property("aws_access_key_id").get(); @@ -505,10 +513,10 @@ void get_givenOnLoadAction_callsActionOncePerNewProfileFile() { assertThat(blockCount.get()).isEqualTo(actualProfilesCount); } - private Path generateTestFile(String contents, String filename) { + private Path writeTestFile(String contents, Path path) { try { Files.createDirectories(testDirectory); - return Files.write(testDirectory.resolve(filename), contents.getBytes(StandardCharsets.UTF_8)); + return Files.write(path, contents.getBytes(StandardCharsets.UTF_8)); } catch (IOException e) { throw new RuntimeException(e); } @@ -517,7 +525,11 @@ private Path generateTestFile(String contents, String filename) { private Path generateTestCredentialsFile(String accessKeyId, String secretAccessKey) { String contents = String.format("[default]\naws_access_key_id = %s\naws_secret_access_key = %s\n", accessKeyId, secretAccessKey); - return generateTestFile(contents, "credentials.txt"); + return writeTestFile(contents, getTestCredentialsFilePath()); + } + + private Path getTestCredentialsFilePath() { + return testDirectory.resolve("credentials.txt"); } private Path generateTestConfigFile(Pair... pairs) { @@ -526,7 +538,7 @@ private Path generateTestConfigFile(Pair... 
pairs) { .collect(Collectors.joining(System.lineSeparator())); String contents = String.format("[default]\n%s", values); - return generateTestFile(contents, "config.txt"); + return writeTestFile(contents, testDirectory.resolve("config.txt")); } private void updateModificationTime(Path path, Instant instant) { @@ -597,6 +609,10 @@ private AdjustableClock() { this.time = Instant.now(); } + private AdjustableClock(Instant time) { + this.time = time; + } + @Override public ZoneId getZone() { return ZoneOffset.UTC; diff --git a/core/protocols/aws-cbor-protocol/pom.xml b/core/protocols/aws-cbor-protocol/pom.xml index 89e619340fb2..b2eda66c5b05 100644 --- a/core/protocols/aws-cbor-protocol/pom.xml +++ b/core/protocols/aws-cbor-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-json-protocol/pom.xml b/core/protocols/aws-json-protocol/pom.xml index fa63621c7438..35ad37a367df 100644 --- a/core/protocols/aws-json-protocol/pom.xml +++ b/core/protocols/aws-json-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-query-protocol/pom.xml b/core/protocols/aws-query-protocol/pom.xml index 88b86b99b78e..17752c401a58 100644 --- a/core/protocols/aws-query-protocol/pom.xml +++ b/core/protocols/aws-query-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-xml-protocol/pom.xml b/core/protocols/aws-xml-protocol/pom.xml index efd5c1b5cb5b..958b33ba6c55 100644 --- a/core/protocols/aws-xml-protocol/pom.xml +++ b/core/protocols/aws-xml-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 diff --git a/core/protocols/pom.xml b/core/protocols/pom.xml index 71006e19ac0c..2e842618d8d1 100644 --- a/core/protocols/pom.xml +++ b/core/protocols/pom.xml @@ -20,7 +20,7 @@ 
core software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 diff --git a/core/protocols/protocol-core/pom.xml b/core/protocols/protocol-core/pom.xml index e9277738ae7e..54e6811ed212 100644 --- a/core/protocols/protocol-core/pom.xml +++ b/core/protocols/protocol-core/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 diff --git a/core/regions/pom.xml b/core/regions/pom.xml index aebfb9c53004..8b6030ece3ea 100644 --- a/core/regions/pom.xml +++ b/core/regions/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT regions diff --git a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json index 6b9002532d26..344555c7d052 100644 --- a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json +++ b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json @@ -1076,6 +1076,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, @@ -2005,6 +2006,9 @@ "variants" : [ { "hostname" : "athena-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "athena-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] }, { "hostname" : "athena.us-east-1.api.aws", "tags" : [ "dualstack" ] @@ -2014,6 +2018,9 @@ "variants" : [ { "hostname" : "athena-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "athena-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] }, { "hostname" : "athena.us-east-2.api.aws", "tags" : [ "dualstack" ] @@ -2023,6 +2030,9 @@ "variants" : [ { "hostname" : "athena-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "athena-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] 
}, { "hostname" : "athena.us-west-1.api.aws", "tags" : [ "dualstack" ] @@ -2032,6 +2042,9 @@ "variants" : [ { "hostname" : "athena-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "athena-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] }, { "hostname" : "athena.us-west-2.api.aws", "tags" : [ "dualstack" ] @@ -2200,6 +2213,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, @@ -2824,6 +2838,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, @@ -3199,6 +3214,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -3259,6 +3275,13 @@ "deprecated" : true, "hostname" : "cognito-identity-fips.us-east-2.amazonaws.com" }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "cognito-identity-fips.us-west-1.amazonaws.com" + }, "fips-us-west-2" : { "credentialScope" : { "region" : "us-west-2" @@ -3280,7 +3303,12 @@ "tags" : [ "fips" ] } ] }, - "us-west-1" : { }, + "us-west-1" : { + "variants" : [ { + "hostname" : "cognito-identity-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "us-west-2" : { "variants" : [ { "hostname" : "cognito-identity-fips.us-west-2.amazonaws.com", @@ -4238,6 +4266,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "datasync-fips.ca-central-1.amazonaws.com", @@ -4348,12 +4377,24 @@ "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, - "ca-central-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "devops-guru-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, 
"eu-central-1" : { }, "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "devops-guru-fips.ca-central-1.amazonaws.com" + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -4368,6 +4409,13 @@ "deprecated" : true, "hostname" : "devops-guru-fips.us-east-2.amazonaws.com" }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "devops-guru-fips.us-west-1.amazonaws.com" + }, "fips-us-west-2" : { "credentialScope" : { "region" : "us-west-2" @@ -4388,7 +4436,12 @@ "tags" : [ "fips" ] } ] }, - "us-west-1" : { }, + "us-west-1" : { + "variants" : [ { + "hostname" : "devops-guru-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "us-west-2" : { "variants" : [ { "hostname" : "devops-guru-fips.us-west-2.amazonaws.com", @@ -6576,6 +6629,7 @@ } ] }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "fms-fips.ca-central-1.amazonaws.com", @@ -6909,6 +6963,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "fsx-fips.ca-central-1.amazonaws.com", @@ -7919,11 +7974,6 @@ } }, "iot" : { - "defaults" : { - "credentialScope" : { - "service" : "execute-api" - } - }, "endpoints" : { "ap-east-1" : { }, "ap-northeast-1" : { }, @@ -7943,37 +7993,22 @@ "eu-west-2" : { }, "eu-west-3" : { }, "fips-ca-central-1" : { - "credentialScope" : { - "service" : "execute-api" - }, "deprecated" : true, "hostname" : "iot-fips.ca-central-1.amazonaws.com" }, "fips-us-east-1" : { - "credentialScope" : { - "service" : "execute-api" - }, "deprecated" : true, "hostname" : "iot-fips.us-east-1.amazonaws.com" }, "fips-us-east-2" : { - "credentialScope" : { - "service" : "execute-api" - }, "deprecated" : true, "hostname" : 
"iot-fips.us-east-2.amazonaws.com" }, "fips-us-west-1" : { - "credentialScope" : { - "service" : "execute-api" - }, "deprecated" : true, "hostname" : "iot-fips.us-west-1.amazonaws.com" }, "fips-us-west-2" : { - "credentialScope" : { - "service" : "execute-api" - }, "deprecated" : true, "hostname" : "iot-fips.us-west-2.amazonaws.com" }, @@ -8493,6 +8528,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "kafka-fips.ca-central-1.amazonaws.com", @@ -8599,6 +8635,7 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -8684,7 +8721,11 @@ "hostname" : "kendra-ranking.ap-southeast-4.api.aws" }, "ca-central-1" : { - "hostname" : "kendra-ranking.ca-central-1.api.aws" + "hostname" : "kendra-ranking.ca-central-1.api.aws", + "variants" : [ { + "hostname" : "kendra-ranking-fips.ca-central-1.api.aws", + "tags" : [ "fips" ] + } ] }, "eu-central-2" : { "hostname" : "kendra-ranking.eu-central-2.api.aws" @@ -8714,16 +8755,28 @@ "hostname" : "kendra-ranking.sa-east-1.api.aws" }, "us-east-1" : { - "hostname" : "kendra-ranking.us-east-1.api.aws" + "hostname" : "kendra-ranking.us-east-1.api.aws", + "variants" : [ { + "hostname" : "kendra-ranking-fips.us-east-1.api.aws", + "tags" : [ "fips" ] + } ] }, "us-east-2" : { - "hostname" : "kendra-ranking.us-east-2.api.aws" + "hostname" : "kendra-ranking.us-east-2.api.aws", + "variants" : [ { + "hostname" : "kendra-ranking-fips.us-east-2.api.aws", + "tags" : [ "fips" ] + } ] }, "us-west-1" : { "hostname" : "kendra-ranking.us-west-1.api.aws" }, "us-west-2" : { - "hostname" : "kendra-ranking.us-west-2.api.aws" + "hostname" : "kendra-ranking.us-west-2.api.aws", + "variants" : [ { + "hostname" : "kendra-ranking-fips.us-west-2.api.aws", + "tags" : [ "fips" ] + } ] } } }, @@ -10265,6 +10318,25 @@ "us-west-2" : { } } }, + "mediapackagev2" : { + 
"endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, "mediastore" : { "endpoints" : { "ap-northeast-1" : { }, @@ -10413,6 +10485,7 @@ "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -10441,13 +10514,17 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -10685,13 +10762,17 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -10975,10 +11056,15 @@ "nimble" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, "eu-west-2" : { }, "us-east-1" : { }, + "us-east-2" : { }, "us-west-2" : { } } }, @@ -13001,6 +13087,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, @@ -13868,6 +13955,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, 
"ca-central-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, @@ -13937,6 +14025,8 @@ "securitylake" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, @@ -13945,6 +14035,7 @@ "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -16016,8 +16107,11 @@ }, "transcribestreaming" : { "endpoints" : { + "af-south-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, @@ -16105,6 +16199,7 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, @@ -16115,8 +16210,10 @@ } ] }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -16243,6 +16340,37 @@ } } }, + "verifiedpermissions" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, "voice-chime" : { "endpoints" : { "ap-northeast-1" : { }, @@ -17616,6 +17744,12 @@ "cn-northwest-1" : { } } }, + "airflow" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "api.ecr" : { "endpoints" : { "cn-north-1" : { @@ -18188,11 
+18322,6 @@ } }, "iot" : { - "defaults" : { - "credentialScope" : { - "service" : "execute-api" - } - }, "endpoints" : { "cn-north-1" : { }, "cn-northwest-1" : { } @@ -19316,6 +19445,9 @@ "variants" : [ { "hostname" : "athena-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "athena-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] }, { "hostname" : "athena.us-gov-east-1.api.aws", "tags" : [ "dualstack" ] @@ -19325,6 +19457,9 @@ "variants" : [ { "hostname" : "athena-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "athena-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] }, { "hostname" : "athena.us-gov-west-1.api.aws", "tags" : [ "dualstack" ] @@ -20976,23 +21111,12 @@ } }, "iot" : { - "defaults" : { - "credentialScope" : { - "service" : "execute-api" - } - }, "endpoints" : { "fips-us-gov-east-1" : { - "credentialScope" : { - "service" : "execute-api" - }, "deprecated" : true, "hostname" : "iot-fips.us-gov-east-1.amazonaws.com" }, "fips-us-gov-west-1" : { - "credentialScope" : { - "service" : "execute-api" - }, "deprecated" : true, "hostname" : "iot-fips.us-gov-west-1.amazonaws.com" }, @@ -21467,6 +21591,36 @@ "us-gov-west-1" : { } } }, + "mgn" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "mgn-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "mgn-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "mgn-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "mgn-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, "models.lex" : { "defaults" : { "credentialScope" : { @@ -22464,6 +22618,12 @@ } } }, + "simspaceweaver" : { + "endpoints" : { + 
"us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, "sms" : { "endpoints" : { "fips-us-gov-east-1" : { @@ -23099,6 +23259,13 @@ }, "workspaces" : { "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "workspaces-fips.us-gov-east-1.amazonaws.com" + }, "fips-us-gov-west-1" : { "credentialScope" : { "region" : "us-gov-west-1" @@ -23106,7 +23273,12 @@ "deprecated" : true, "hostname" : "workspaces-fips.us-gov-west-1.amazonaws.com" }, - "us-gov-east-1" : { }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "workspaces-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "us-gov-west-1" : { "variants" : [ { "hostname" : "workspaces-fips.us-gov-west-1.amazonaws.com", @@ -23230,6 +23402,12 @@ "us-iso-west-1" : { } } }, + "cloudcontrolapi" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, "cloudformation" : { "endpoints" : { "us-iso-east-1" : { }, @@ -23273,6 +23451,12 @@ "us-iso-west-1" : { } } }, + "dlm" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, "dms" : { "defaults" : { "variants" : [ { @@ -23601,7 +23785,8 @@ }, "route53resolver" : { "endpoints" : { - "us-iso-east-1" : { } + "us-iso-east-1" : { }, + "us-iso-west-1" : { } } }, "runtime.sagemaker" : { @@ -23702,7 +23887,8 @@ }, "tagging" : { "endpoints" : { - "us-iso-east-1" : { } + "us-iso-east-1" : { }, + "us-iso-west-1" : { } } }, "transcribe" : { @@ -24194,6 +24380,23 @@ "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", "regions" : { }, "services" : { } + }, { + "defaults" : { + "hostname" : "{service}.{region}.{dnsSuffix}", + "protocols" : [ "https" ], + "signatureVersions" : [ "v4" ], + "variants" : [ { + "dnsSuffix" : "csp.hci.ic.gov", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "dnsSuffix" : "csp.hci.ic.gov", + "partition" : "aws-iso-f", + "partitionName" : "AWS ISOF", + "regionRegex" : 
"^us\\-isof\\-\\w+\\-\\d+$", + "regions" : { }, + "services" : { } } ], "version" : 3 } \ No newline at end of file diff --git a/core/sdk-core/pom.xml b/core/sdk-core/pom.xml index 0ec62964aee1..f44b4399b61a 100644 --- a/core/sdk-core/pom.xml +++ b/core/sdk-core/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk core - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT sdk-core AWS Java SDK :: SDK Core diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java index 7a1738f51d97..07dea1568089 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java @@ -22,37 +22,38 @@ import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.nio.file.Path; +import java.util.Arrays; import java.util.Optional; import java.util.concurrent.ExecutorService; import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; import software.amazon.awssdk.annotations.SdkPublicApi; -import software.amazon.awssdk.core.internal.async.ByteArrayAsyncRequestBody; +import software.amazon.awssdk.core.internal.async.ByteBuffersAsyncRequestBody; import software.amazon.awssdk.core.internal.async.FileAsyncRequestBody; import software.amazon.awssdk.core.internal.async.InputStreamWithExecutorAsyncRequestBody; import software.amazon.awssdk.core.internal.util.Mimetype; import software.amazon.awssdk.utils.BinaryUtils; /** - * Interface to allow non-blocking streaming of request content. This follows the reactive streams pattern where - * this interface is the {@link Publisher} of data (specifically {@link ByteBuffer} chunks) and the HTTP client is the Subscriber - * of the data (i.e. to write that data on the wire). + * Interface to allow non-blocking streaming of request content. 
This follows the reactive streams pattern where this interface is + * the {@link Publisher} of data (specifically {@link ByteBuffer} chunks) and the HTTP client is the Subscriber of the data (i.e. + * to write that data on the wire). * *

* {@link #subscribe(Subscriber)} should be implemented to tie this publisher to a subscriber. Ideally each call to subscribe - * should reproduce the content (i.e if you are reading from a file each subscribe call should produce a {@link - * org.reactivestreams.Subscription} that reads the file fully). This allows for automatic retries to be performed in the SDK. If - * the content is not reproducible, an exception may be thrown from any subsequent {@link #subscribe(Subscriber)} calls. + * should reproduce the content (i.e if you are reading from a file each subscribe call should produce a + * {@link org.reactivestreams.Subscription} that reads the file fully). This allows for automatic retries to be performed in the + * SDK. If the content is not reproducible, an exception may be thrown from any subsequent {@link #subscribe(Subscriber)} calls. *

* *

- * It is important to only send the number of chunks that the subscriber requests to avoid out of memory situations. - * The subscriber does it's own buffering so it's usually not needed to buffer in the publisher. Additional permits - * for chunks will be notified via the {@link org.reactivestreams.Subscription#request(long)} method. + * It is important to only send the number of chunks that the subscriber requests to avoid out of memory situations. The + * subscriber does it's own buffering so it's usually not needed to buffer in the publisher. Additional permits for chunks will be + * notified via the {@link org.reactivestreams.Subscription#request(long)} method. *

* * @see FileAsyncRequestBody - * @see ByteArrayAsyncRequestBody + * @see ByteBuffersAsyncRequestBody */ @SdkPublicApi public interface AsyncRequestBody extends SdkPublisher { @@ -70,8 +71,8 @@ default String contentType() { } /** - * Creates an {@link AsyncRequestBody} the produces data from the input ByteBuffer publisher. - * The data is delivered when the publisher publishes the data. + * Creates an {@link AsyncRequestBody} the produces data from the input ByteBuffer publisher. The data is delivered when the + * publisher publishes the data. * * @param publisher Publisher of source data * @return Implementation of {@link AsyncRequestBody} that produces data send by the publisher @@ -124,11 +125,11 @@ static AsyncRequestBody fromFile(File file) { * @param string The string to provide. * @param cs The {@link Charset} to use. * @return Implementation of {@link AsyncRequestBody} that uses the specified string. - * @see ByteArrayAsyncRequestBody + * @see ByteBuffersAsyncRequestBody */ static AsyncRequestBody fromString(String string, Charset cs) { - return new ByteArrayAsyncRequestBody(string.getBytes(cs), - Mimetype.MIMETYPE_TEXT_PLAIN + "; charset=" + cs.name()); + return ByteBuffersAsyncRequestBody.from(Mimetype.MIMETYPE_TEXT_PLAIN + "; charset=" + cs.name(), + string.getBytes(cs)); } /** @@ -143,29 +144,181 @@ static AsyncRequestBody fromString(String string) { } /** - * Creates a {@link AsyncRequestBody} from a byte array. The contents of the byte array are copied so modifications to the - * original byte array are not reflected in the {@link AsyncRequestBody}. + * Creates an {@link AsyncRequestBody} from a byte array. This will copy the contents of the byte array to prevent + * modifications to the provided byte array from being reflected in the {@link AsyncRequestBody}. * * @param bytes The bytes to send to the service. * @return AsyncRequestBody instance. 
*/ static AsyncRequestBody fromBytes(byte[] bytes) { - return new ByteArrayAsyncRequestBody(bytes, Mimetype.MIMETYPE_OCTET_STREAM); + byte[] clonedBytes = bytes.clone(); + return ByteBuffersAsyncRequestBody.from(clonedBytes); } /** - * Creates a {@link AsyncRequestBody} from a {@link ByteBuffer}. Buffer contents are copied so any modifications - * made to the original {@link ByteBuffer} are not reflected in the {@link AsyncRequestBody}. + * Creates an {@link AsyncRequestBody} from a byte array without copying the contents of the byte array. This + * introduces concurrency risks, allowing: (1) the caller to modify the byte array stored in this {@code AsyncRequestBody} + * implementation AND (2) any users of {@link #fromBytesUnsafe(byte[])} to modify the byte array passed into this + * {@code AsyncRequestBody} implementation. + * + *

As the method name implies, this is unsafe. Use {@link #fromBytes(byte[])} unless you're sure you know the risks. + * + * @param bytes The bytes to send to the service. + * @return AsyncRequestBody instance. + */ + static AsyncRequestBody fromBytesUnsafe(byte[] bytes) { + return ByteBuffersAsyncRequestBody.from(bytes); + } + + /** + * Creates an {@link AsyncRequestBody} from a {@link ByteBuffer}. This will copy the contents of the {@link ByteBuffer} to + * prevent modifications to the provided {@link ByteBuffer} from being reflected in the {@link AsyncRequestBody}. + *

+ * NOTE: This method ignores the current read position. Use {@link #fromRemainingByteBuffer(ByteBuffer)} if you need + * it to copy only the remaining readable bytes. * * @param byteBuffer ByteBuffer to send to the service. * @return AsyncRequestBody instance. */ static AsyncRequestBody fromByteBuffer(ByteBuffer byteBuffer) { - return fromBytes(BinaryUtils.copyAllBytesFrom(byteBuffer)); + ByteBuffer immutableCopy = BinaryUtils.immutableCopyOf(byteBuffer); + immutableCopy.rewind(); + return ByteBuffersAsyncRequestBody.of((long) immutableCopy.remaining(), immutableCopy); + } + + /** + * Creates an {@link AsyncRequestBody} from the remaining readable bytes from a {@link ByteBuffer}. This will copy the + * remaining contents of the {@link ByteBuffer} to prevent modifications to the provided {@link ByteBuffer} from being + * reflected in the {@link AsyncRequestBody}. + *

Unlike {@link #fromByteBuffer(ByteBuffer)}, this method respects the current read position of the buffer and reads + * only the remaining bytes. + * + * @param byteBuffer ByteBuffer to send to the service. + * @return AsyncRequestBody instance. + */ + static AsyncRequestBody fromRemainingByteBuffer(ByteBuffer byteBuffer) { + ByteBuffer immutableCopy = BinaryUtils.immutableCopyOfRemaining(byteBuffer); + return ByteBuffersAsyncRequestBody.of((long) immutableCopy.remaining(), immutableCopy); + } + + /** + * Creates an {@link AsyncRequestBody} from a {@link ByteBuffer} without copying the contents of the + * {@link ByteBuffer}. This introduces concurrency risks, allowing the caller to modify the {@link ByteBuffer} stored in this + * {@code AsyncRequestBody} implementation. + *

+ * NOTE: This method ignores the current read position. Use {@link #fromRemainingByteBufferUnsafe(ByteBuffer)} if you + * need it to copy only the remaining readable bytes. + * + *

As the method name implies, this is unsafe. Use {@link #fromByteBuffer(ByteBuffer)}} unless you're sure you know the + * risks. + * + * @param byteBuffer ByteBuffer to send to the service. + * @return AsyncRequestBody instance. + */ + static AsyncRequestBody fromByteBufferUnsafe(ByteBuffer byteBuffer) { + ByteBuffer readOnlyBuffer = byteBuffer.asReadOnlyBuffer(); + readOnlyBuffer.rewind(); + return ByteBuffersAsyncRequestBody.of((long) readOnlyBuffer.remaining(), readOnlyBuffer); + } + + /** + * Creates an {@link AsyncRequestBody} from a {@link ByteBuffer} without copying the contents of the + * {@link ByteBuffer}. This introduces concurrency risks, allowing the caller to modify the {@link ByteBuffer} stored in this + * {@code AsyncRequestBody} implementation. + *

Unlike {@link #fromByteBufferUnsafe(ByteBuffer)}, this method respects the current read position of + * the buffer and reads only the remaining bytes. + * + *

As the method name implies, this is unsafe. Use {@link #fromByteBuffer(ByteBuffer)}} unless you're sure you know the + * risks. + * + * @param byteBuffer ByteBuffer to send to the service. + * @return AsyncRequestBody instance. + */ + static AsyncRequestBody fromRemainingByteBufferUnsafe(ByteBuffer byteBuffer) { + ByteBuffer readOnlyBuffer = byteBuffer.asReadOnlyBuffer(); + return ByteBuffersAsyncRequestBody.of((long) readOnlyBuffer.remaining(), readOnlyBuffer); + } + + /** + * Creates an {@link AsyncRequestBody} from a {@link ByteBuffer} array. This will copy the contents of each {@link ByteBuffer} + * to prevent modifications to any provided {@link ByteBuffer} from being reflected in the {@link AsyncRequestBody}. + *

+ * NOTE: This method ignores the current read position of each {@link ByteBuffer}. Use + * {@link #fromRemainingByteBuffers(ByteBuffer...)} if you need it to copy only the remaining readable bytes. + * + * @param byteBuffers ByteBuffer array to send to the service. + * @return AsyncRequestBody instance. + */ + static AsyncRequestBody fromByteBuffers(ByteBuffer... byteBuffers) { + ByteBuffer[] immutableCopy = Arrays.stream(byteBuffers) + .map(BinaryUtils::immutableCopyOf) + .peek(ByteBuffer::rewind) + .toArray(ByteBuffer[]::new); + return ByteBuffersAsyncRequestBody.of(immutableCopy); + } + + /** + * Creates an {@link AsyncRequestBody} from a {@link ByteBuffer} array. This will copy the remaining contents of each + * {@link ByteBuffer} to prevent modifications to any provided {@link ByteBuffer} from being reflected in the + * {@link AsyncRequestBody}. + *

Unlike {@link #fromByteBufferUnsafe(ByteBuffer)}, + * this method respects the current read position of each buffer and reads only the remaining bytes. + * + * @param byteBuffers ByteBuffer array to send to the service. + * @return AsyncRequestBody instance. + */ + static AsyncRequestBody fromRemainingByteBuffers(ByteBuffer... byteBuffers) { + ByteBuffer[] immutableCopy = Arrays.stream(byteBuffers) + .map(BinaryUtils::immutableCopyOfRemaining) + .peek(ByteBuffer::rewind) + .toArray(ByteBuffer[]::new); + return ByteBuffersAsyncRequestBody.of(immutableCopy); + } + + /** + * Creates an {@link AsyncRequestBody} from a {@link ByteBuffer} array without copying the contents of each + * {@link ByteBuffer}. This introduces concurrency risks, allowing the caller to modify any {@link ByteBuffer} stored in this + * {@code AsyncRequestBody} implementation. + *

+ * NOTE: This method ignores the current read position of each {@link ByteBuffer}. Use + * {@link #fromRemainingByteBuffers(ByteBuffer...)} if you need it to copy only the remaining readable bytes. + * + *

As the method name implies, this is unsafe. Use {@link #fromByteBuffers(ByteBuffer...)} unless you're sure you know the + * risks. + * + * @param byteBuffers ByteBuffer array to send to the service. + * @return AsyncRequestBody instance. + */ + static AsyncRequestBody fromByteBuffersUnsafe(ByteBuffer... byteBuffers) { + ByteBuffer[] readOnlyBuffers = Arrays.stream(byteBuffers) + .map(ByteBuffer::asReadOnlyBuffer) + .peek(ByteBuffer::rewind) + .toArray(ByteBuffer[]::new); + return ByteBuffersAsyncRequestBody.of(readOnlyBuffers); + } + + /** + * Creates an {@link AsyncRequestBody} from a {@link ByteBuffer} array without copying the contents of each + * {@link ByteBuffer}. This introduces concurrency risks, allowing the caller to modify any {@link ByteBuffer} stored in this + * {@code AsyncRequestBody} implementation. + *

Unlike {@link #fromByteBuffersUnsafe(ByteBuffer...)}, + * this method respects the current read position of each buffer and reads only the remaining bytes. + * + *

As the method name implies, this is unsafe. Use {@link #fromByteBuffers(ByteBuffer...)} unless you're sure you know the + * risks. + * + * @param byteBuffers ByteBuffer array to send to the service. + * @return AsyncRequestBody instance. + */ + static AsyncRequestBody fromRemainingByteBuffersUnsafe(ByteBuffer... byteBuffers) { + ByteBuffer[] readOnlyBuffers = Arrays.stream(byteBuffers) + .map(ByteBuffer::asReadOnlyBuffer) + .toArray(ByteBuffer[]::new); + return ByteBuffersAsyncRequestBody.of(readOnlyBuffers); } /** - * Creates a {@link AsyncRequestBody} from a {@link InputStream}. + * Creates an {@link AsyncRequestBody} from an {@link InputStream}. * *

An {@link ExecutorService} is required in order to perform the blocking data reads, to prevent blocking the * non-blocking event loop threads owned by the SDK. @@ -239,7 +392,7 @@ static BlockingOutputStreamAsyncRequestBody forBlockingOutputStream(Long content } /** - * Creates a {@link AsyncRequestBody} with no content. + * Creates an {@link AsyncRequestBody} with no content. * * @return AsyncRequestBody instance. */ diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java index c91ad39ad1a3..18fcc1e52f2e 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java @@ -91,6 +91,7 @@ import software.amazon.awssdk.profiles.ProfileFileSystemSetting; import software.amazon.awssdk.utils.AttributeMap; import software.amazon.awssdk.utils.Either; +import software.amazon.awssdk.utils.ScheduledExecutorUtils; import software.amazon.awssdk.utils.ThreadFactoryBuilder; import software.amazon.awssdk.utils.Validate; @@ -222,6 +223,7 @@ private SdkClientConfiguration setOverrides(SdkClientConfiguration configuration SdkClientConfiguration.Builder builder = configuration.toBuilder(); + builder.option(SCHEDULED_EXECUTOR_SERVICE, clientOverrideConfiguration.scheduledExecutorService().orElse(null)); builder.option(EXECUTION_INTERCEPTORS, clientOverrideConfiguration.executionInterceptors()); builder.option(RETRY_POLICY, clientOverrideConfiguration.retryPolicy().orElse(null)); builder.option(ADDITIONAL_HTTP_HEADERS, clientOverrideConfiguration.headers()); @@ -313,7 +315,7 @@ private SdkClientConfiguration finalizeAsyncConfiguration(SdkClientConfiguration private SdkClientConfiguration finalizeConfiguration(SdkClientConfiguration config) { RetryPolicy retryPolicy = 
resolveRetryPolicy(config); return config.toBuilder() - .option(SCHEDULED_EXECUTOR_SERVICE, resolveScheduledExecutorService()) + .option(SCHEDULED_EXECUTOR_SERVICE, resolveScheduledExecutorService(config)) .option(EXECUTION_INTERCEPTORS, resolveExecutionInterceptors(config)) .option(RETRY_POLICY, retryPolicy) .option(CLIENT_USER_AGENT, resolveClientUserAgent(config, retryPolicy)) @@ -410,9 +412,17 @@ private Executor resolveAsyncFutureCompletionExecutor(SdkClientConfiguration con * Finalize the internal SDK scheduled executor service that is used for scheduling tasks such * as async retry attempts and timeout task. */ - private ScheduledExecutorService resolveScheduledExecutorService() { - return Executors.newScheduledThreadPool(5, new ThreadFactoryBuilder() - .threadNamePrefix("sdk-ScheduledExecutor").build()); + private ScheduledExecutorService resolveScheduledExecutorService(SdkClientConfiguration config) { + Supplier defaultScheduledExecutor = () -> { + ScheduledExecutorService executor = Executors.newScheduledThreadPool(5, new ThreadFactoryBuilder() + .threadNamePrefix("sdk-ScheduledExecutor").build()); + + return executor; + }; + + return Optional.ofNullable(config.option(SCHEDULED_EXECUTOR_SERVICE)) + .map(ScheduledExecutorUtils::unmanagedScheduledExecutor) + .orElseGet(defaultScheduledExecutor); } /** diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java index 4ba034413b90..83cf2317038d 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java @@ -23,6 +23,7 @@ import java.util.Map; import java.util.Optional; import java.util.TreeMap; +import java.util.concurrent.ScheduledExecutorService; import java.util.function.Consumer; 
import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.ToBuilderIgnoreField; @@ -62,6 +63,7 @@ public final class ClientOverrideConfiguration private final String defaultProfileName; private final List metricPublishers; private final ExecutionAttributes executionAttributes; + private final ScheduledExecutorService scheduledExecutorService; /** * Initialize this configuration. Private to require use of {@link #builder()}. @@ -77,6 +79,7 @@ private ClientOverrideConfiguration(Builder builder) { this.defaultProfileName = builder.defaultProfileName(); this.metricPublishers = Collections.unmodifiableList(new ArrayList<>(builder.metricPublishers())); this.executionAttributes = ExecutionAttributes.unmodifiableExecutionAttributes(builder.executionAttributes()); + this.scheduledExecutorService = builder.scheduledExecutorService(); } @Override @@ -92,7 +95,8 @@ public Builder toBuilder() { .defaultProfileFile(defaultProfileFile) .defaultProfileName(defaultProfileName) .executionAttributes(executionAttributes) - .metricPublishers(metricPublishers); + .metricPublishers(metricPublishers) + .scheduledExecutorService(scheduledExecutorService); } /** @@ -141,6 +145,17 @@ public List executionInterceptors() { return executionInterceptors; } + /** + * The optional scheduled executor service that should be used for scheduling tasks such as async retry attempts + * and timeout task. + *

+ * The SDK will not automatically close the executor when the client is closed. It is the responsibility of the + * user to manually close the executor once all clients utilizing it have been closed. + */ + public Optional scheduledExecutorService() { + return Optional.ofNullable(scheduledExecutorService); + } + /** * The amount of time to allow the client to complete the execution of an API call. This timeout covers the entire client * execution except for marshalling. This includes request handler execution, all HTTP requests including retries, @@ -226,6 +241,7 @@ public String toString() { .add("advancedOptions", advancedOptions) .add("profileFile", defaultProfileFile) .add("profileName", defaultProfileName) + .add("scheduledExecutorService", scheduledExecutorService) .build(); } @@ -338,6 +354,20 @@ default Builder retryPolicy(RetryMode retryMode) { List executionInterceptors(); + /** + * Configure the scheduled executor service that should be used for scheduling tasks such as async retry attempts + * and timeout task. + * + *

+ * The SDK will not automatically close the executor when the client is closed. It is the responsibility of the + * user to manually close the executor once all clients utilizing it have been closed. + * + * @see ClientOverrideConfiguration#scheduledExecutorService() + */ + Builder scheduledExecutorService(ScheduledExecutorService scheduledExecutorService); + + ScheduledExecutorService scheduledExecutorService(); + /** * Configure an advanced override option. These values are used very rarely, and the majority of SDK customers can ignore * them. @@ -499,6 +529,7 @@ private static final class DefaultClientOverrideConfigurationBuilder implements private String defaultProfileName; private List metricPublishers = new ArrayList<>(); private ExecutionAttributes.Builder executionAttributes = ExecutionAttributes.builder(); + private ScheduledExecutorService scheduledExecutorService; @Override public Builder headers(Map> headers) { @@ -561,6 +592,18 @@ public List executionInterceptors() { return Collections.unmodifiableList(executionInterceptors); } + @Override + public ScheduledExecutorService scheduledExecutorService() + { + return scheduledExecutorService; + } + + @Override + public Builder scheduledExecutorService(ScheduledExecutorService scheduledExecutorService) { + this.scheduledExecutorService = scheduledExecutorService; + return this; + } + @Override public Builder putAdvancedOption(SdkAdvancedClientOption option, T value) { this.advancedOptions.put(option, value); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java index 94f78e64a0df..6e71448dc98f 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java @@ -67,7 +67,8 @@ public class SdkExecutionAttribute { 
public static final ExecutionAttribute ENDPOINT_OVERRIDDEN = new ExecutionAttribute<>("EndpointOverridden"); /** - * This is the endpointOverride (if {@link #ENDPOINT_OVERRIDDEN} is true), otherwise null. + * This is the endpointOverride (if {@link #ENDPOINT_OVERRIDDEN} is true), otherwise the endpoint generated from regional + * metadata. */ public static final ExecutionAttribute CLIENT_ENDPOINT = new ExecutionAttribute<>("EndpointOverride"); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteArrayAsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteArrayAsyncRequestBody.java deleted file mode 100644 index 29205479b798..000000000000 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteArrayAsyncRequestBody.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.core.internal.async; - -import java.nio.ByteBuffer; -import java.util.Optional; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.core.async.AsyncRequestBody; -import software.amazon.awssdk.utils.Logger; - -/** - * An implementation of {@link AsyncRequestBody} for providing data from memory. 
This is created using static - * methods on {@link AsyncRequestBody} - * - * @see AsyncRequestBody#fromBytes(byte[]) - * @see AsyncRequestBody#fromByteBuffer(ByteBuffer) - * @see AsyncRequestBody#fromString(String) - */ -@SdkInternalApi -public final class ByteArrayAsyncRequestBody implements AsyncRequestBody { - private static final Logger log = Logger.loggerFor(ByteArrayAsyncRequestBody.class); - - private final byte[] bytes; - - private final String mimetype; - - public ByteArrayAsyncRequestBody(byte[] bytes, String mimetype) { - this.bytes = bytes.clone(); - this.mimetype = mimetype; - } - - @Override - public Optional contentLength() { - return Optional.of((long) bytes.length); - } - - @Override - public String contentType() { - return mimetype; - } - - @Override - public void subscribe(Subscriber s) { - // As per rule 1.9 we must throw NullPointerException if the subscriber parameter is null - if (s == null) { - throw new NullPointerException("Subscription MUST NOT be null."); - } - - // As per 2.13, this method must return normally (i.e. not throw). 
- try { - s.onSubscribe( - new Subscription() { - private boolean done = false; - - @Override - public void request(long n) { - if (done) { - return; - } - if (n > 0) { - done = true; - s.onNext(ByteBuffer.wrap(bytes)); - s.onComplete(); - } else { - s.onError(new IllegalArgumentException("§3.9: non-positive requests are not allowed!")); - } - } - - @Override - public void cancel() { - synchronized (this) { - if (!done) { - done = true; - } - } - } - } - ); - } catch (Throwable ex) { - log.error(() -> s + " violated the Reactive Streams rule 2.13 by throwing an exception from onSubscribe.", ex); - } - } -} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteBuffersAsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteBuffersAsyncRequestBody.java new file mode 100644 index 000000000000..e7e9d00dd0e5 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteBuffersAsyncRequestBody.java @@ -0,0 +1,157 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.async; + +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.internal.util.Mimetype; +import software.amazon.awssdk.utils.BinaryUtils; +import software.amazon.awssdk.utils.Logger; + +/** + * An implementation of {@link AsyncRequestBody} for providing data from the supplied {@link ByteBuffer} array. This is created + * using static methods on {@link AsyncRequestBody} + * + * @see AsyncRequestBody#fromBytes(byte[]) + * @see AsyncRequestBody#fromBytesUnsafe(byte[]) + * @see AsyncRequestBody#fromByteBuffer(ByteBuffer) + * @see AsyncRequestBody#fromByteBufferUnsafe(ByteBuffer) + * @see AsyncRequestBody#fromByteBuffers(ByteBuffer...) + * @see AsyncRequestBody#fromByteBuffersUnsafe(ByteBuffer...) + * @see AsyncRequestBody#fromString(String) + */ +@SdkInternalApi +public final class ByteBuffersAsyncRequestBody implements AsyncRequestBody { + private static final Logger log = Logger.loggerFor(ByteBuffersAsyncRequestBody.class); + + private final String mimetype; + private final Long length; + private final ByteBuffer[] buffers; + + private ByteBuffersAsyncRequestBody(String mimetype, Long length, ByteBuffer... 
buffers) { + this.mimetype = mimetype; + this.length = length; + this.buffers = buffers; + } + + @Override + public Optional contentLength() { + return Optional.ofNullable(length); + } + + @Override + public String contentType() { + return mimetype; + } + + @Override + public void subscribe(Subscriber s) { + // As per rule 1.9 we must throw NullPointerException if the subscriber parameter is null + if (s == null) { + throw new NullPointerException("Subscription MUST NOT be null."); + } + + // As per 2.13, this method must return normally (i.e. not throw). + try { + s.onSubscribe( + new Subscription() { + private final AtomicInteger index = new AtomicInteger(0); + private final AtomicBoolean completed = new AtomicBoolean(false); + + @Override + public void request(long n) { + if (completed.get()) { + return; + } + + if (n > 0) { + int i = index.getAndIncrement(); + + if (i >= buffers.length) { + return; + } + + long remaining = n; + + do { + ByteBuffer buffer = buffers[i]; + + // Pending discussions on https://github.com/aws/aws-sdk-java-v2/issues/3928 + if (buffer.isDirect()) { + buffer = BinaryUtils.toNonDirectBuffer(buffer); + } + + s.onNext(buffer.asReadOnlyBuffer()); + remaining--; + } while (remaining > 0 && (i = index.getAndIncrement()) < buffers.length); + + if (i >= buffers.length - 1 && completed.compareAndSet(false, true)) { + s.onComplete(); + } + } else { + s.onError(new IllegalArgumentException("§3.9: non-positive requests are not allowed!")); + } + } + + @Override + public void cancel() { + completed.set(true); + } + } + ); + } catch (Throwable ex) { + log.error(() -> s + " violated the Reactive Streams rule 2.13 by throwing an exception from onSubscribe.", ex); + } + } + + public static ByteBuffersAsyncRequestBody of(ByteBuffer... 
buffers) { + long length = Arrays.stream(buffers) + .mapToLong(ByteBuffer::remaining) + .sum(); + return new ByteBuffersAsyncRequestBody(Mimetype.MIMETYPE_OCTET_STREAM, length, buffers); + } + + public static ByteBuffersAsyncRequestBody of(Long length, ByteBuffer... buffers) { + return new ByteBuffersAsyncRequestBody(Mimetype.MIMETYPE_OCTET_STREAM, length, buffers); + } + + public static ByteBuffersAsyncRequestBody of(String mimetype, ByteBuffer... buffers) { + long length = Arrays.stream(buffers) + .mapToLong(ByteBuffer::remaining) + .sum(); + return new ByteBuffersAsyncRequestBody(mimetype, length, buffers); + } + + public static ByteBuffersAsyncRequestBody of(String mimetype, Long length, ByteBuffer... buffers) { + return new ByteBuffersAsyncRequestBody(mimetype, length, buffers); + } + + public static ByteBuffersAsyncRequestBody from(byte[] bytes) { + return new ByteBuffersAsyncRequestBody(Mimetype.MIMETYPE_OCTET_STREAM, (long) bytes.length, + ByteBuffer.wrap(bytes)); + } + + public static ByteBuffersAsyncRequestBody from(String mimetype, byte[] bytes) { + return new ByteBuffersAsyncRequestBody(mimetype, (long) bytes.length, ByteBuffer.wrap(bytes)); + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java index 8fd7f0260b76..93d6d09578a6 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java @@ -22,6 +22,7 @@ import java.util.List; import java.util.concurrent.atomic.AtomicLong; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.BinaryUtils; import software.amazon.awssdk.utils.Validate; import software.amazon.awssdk.utils.builder.SdkBuilder; @@ -58,10 +59,11 @@ public synchronized Iterable bufferAndCreateChunks(ByteBuffer buffer int 
availableToRead = bufferSize - bufferedBytes; int bytesToMove = Math.min(availableToRead, currentBytesRead - startPosition); + byte[] bytes = BinaryUtils.copyAllBytesFrom(buffer); if (bufferedBytes == 0) { - currentBuffer.put(buffer.array(), startPosition, bytesToMove); + currentBuffer.put(bytes, startPosition, bytesToMove); } else { - currentBuffer.put(buffer.array(), 0, bytesToMove); + currentBuffer.put(bytes, 0, bytesToMove); } startPosition = startPosition + bytesToMove; diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AfterTransmissionExecutionInterceptorsStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AfterTransmissionExecutionInterceptorsStage.java index 7521219a5030..a7cada02b06c 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AfterTransmissionExecutionInterceptorsStage.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AfterTransmissionExecutionInterceptorsStage.java @@ -31,7 +31,7 @@ public class AfterTransmissionExecutionInterceptorsStage @Override public Pair execute(Pair input, RequestExecutionContext context) throws Exception { - InterruptMonitor.checkInterrupted(); + InterruptMonitor.checkInterrupted(input.right()); // Update interceptor context InterceptorContext interceptorContext = context.executionContext().interceptorContext().copy(b -> b.httpResponse(input.right()) diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApiCallAttemptMetricCollectionStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApiCallAttemptMetricCollectionStage.java index 329b302ccba2..bfd10e742469 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApiCallAttemptMetricCollectionStage.java +++ 
b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApiCallAttemptMetricCollectionStage.java @@ -25,6 +25,7 @@ import software.amazon.awssdk.core.internal.http.pipeline.RequestPipeline; import software.amazon.awssdk.core.internal.http.pipeline.RequestToResponsePipeline; import software.amazon.awssdk.core.internal.http.pipeline.stages.utils.RetryableStageHelper; +import software.amazon.awssdk.core.internal.metrics.SdkErrorType; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.metrics.MetricCollector; @@ -47,11 +48,18 @@ public Response execute(SdkHttpFullRequest input, RequestExecutionConte context.attemptMetricCollector(apiCallAttemptMetrics); reportBackoffDelay(context); - Response response = wrapped.execute(input, context); + try { + Response response = wrapped.execute(input, context); + collectHttpMetrics(apiCallAttemptMetrics, response.httpResponse()); - collectHttpMetrics(apiCallAttemptMetrics, response.httpResponse()); - - return response; + if (!Boolean.TRUE.equals(response.isSuccess()) && response.exception() != null) { + reportErrorType(context, response.exception()); + } + return response; + } catch (Exception e) { + reportErrorType(context, e); + throw e; + } } private void reportBackoffDelay(RequestExecutionContext context) { @@ -60,4 +68,8 @@ private void reportBackoffDelay(RequestExecutionContext context) { context.attemptMetricCollector().reportMetric(CoreMetric.BACKOFF_DELAY_DURATION, lastBackoffDelay); } } + + private void reportErrorType(RequestExecutionContext context, Exception e) { + context.attemptMetricCollector().reportMetric(CoreMetric.ERROR_TYPE, SdkErrorType.fromException(e).toString()); + } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallAttemptMetricCollectionStage.java 
b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallAttemptMetricCollectionStage.java index c576da162fba..d4777674452c 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallAttemptMetricCollectionStage.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallAttemptMetricCollectionStage.java @@ -25,6 +25,7 @@ import software.amazon.awssdk.core.internal.http.RequestExecutionContext; import software.amazon.awssdk.core.internal.http.pipeline.RequestPipeline; import software.amazon.awssdk.core.internal.http.pipeline.stages.utils.RetryableStageHelper; +import software.amazon.awssdk.core.internal.metrics.SdkErrorType; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.metrics.MetricCollector; @@ -57,6 +58,12 @@ public CompletableFuture> execute(SdkHttpFullRequest input, if (t == null) { collectHttpMetrics(apiCallAttemptMetrics, r.httpResponse()); } + + if (t != null) { + reportErrorType(context, t.getCause()); + } else if (!Boolean.TRUE.equals(r.isSuccess()) && r.exception() != null) { + reportErrorType(context, r.exception()); + } }); CompletableFutureUtils.forwardExceptionTo(metricsCollectedFuture, executeFuture); @@ -69,4 +76,8 @@ private void reportBackoffDelay(RequestExecutionContext context) { context.attemptMetricCollector().reportMetric(CoreMetric.BACKOFF_DELAY_DURATION, lastBackoffDelay); } } + + private void reportErrorType(RequestExecutionContext context, Throwable t) { + context.attemptMetricCollector().reportMetric(CoreMetric.ERROR_TYPE, SdkErrorType.fromException(t).toString()); + } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeAsyncHttpRequestStage.java 
b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeAsyncHttpRequestStage.java index 063d2a572e96..a40fdeb90b72 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeAsyncHttpRequestStage.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeAsyncHttpRequestStage.java @@ -156,15 +156,29 @@ private CompletableFuture> executeHttpRequest(SdkHttpFullReque } }); - // Offload the completion of the future returned from this stage onto - // the future completion executor - responseHandlerFuture.whenCompleteAsync((r, t) -> { - if (t == null) { - responseFuture.complete(r); - } else { - responseFuture.completeExceptionally(t); + // Attempt to offload the completion of the future returned from this + // stage onto the future completion executor + CompletableFuture asyncComplete = + responseHandlerFuture.handleAsync((r, t) -> { + completeResponseFuture(responseFuture, r, t); + return null; + }, + futureCompletionExecutor); + + // It's possible the async execution above fails. If so, log a warning, + // and just complete it synchronously. + asyncComplete.whenComplete((ignored, asyncCompleteError) -> { + if (asyncCompleteError != null) { + log.debug(() -> String.format("Could not complete the service call future on the provided " + + "FUTURE_COMPLETION_EXECUTOR. The future will be completed synchronously by thread" + + " %s. This may be an indication that the executor is being overwhelmed by too" + + " many requests, and it may negatively affect performance. 
Consider changing " + + "the configuration of the executor to accommodate the load through the client.", + Thread.currentThread().getName()), + asyncCompleteError); + responseHandlerFuture.whenComplete((r, t) -> completeResponseFuture(responseFuture, r, t)); } - }, futureCompletionExecutor); + }); return responseFuture; } @@ -219,6 +233,14 @@ private TimeoutTracker setupAttemptTimer(CompletableFuture> ex timeoutMillis); } + private void completeResponseFuture(CompletableFuture> responseFuture, Response r, Throwable t) { + if (t == null) { + responseFuture.complete(r); + } else { + responseFuture.completeExceptionally(t); + } + } + /** * When an operation has a streaming input, the customer must supply an {@link AsyncRequestBody} to * provide the request content in a non-blocking manner. This adapts that interface to the diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/HttpChecksumInHeaderInterceptor.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/HttpChecksumInHeaderInterceptor.java index 0ddf70959cae..f3c92a254bec 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/HttpChecksumInHeaderInterceptor.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/HttpChecksumInHeaderInterceptor.java @@ -15,7 +15,6 @@ package software.amazon.awssdk.core.internal.interceptor; -import static software.amazon.awssdk.core.HttpChecksumConstant.HTTP_CHECKSUM_VALUE; import static software.amazon.awssdk.core.HttpChecksumConstant.SIGNING_METHOD; import static software.amazon.awssdk.core.internal.util.HttpChecksumResolver.getResolvedChecksumSpecs; @@ -23,7 +22,6 @@ import java.io.UncheckedIOException; import java.util.Optional; import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.core.checksums.Algorithm; import software.amazon.awssdk.core.checksums.ChecksumSpecs; import 
software.amazon.awssdk.core.interceptor.Context; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; @@ -47,49 +45,27 @@ @SdkInternalApi public class HttpChecksumInHeaderInterceptor implements ExecutionInterceptor { - @Override - public void afterMarshalling(Context.AfterMarshalling context, ExecutionAttributes executionAttributes) { - ChecksumSpecs headerChecksumSpecs = HttpChecksumUtils.checksumSpecWithRequestAlgorithm(executionAttributes).orElse(null); - - if (shouldSkipHttpChecksumInHeader(context, executionAttributes, headerChecksumSpecs)) { - return; - } - Optional syncContent = context.requestBody(); - syncContent.ifPresent( - requestBody -> saveContentChecksum(requestBody, executionAttributes, headerChecksumSpecs.algorithm())); - } - - @Override - public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { - ChecksumSpecs checksumSpecs = getResolvedChecksumSpecs(executionAttributes); - - if (shouldSkipHttpChecksumInHeader(context, executionAttributes, checksumSpecs)) { - return context.httpRequest(); - } - - String httpChecksumValue = executionAttributes.getAttribute(HTTP_CHECKSUM_VALUE); - if (httpChecksumValue != null) { - return context.httpRequest().copy(r -> r.putHeader(checksumSpecs.headerName(), httpChecksumValue)); - } - return context.httpRequest(); - - } - /** - * Calculates the checksumSpecs of the provided request (and base64 encodes it), storing the result in - * executionAttribute "HttpChecksumValue". + * Calculates the checksum of the provided request (and base64 encodes it), and adds the header to the request. * *

Note: This assumes that the content stream provider can create multiple new streams. If it only supports one (e.g. with * an input stream that doesn't support mark/reset), we could consider buffering the content in memory here and updating the * request body to use that buffered content. We obviously don't want to do that for giant streams, so we haven't opted to do * that yet. */ - private static void saveContentChecksum(RequestBody requestBody, ExecutionAttributes executionAttributes, - Algorithm algorithm) { + @Override + public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { + ChecksumSpecs checksumSpecs = getResolvedChecksumSpecs(executionAttributes); + Optional syncContent = context.requestBody(); + + if (shouldSkipHttpChecksumInHeader(context, executionAttributes, checksumSpecs) || !syncContent.isPresent()) { + return context.httpRequest(); + } + try { String payloadChecksum = BinaryUtils.toBase64(HttpChecksumUtils.computeChecksum( - requestBody.contentStreamProvider().newStream(), algorithm)); - executionAttributes.putAttribute(HTTP_CHECKSUM_VALUE, payloadChecksum); + syncContent.get().contentStreamProvider().newStream(), checksumSpecs.algorithm())); + return context.httpRequest().copy(r -> r.putHeader(checksumSpecs.headerName(), payloadChecksum)); } catch (IOException e) { throw new UncheckedIOException(e); } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/HttpChecksumRequiredInterceptor.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/HttpChecksumRequiredInterceptor.java index c98cde397f0c..9729cd2076d7 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/HttpChecksumRequiredInterceptor.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/HttpChecksumRequiredInterceptor.java @@ -21,7 +21,6 @@ import 
software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.interceptor.Context; -import software.amazon.awssdk.core.interceptor.ExecutionAttribute; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; @@ -41,10 +40,17 @@ */ @SdkInternalApi public class HttpChecksumRequiredInterceptor implements ExecutionInterceptor { - private static final ExecutionAttribute CONTENT_MD5_VALUE = new ExecutionAttribute<>("ContentMd5"); + /** + * Calculates the MD5 checksum of the provided request (and base64 encodes it), and adds the header to the request. + * + *

Note: This assumes that the content stream provider can create multiple new streams. If it only supports one (e.g. with + * an input stream that doesn't support mark/reset), we could consider buffering the content in memory here and updating the + * request body to use that buffered content. We obviously don't want to do that for giant streams, so we haven't opted to do + * that yet. + */ @Override - public void afterMarshalling(Context.AfterMarshalling context, ExecutionAttributes executionAttributes) { + public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { boolean isHttpChecksumRequired = isHttpChecksumRequired(executionAttributes); boolean requestAlreadyHasMd5 = context.httpRequest().firstMatchingHeader(Header.CONTENT_MD5).isPresent(); @@ -52,7 +58,7 @@ public void afterMarshalling(Context.AfterMarshalling context, ExecutionAttribut Optional asyncContent = context.asyncRequestBody(); if (!isHttpChecksumRequired || requestAlreadyHasMd5) { - return; + return context.httpRequest(); } if (asyncContent.isPresent()) { @@ -60,14 +66,13 @@ public void afterMarshalling(Context.AfterMarshalling context, ExecutionAttribut + "for non-blocking content."); } - syncContent.ifPresent(requestBody -> saveContentMd5(requestBody, executionAttributes)); - } - - @Override - public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { - String contentMd5 = executionAttributes.getAttribute(CONTENT_MD5_VALUE); - if (contentMd5 != null) { - return context.httpRequest().copy(r -> r.putHeader(Header.CONTENT_MD5, contentMd5)); + if (syncContent.isPresent()) { + try { + String payloadMd5 = Md5Utils.md5AsBase64(syncContent.get().contentStreamProvider().newStream()); + return context.httpRequest().copy(r -> r.putHeader(Header.CONTENT_MD5, payloadMd5)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } } return context.httpRequest(); } @@ -76,22 +81,4 @@ 
private boolean isHttpChecksumRequired(ExecutionAttributes executionAttributes) return executionAttributes.getAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED) != null || HttpChecksumUtils.isMd5ChecksumRequired(executionAttributes); } - - /** - * Calculates the MD5 checksum of the provided request (and base64 encodes it), storing the result in - * {@link #CONTENT_MD5_VALUE}. - * - *

Note: This assumes that the content stream provider can create multiple new streams. If it only supports one (e.g. with - * an input stream that doesn't support mark/reset), we could consider buffering the content in memory here and updating the - * request body to use that buffered content. We obviously don't want to do that for giant streams, so we haven't opted to do - * that yet. - */ - private void saveContentMd5(RequestBody requestBody, ExecutionAttributes executionAttributes) { - try { - String payloadMd5 = Md5Utils.md5AsBase64(requestBody.contentStreamProvider().newStream()); - executionAttributes.putAttribute(CONTENT_MD5_VALUE, payloadMd5); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/metrics/SdkErrorType.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/metrics/SdkErrorType.java new file mode 100644 index 000000000000..cbc293cdb73b --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/metrics/SdkErrorType.java @@ -0,0 +1,95 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.metrics; + +import java.io.IOException; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.exception.ApiCallAttemptTimeoutException; +import software.amazon.awssdk.core.exception.ApiCallTimeoutException; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.exception.SdkServiceException; +import software.amazon.awssdk.core.retry.RetryUtils; + +/** + * General categories of errors that can be encountered when making an API call attempt. + *

+ * This class is NOT intended to fully distinguish the details of every error that is possible to encounter when making + * an API call attempt; for example, it is not a replacement for detailed logs. Instead, the categories are intentionally + * broad to make it easy at-a-glance what is causing issues with requests, and to help direct further investigation. + */ +@SdkInternalApi +public enum SdkErrorType { + /** + * The service responded with a throttling error. + */ + THROTTLING("Throttling"), + + /** + * The service responded with an error other than {@link #THROTTLING}. + */ + SERVER_ERROR("ServerError"), + + /** + * A clientside timeout occurred, either an attempt level timeout, or API call level. + */ + CONFIGURED_TIMEOUT("ConfiguredTimeout"), + + /** + * An I/O error. + */ + IO("IO"), + + /** + * Catch-all type for errors that don't fit into the other categories. + */ + OTHER("Other"), + + ; + + private final String name; + + SdkErrorType(String name) { + this.name = name; + } + + @Override + public String toString() { + return name; + } + + public static SdkErrorType fromException(Throwable e) { + if (e instanceof IOException) { + return IO; + } + + if (e instanceof SdkException) { + SdkException sdkError = (SdkException) e; + if (sdkError instanceof ApiCallTimeoutException || sdkError instanceof ApiCallAttemptTimeoutException) { + return CONFIGURED_TIMEOUT; + } + + if (RetryUtils.isThrottlingException(sdkError)) { + return THROTTLING; + } + + if (e instanceof SdkServiceException) { + return SERVER_ERROR; + } + } + + return OTHER; + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/ClassLoaderHelper.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/ClassLoaderHelper.java index 3b5b50f7e9a0..2894b2bd8dc4 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/ClassLoaderHelper.java +++ 
b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/ClassLoaderHelper.java @@ -69,8 +69,7 @@ private static Class loadClassViaContext(String fqcn) { * @throws ClassNotFoundException * if failed to load the class */ - public static Class loadClass(String fqcn, Class... classes) - throws ClassNotFoundException { + public static Class loadClass(String fqcn, Class... classes) throws ClassNotFoundException { return loadClass(fqcn, true, classes); } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/waiters/WaiterExecutor.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/waiters/WaiterExecutor.java index 8610c32e49f3..5377e0f04e59 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/waiters/WaiterExecutor.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/waiters/WaiterExecutor.java @@ -16,14 +16,12 @@ package software.amazon.awssdk.core.internal.waiters; import java.util.List; -import java.util.Optional; import java.util.function.Supplier; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.annotations.ThreadSafe; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.waiters.WaiterAcceptor; import software.amazon.awssdk.core.waiters.WaiterResponse; -import software.amazon.awssdk.core.waiters.WaiterState; import software.amazon.awssdk.utils.Either; import software.amazon.awssdk.utils.Validate; @@ -45,45 +43,42 @@ public WaiterExecutor(WaiterConfiguration configuration, } WaiterResponse execute(Supplier pollingFunction) { - return doExecute(pollingFunction, 0, System.currentTimeMillis()); - } - - WaiterResponse doExecute(Supplier pollingFunction, int attemptNumber, long startTime) { - attemptNumber++; - T response; - try { - response = pollingFunction.get(); - } catch (Exception exception) { - return evaluate(pollingFunction, Either.right(exception), attemptNumber, 
startTime); - } - - return evaluate(pollingFunction, Either.left(response), attemptNumber, startTime); - } + int attemptNumber = 0; + long startTime = System.currentTimeMillis(); - private WaiterResponse evaluate(Supplier pollingFunction, - Either responseOrException, - int attemptNumber, - long startTime) { - Optional> waiterAcceptor = executorHelper.firstWaiterAcceptorIfMatched(responseOrException); + while (true) { + attemptNumber++; - if (waiterAcceptor.isPresent()) { - WaiterState state = waiterAcceptor.get().waiterState(); - switch (state) { + Either polledResponse = pollResponse(pollingFunction); + WaiterAcceptor waiterAcceptor = firstWaiterAcceptor(polledResponse); + switch (waiterAcceptor.waiterState()) { case SUCCESS: - return executorHelper.createWaiterResponse(responseOrException, attemptNumber); + return executorHelper.createWaiterResponse(polledResponse, attemptNumber); case RETRY: - return maybeRetry(pollingFunction, attemptNumber, startTime); + waitToRetry(attemptNumber, startTime); + break; case FAILURE: - throw executorHelper.waiterFailureException(waiterAcceptor.get()); + throw executorHelper.waiterFailureException(waiterAcceptor); default: throw new UnsupportedOperationException(); } } + } + + private Either pollResponse(Supplier pollingFunction) { + try { + return Either.left(pollingFunction.get()); + } catch (Exception exception) { + return Either.right(exception); + } + } - throw executorHelper.noneMatchException(responseOrException); + private WaiterAcceptor firstWaiterAcceptor(Either responseOrException) { + return executorHelper.firstWaiterAcceptorIfMatched(responseOrException) + .orElseThrow(() -> executorHelper.noneMatchException(responseOrException)); } - private WaiterResponse maybeRetry(Supplier pollingFunction, int attemptNumber, long startTime) { + private void waitToRetry(int attemptNumber, long startTime) { Either nextDelayOrUnretryableException = executorHelper.nextDelayOrUnretryableException(attemptNumber, startTime); @@ -97,6 
+92,5 @@ private WaiterResponse maybeRetry(Supplier pollingFunction, int attemptNum Thread.currentThread().interrupt(); throw SdkClientException.create("The thread got interrupted", e); } - return doExecute(pollingFunction, attemptNumber, startTime); } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/metrics/CoreMetric.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/metrics/CoreMetric.java index fda6bbc67113..f4529d32c1a0 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/metrics/CoreMetric.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/metrics/CoreMetric.java @@ -118,6 +118,22 @@ public final class CoreMetric { public static final SdkMetric AWS_EXTENDED_REQUEST_ID = metric("AwsExtendedRequestId", String.class, MetricLevel.INFO); + /** + * The type of error that occurred for a call attempt. + *

+ * The following are possible values: + *

    + *
  • Throttling - The service responded with a throttling error.
  • + *
  • ServerError - The service responded with an error other than throttling.
  • + *
  • ClientTimeout - A client timeout occurred, either at the API call level, or API call attempt level.
  • + *
  • IO - An I/O error occurred.
  • + *
  • Other - Catch-all for other errors that don't fall into the above categories.
  • + *
+ *

+ */ + public static final SdkMetric ERROR_TYPE = + metric("ErrorType", String.class, MetricLevel.INFO); + private CoreMetric() { } diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/AsyncRequestBodyTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/AsyncRequestBodyTest.java index e0252c9ba6d2..aab643cbb6a6 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/AsyncRequestBodyTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/AsyncRequestBodyTest.java @@ -15,44 +15,39 @@ package software.amazon.awssdk.core.async; -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import com.google.common.jimfs.Configuration; import com.google.common.jimfs.Jimfs; import io.reactivex.Flowable; -import java.io.File; -import java.io.FileWriter; import java.io.IOException; -import java.io.InputStream; import java.nio.ByteBuffer; +import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.nio.file.FileSystem; import java.nio.file.Files; import java.nio.file.Path; -import java.time.Instant; -import java.util.Collections; import java.util.List; -import java.util.concurrent.Callable; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; import java.util.stream.Collectors; import org.assertj.core.util.Lists; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; import org.reactivestreams.Publisher; 
import org.reactivestreams.Subscriber; import software.amazon.awssdk.core.internal.util.Mimetype; import software.amazon.awssdk.http.async.SimpleSubscriber; import software.amazon.awssdk.utils.BinaryUtils; -import software.amazon.awssdk.utils.StringInputStream; -@RunWith(Parameterized.class) public class AsyncRequestBodyTest { - private final static String testString = "Hello!"; - private final static Path path; + + private static final String testString = "Hello!"; + private static final Path path; static { FileSystem fs = Jimfs.newFileSystem(Configuration.unix()); @@ -64,27 +59,16 @@ public class AsyncRequestBodyTest { } } - @Parameterized.Parameters - public static AsyncRequestBody[] data() { - return new AsyncRequestBody[]{ - AsyncRequestBody.fromString(testString), - AsyncRequestBody.fromFile(path) - }; + @ParameterizedTest + @MethodSource("contentIntegrityChecks") + void hasCorrectLength(AsyncRequestBody asyncRequestBody) { + assertEquals(testString.length(), asyncRequestBody.contentLength().get()); } - private AsyncRequestBody provider; - - public AsyncRequestBodyTest(AsyncRequestBody provider) { - this.provider = provider; - } - @Test - public void hasCorrectLength() { - assertThat(provider.contentLength().get()).isEqualTo(testString.length()); - } - - @Test - public void hasCorrectContent() throws InterruptedException { + @ParameterizedTest + @MethodSource("contentIntegrityChecks") + void hasCorrectContent(AsyncRequestBody asyncRequestBody) throws InterruptedException { StringBuilder sb = new StringBuilder(); CountDownLatch done = new CountDownLatch(1); @@ -106,75 +90,268 @@ public void onComplete() { } }; - provider.subscribe(subscriber); + asyncRequestBody.subscribe(subscriber); done.await(); - assertThat(sb.toString()).isEqualTo(testString); + assertEquals(testString, sb.toString()); + } + + private static AsyncRequestBody[] contentIntegrityChecks() { + return new AsyncRequestBody[] { + AsyncRequestBody.fromString(testString), + 
AsyncRequestBody.fromFile(path) + }; } @Test - public void stringConstructorHasCorrectContentType() { - AsyncRequestBody requestBody = AsyncRequestBody.fromString("hello world"); - assertThat(requestBody.contentType()).isEqualTo("text/plain; charset=UTF-8"); + void fromBytesCopiesTheProvidedByteArray() { + byte[] bytes = testString.getBytes(StandardCharsets.UTF_8); + byte[] bytesClone = bytes.clone(); + + AsyncRequestBody asyncRequestBody = AsyncRequestBody.fromBytes(bytes); + + for (int i = 0; i < bytes.length; i++) { + bytes[i] += 1; + } + + AtomicReference publishedBuffer = new AtomicReference<>(); + Subscriber subscriber = new SimpleSubscriber(publishedBuffer::set); + + asyncRequestBody.subscribe(subscriber); + + byte[] publishedByteArray = BinaryUtils.copyAllBytesFrom(publishedBuffer.get()); + assertArrayEquals(bytesClone, publishedByteArray); } @Test - public void stringWithEncoding1ConstructorHasCorrectContentType() { - AsyncRequestBody requestBody = AsyncRequestBody.fromString("hello world", StandardCharsets.ISO_8859_1); - assertThat(requestBody.contentType()).isEqualTo("text/plain; charset=ISO-8859-1"); + void fromBytesUnsafeDoesNotCopyTheProvidedByteArray() { + byte[] bytes = testString.getBytes(StandardCharsets.UTF_8); + + AsyncRequestBody asyncRequestBody = AsyncRequestBody.fromBytesUnsafe(bytes); + + for (int i = 0; i < bytes.length; i++) { + bytes[i] += 1; + } + + AtomicReference publishedBuffer = new AtomicReference<>(); + Subscriber subscriber = new SimpleSubscriber(publishedBuffer::set); + + asyncRequestBody.subscribe(subscriber); + + byte[] publishedByteArray = BinaryUtils.copyAllBytesFrom(publishedBuffer.get()); + assertArrayEquals(bytes, publishedByteArray); + } + + @ParameterizedTest + @MethodSource("safeByteBufferBodyBuilders") + void safeByteBufferBuildersCopyTheProvidedBuffer(Function bodyBuilder) { + byte[] bytes = testString.getBytes(StandardCharsets.UTF_8); + byte[] bytesClone = bytes.clone(); + + AsyncRequestBody asyncRequestBody = 
bodyBuilder.apply(ByteBuffer.wrap(bytes)); + + for (int i = 0; i < bytes.length; i++) { + bytes[i] += 1; + } + + AtomicReference publishedBuffer = new AtomicReference<>(); + Subscriber subscriber = new SimpleSubscriber(publishedBuffer::set); + + asyncRequestBody.subscribe(subscriber); + + byte[] publishedByteArray = BinaryUtils.copyAllBytesFrom(publishedBuffer.get()); + assertArrayEquals(bytesClone, publishedByteArray); + } + + private static Function[] safeByteBufferBodyBuilders() { + Function fromByteBuffer = AsyncRequestBody::fromByteBuffer; + Function fromRemainingByteBuffer = AsyncRequestBody::fromRemainingByteBuffer; + Function fromByteBuffers = AsyncRequestBody::fromByteBuffers; + Function fromRemainingByteBuffers = AsyncRequestBody::fromRemainingByteBuffers; + return new Function[] {fromByteBuffer, fromRemainingByteBuffer, fromByteBuffers, fromRemainingByteBuffers}; + } + + @ParameterizedTest + @MethodSource("unsafeByteBufferBodyBuilders") + void unsafeByteBufferBuildersDoNotCopyTheProvidedBuffer(Function bodyBuilder) { + byte[] bytes = testString.getBytes(StandardCharsets.UTF_8); + + AsyncRequestBody asyncRequestBody = bodyBuilder.apply(ByteBuffer.wrap(bytes)); + + for (int i = 0; i < bytes.length; i++) { + bytes[i] += 1; + } + + AtomicReference publishedBuffer = new AtomicReference<>(); + Subscriber subscriber = new SimpleSubscriber(publishedBuffer::set); + + asyncRequestBody.subscribe(subscriber); + + byte[] publishedByteArray = BinaryUtils.copyAllBytesFrom(publishedBuffer.get()); + assertArrayEquals(bytes, publishedByteArray); + } + + private static Function[] unsafeByteBufferBodyBuilders() { + Function fromByteBuffer = AsyncRequestBody::fromByteBufferUnsafe; + Function fromRemainingByteBuffer = AsyncRequestBody::fromRemainingByteBufferUnsafe; + Function fromByteBuffers = AsyncRequestBody::fromByteBuffersUnsafe; + Function fromRemainingByteBuffers = AsyncRequestBody::fromRemainingByteBuffersUnsafe; + return new Function[] {fromByteBuffer, 
fromRemainingByteBuffer, fromByteBuffers, fromRemainingByteBuffers}; + } + + @ParameterizedTest + @MethodSource("nonRewindingByteBufferBodyBuilders") + void nonRewindingByteBufferBuildersReadFromTheInputBufferPosition( + Function bodyBuilder) { + byte[] bytes = testString.getBytes(StandardCharsets.UTF_8); + ByteBuffer bb = ByteBuffer.wrap(bytes); + int expectedPosition = bytes.length / 2; + bb.position(expectedPosition); + + AsyncRequestBody asyncRequestBody = bodyBuilder.apply(bb); + + AtomicReference publishedBuffer = new AtomicReference<>(); + Subscriber subscriber = new SimpleSubscriber(publishedBuffer::set); + + asyncRequestBody.subscribe(subscriber); + + int remaining = bb.remaining(); + assertEquals(remaining, publishedBuffer.get().remaining()); + for (int i = 0; i < remaining; i++) { + assertEquals(bb.get(), publishedBuffer.get().get()); + } + } + + private static Function[] nonRewindingByteBufferBodyBuilders() { + Function fromRemainingByteBuffer = AsyncRequestBody::fromRemainingByteBuffer; + Function fromRemainingByteBufferUnsafe = AsyncRequestBody::fromRemainingByteBufferUnsafe; + Function fromRemainingByteBuffers = AsyncRequestBody::fromRemainingByteBuffers; + Function fromRemainingByteBuffersUnsafe = AsyncRequestBody::fromRemainingByteBuffersUnsafe; + return new Function[] {fromRemainingByteBuffer, fromRemainingByteBufferUnsafe, fromRemainingByteBuffers, + fromRemainingByteBuffersUnsafe}; + } + + @ParameterizedTest + @MethodSource("safeNonRewindingByteBufferBodyBuilders") + void safeNonRewindingByteBufferBuildersCopyFromTheInputBufferPosition( + Function bodyBuilder) { + byte[] bytes = testString.getBytes(StandardCharsets.UTF_8); + ByteBuffer bb = ByteBuffer.wrap(bytes); + int expectedPosition = bytes.length / 2; + bb.position(expectedPosition); + + AsyncRequestBody asyncRequestBody = bodyBuilder.apply(bb); + + AtomicReference publishedBuffer = new AtomicReference<>(); + Subscriber subscriber = new SimpleSubscriber(publishedBuffer::set); + + 
asyncRequestBody.subscribe(subscriber); + + int remaining = bb.remaining(); + assertEquals(remaining, publishedBuffer.get().capacity()); + for (int i = 0; i < remaining; i++) { + assertEquals(bb.get(), publishedBuffer.get().get()); + } + } + + private static Function[] safeNonRewindingByteBufferBodyBuilders() { + Function fromRemainingByteBuffer = AsyncRequestBody::fromRemainingByteBuffer; + Function fromRemainingByteBuffers = AsyncRequestBody::fromRemainingByteBuffers; + return new Function[] {fromRemainingByteBuffer, fromRemainingByteBuffers}; + } + + @ParameterizedTest + @MethodSource("rewindingByteBufferBodyBuilders") + void rewindingByteBufferBuildersDoNotRewindTheInputBuffer(Function bodyBuilder) { + byte[] bytes = testString.getBytes(StandardCharsets.UTF_8); + ByteBuffer bb = ByteBuffer.wrap(bytes); + int expectedPosition = bytes.length / 2; + bb.position(expectedPosition); + + AsyncRequestBody asyncRequestBody = bodyBuilder.apply(bb); + + Subscriber subscriber = new SimpleSubscriber(buffer -> { + }); + + asyncRequestBody.subscribe(subscriber); + + assertEquals(expectedPosition, bb.position()); + } + + @ParameterizedTest + @MethodSource("rewindingByteBufferBodyBuilders") + void rewindingByteBufferBuildersReadTheInputBufferFromTheBeginning( + Function bodyBuilder) { + byte[] bytes = testString.getBytes(StandardCharsets.UTF_8); + ByteBuffer bb = ByteBuffer.wrap(bytes); + bb.position(bytes.length / 2); + + AsyncRequestBody asyncRequestBody = bodyBuilder.apply(bb); + + AtomicReference publishedBuffer = new AtomicReference<>(); + Subscriber subscriber = new SimpleSubscriber(publishedBuffer::set); + + asyncRequestBody.subscribe(subscriber); + + assertEquals(0, publishedBuffer.get().position()); + publishedBuffer.get().rewind(); + bb.rewind(); + assertEquals(bb, publishedBuffer.get()); + } + + private static Function[] rewindingByteBufferBodyBuilders() { + Function fromByteBuffer = AsyncRequestBody::fromByteBuffer; + Function fromByteBufferUnsafe = 
AsyncRequestBody::fromByteBufferUnsafe; + Function fromByteBuffers = AsyncRequestBody::fromByteBuffers; + Function fromByteBuffersUnsafe = AsyncRequestBody::fromByteBuffersUnsafe; + return new Function[] {fromByteBuffer, fromByteBufferUnsafe, fromByteBuffers, fromByteBuffersUnsafe}; + } + + @ParameterizedTest + @ValueSource(strings = {"US-ASCII", "ISO-8859-1", "UTF-8", "UTF-16BE", "UTF-16LE", "UTF-16"}) + void charsetsAreConvertedToTheCorrectContentType(Charset charset) { + AsyncRequestBody requestBody = AsyncRequestBody.fromString("hello world", charset); + assertEquals("text/plain; charset=" + charset.name(), requestBody.contentType()); } @Test - public void stringWithEncoding2ConstructorHasCorrectContentType() { - AsyncRequestBody requestBody = AsyncRequestBody.fromString("hello world", StandardCharsets.UTF_16BE); - assertThat(requestBody.contentType()).isEqualTo("text/plain; charset=UTF-16BE"); + void stringConstructorHasCorrectDefaultContentType() { + AsyncRequestBody requestBody = AsyncRequestBody.fromString("hello world"); + assertEquals("text/plain; charset=UTF-8", requestBody.contentType()); } @Test - public void fileConstructorHasCorrectContentType() { + void fileConstructorHasCorrectContentType() { AsyncRequestBody requestBody = AsyncRequestBody.fromFile(path); - assertThat(requestBody.contentType()).isEqualTo(Mimetype.MIMETYPE_OCTET_STREAM); + assertEquals(Mimetype.MIMETYPE_OCTET_STREAM, requestBody.contentType()); } @Test - public void bytesArrayConstructorHasCorrectContentType() { + void bytesArrayConstructorHasCorrectContentType() { AsyncRequestBody requestBody = AsyncRequestBody.fromBytes("hello world".getBytes()); - assertThat(requestBody.contentType()).isEqualTo(Mimetype.MIMETYPE_OCTET_STREAM); + assertEquals(Mimetype.MIMETYPE_OCTET_STREAM, requestBody.contentType()); } @Test - public void bytesBufferConstructorHasCorrectContentType() { + void bytesBufferConstructorHasCorrectContentType() { ByteBuffer byteBuffer = ByteBuffer.wrap("hello 
world".getBytes()); AsyncRequestBody requestBody = AsyncRequestBody.fromByteBuffer(byteBuffer); - assertThat(requestBody.contentType()).isEqualTo(Mimetype.MIMETYPE_OCTET_STREAM); + assertEquals(Mimetype.MIMETYPE_OCTET_STREAM, requestBody.contentType()); } @Test - public void emptyBytesConstructorHasCorrectContentType() { + void emptyBytesConstructorHasCorrectContentType() { AsyncRequestBody requestBody = AsyncRequestBody.empty(); - assertThat(requestBody.contentType()).isEqualTo(Mimetype.MIMETYPE_OCTET_STREAM); + assertEquals(Mimetype.MIMETYPE_OCTET_STREAM, requestBody.contentType()); } @Test - public void publisherConstructorHasCorrectContentType() { + void publisherConstructorHasCorrectContentType() { List requestBodyStrings = Lists.newArrayList("A", "B", "C"); List bodyBytes = requestBodyStrings.stream() - .map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) - .collect(Collectors.toList()); + .map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + .collect(Collectors.toList()); Publisher bodyPublisher = Flowable.fromIterable(bodyBytes); AsyncRequestBody requestBody = AsyncRequestBody.fromPublisher(bodyPublisher); - assertThat(requestBody.contentType()).isEqualTo(Mimetype.MIMETYPE_OCTET_STREAM); - } - - @Test - public void fromBytes_byteArrayNotNull_createsCopy() { - byte[] original = {0x1, 0x2, 0x3, 0x4}; - byte[] toModify = new byte[original.length]; - System.arraycopy(original, 0, toModify, 0, original.length); - AsyncRequestBody body = AsyncRequestBody.fromBytes(toModify); - for (int i = 0; i < toModify.length; ++i) { - toModify[i]++; - } - ByteBuffer publishedBb = Flowable.fromPublisher(body).toList().blockingGet().get(0); - assertThat(BinaryUtils.copyAllBytesFrom(publishedBb)).isEqualTo(original); + assertEquals(Mimetype.MIMETYPE_OCTET_STREAM, requestBody.contentType()); } } diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/builder/DefaultClientBuilderTest.java 
b/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/builder/DefaultClientBuilderTest.java index ec526330cdc9..bc4a00954ca8 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/builder/DefaultClientBuilderTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/builder/DefaultClientBuilderTest.java @@ -38,6 +38,7 @@ import static software.amazon.awssdk.core.client.config.SdkClientOption.PROFILE_FILE_SUPPLIER; import static software.amazon.awssdk.core.client.config.SdkClientOption.PROFILE_NAME; import static software.amazon.awssdk.core.client.config.SdkClientOption.RETRY_POLICY; +import static software.amazon.awssdk.core.client.config.SdkClientOption.SCHEDULED_EXECUTOR_SERVICE; import static software.amazon.awssdk.core.internal.SdkInternalTestAdvancedClientOption.ENDPOINT_OVERRIDDEN_OVERRIDE; import java.beans.BeanInfo; @@ -52,6 +53,8 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; import java.util.function.Supplier; import org.assertj.core.api.Assertions; import org.junit.Before; @@ -76,6 +79,7 @@ import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.profiles.ProfileFile; import software.amazon.awssdk.utils.AttributeMap; +import software.amazon.awssdk.utils.ScheduledExecutorUtils.UnmanagedScheduledExecutorService; import software.amazon.awssdk.utils.StringInputStream; /** @@ -132,6 +136,7 @@ public void overrideConfigurationReturnsSetValues() { .type(ProfileFile.Type.CONFIGURATION) .build(); String profileName = "name"; + ScheduledExecutorService scheduledExecutorService = Executors.newScheduledThreadPool(1); ClientOverrideConfiguration overrideConfig = ClientOverrideConfiguration.builder() .executionInterceptors(interceptors) @@ -148,6 +153,7 @@ public void overrideConfigurationReturnsSetValues() { .metricPublishers(metricPublishers) 
.executionAttributes(executionAttributes) .putAdvancedOption(ENDPOINT_OVERRIDDEN_OVERRIDE, Boolean.TRUE) + .scheduledExecutorService(scheduledExecutorService) .build(); TestClientBuilder builder = testClientBuilder().overrideConfiguration(overrideConfig); @@ -166,6 +172,7 @@ public void overrideConfigurationReturnsSetValues() { assertThat(builderOverrideConfig.metricPublishers()).isEqualTo(metricPublishers); assertThat(builderOverrideConfig.executionAttributes().getAttributes()).isEqualTo(executionAttributes.getAttributes()); assertThat(builderOverrideConfig.advancedOption(ENDPOINT_OVERRIDDEN_OVERRIDE)).isEqualTo(Optional.of(Boolean.TRUE)); + assertThat(builderOverrideConfig.scheduledExecutorService().get()).isEqualTo(scheduledExecutorService); } @Test @@ -189,6 +196,7 @@ public void overrideConfigurationOmitsUnsetValues() { assertThat(builderOverrideConfig.metricPublishers()).isEmpty(); assertThat(builderOverrideConfig.executionAttributes().getAttributes()).isEmpty(); assertThat(builderOverrideConfig.advancedOption(ENDPOINT_OVERRIDDEN_OVERRIDE)).isEmpty(); + assertThat(builderOverrideConfig.scheduledExecutorService()).isEmpty(); } @Test @@ -198,6 +206,7 @@ public void buildIncludesClientOverrides() { interceptors.add(interceptor); RetryPolicy retryPolicy = RetryPolicy.builder().build(); + ScheduledExecutorService scheduledExecutorService = Executors.newScheduledThreadPool(1); Map> headers = new HashMap<>(); List headerValues = new ArrayList<>(); @@ -247,6 +256,7 @@ public void close() { .metricPublishers(metricPublishers) .executionAttributes(executionAttributes) .putAdvancedOption(ENDPOINT_OVERRIDDEN_OVERRIDE, Boolean.TRUE) + .scheduledExecutorService(scheduledExecutorService) .build(); SdkClientConfiguration config = @@ -267,6 +277,9 @@ public void close() { assertThat(config.option(METRIC_PUBLISHERS)).contains(metricPublisher); assertThat(config.option(EXECUTION_ATTRIBUTES).getAttribute(execAttribute)).isEqualTo("value"); 
assertThat(config.option(ENDPOINT_OVERRIDDEN)).isEqualTo(Boolean.TRUE); + UnmanagedScheduledExecutorService customScheduledExecutorService = + (UnmanagedScheduledExecutorService) config.option(SCHEDULED_EXECUTOR_SERVICE); + assertThat(customScheduledExecutorService.scheduledExecutorService()).isEqualTo(scheduledExecutorService); } @Test diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ByteArrayAsyncRequestBodyTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ByteArrayAsyncRequestBodyTest.java deleted file mode 100644 index 378fbf2f59c3..000000000000 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ByteArrayAsyncRequestBodyTest.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.core.internal.async; - -import static org.junit.jupiter.api.Assertions.assertTrue; - -import java.nio.ByteBuffer; -import java.util.concurrent.atomic.AtomicBoolean; -import org.junit.jupiter.api.Test; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; -import software.amazon.awssdk.core.internal.util.Mimetype; - -public class ByteArrayAsyncRequestBodyTest { - private class testSubscriber implements Subscriber { - private Subscription subscription; - protected AtomicBoolean onCompleteCalled = new AtomicBoolean(false); - - @Override - public void onSubscribe(Subscription s) { - this.subscription = s; - s.request(1); - } - - @Override - public void onNext(ByteBuffer byteBuffer) { - - } - - @Override - public void onError(Throwable throwable) { - - } - - @Override - public void onComplete() { - subscription.request(1); - onCompleteCalled.set(true); - } - } - - testSubscriber subscriber = new testSubscriber(); - - @Test - public void concurrentRequests_shouldCompleteNormally() { - ByteArrayAsyncRequestBody byteArrayReq = new ByteArrayAsyncRequestBody("Hello World!".getBytes(), - Mimetype.MIMETYPE_OCTET_STREAM); - byteArrayReq.subscribe(subscriber); - assertTrue(subscriber.onCompleteCalled.get()); - } - -} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ByteBuffersAsyncRequestBodyTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ByteBuffersAsyncRequestBodyTest.java new file mode 100644 index 000000000000..b4073247f8b9 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ByteBuffersAsyncRequestBodyTest.java @@ -0,0 +1,227 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.async; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.stream.IntStream; +import org.junit.jupiter.api.Test; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.utils.BinaryUtils; + +class ByteBuffersAsyncRequestBodyTest { + + private static class TestSubscriber implements Subscriber { + private Subscription subscription; + private boolean onCompleteCalled = false; + private int callsToComplete = 0; + private final List publishedResults = Collections.synchronizedList(new ArrayList<>()); + + public void request(long n) { + subscription.request(n); + } + + @Override + public void onSubscribe(Subscription s) { + this.subscription = s; + } + + @Override + public void onNext(ByteBuffer byteBuffer) { + publishedResults.add(byteBuffer); + } + + @Override + public void onError(Throwable throwable) { + throw new IllegalStateException(throwable); + } + + @Override + public void onComplete() { + onCompleteCalled = true; + callsToComplete++; + } + } + 
+ @Test + public void subscriberIsMarkedAsCompleted() { + AsyncRequestBody requestBody = ByteBuffersAsyncRequestBody.from("Hello World!".getBytes(StandardCharsets.UTF_8)); + + TestSubscriber subscriber = new TestSubscriber(); + requestBody.subscribe(subscriber); + subscriber.request(1); + + assertTrue(subscriber.onCompleteCalled); + assertEquals(1, subscriber.publishedResults.size()); + } + + @Test + public void subscriberIsMarkedAsCompletedWhenARequestIsMadeForMoreBuffersThanAreAvailable() { + AsyncRequestBody requestBody = ByteBuffersAsyncRequestBody.from("Hello World!".getBytes(StandardCharsets.UTF_8)); + + TestSubscriber subscriber = new TestSubscriber(); + requestBody.subscribe(subscriber); + subscriber.request(2); + + assertTrue(subscriber.onCompleteCalled); + assertEquals(1, subscriber.publishedResults.size()); + } + + @Test + public void subscriberIsThreadSafeAndMarkedAsCompletedExactlyOnce() throws InterruptedException { + int numBuffers = 100; + AsyncRequestBody requestBody = ByteBuffersAsyncRequestBody.of(IntStream.range(0, numBuffers) + .mapToObj(i -> ByteBuffer.wrap(new byte[1])) + .toArray(ByteBuffer[]::new)); + + TestSubscriber subscriber = new TestSubscriber(); + requestBody.subscribe(subscriber); + + int parallelism = 8; + ExecutorService executorService = Executors.newFixedThreadPool(parallelism); + for (int i = 0; i < parallelism; i++) { + executorService.submit(() -> { + for (int j = 0; j < numBuffers; j++) { + subscriber.request(2); + } + }); + } + executorService.shutdown(); + executorService.awaitTermination(1, TimeUnit.MINUTES); + + assertTrue(subscriber.onCompleteCalled); + assertEquals(1, subscriber.callsToComplete); + assertEquals(numBuffers, subscriber.publishedResults.size()); + } + + @Test + public void subscriberIsNotMarkedAsCompletedWhenThereAreRemainingBuffersToPublish() { + byte[] helloWorld = "Hello World!".getBytes(StandardCharsets.UTF_8); + byte[] goodbyeWorld = "Goodbye World!".getBytes(StandardCharsets.UTF_8); + 
AsyncRequestBody requestBody = ByteBuffersAsyncRequestBody.of((long) (helloWorld.length + goodbyeWorld.length), + ByteBuffer.wrap(helloWorld), + ByteBuffer.wrap(goodbyeWorld)); + + TestSubscriber subscriber = new TestSubscriber(); + requestBody.subscribe(subscriber); + subscriber.request(1); + + assertFalse(subscriber.onCompleteCalled); + assertEquals(1, subscriber.publishedResults.size()); + } + + @Test + public void subscriberReceivesAllBuffers() { + byte[] helloWorld = "Hello World!".getBytes(StandardCharsets.UTF_8); + byte[] goodbyeWorld = "Goodbye World!".getBytes(StandardCharsets.UTF_8); + + AsyncRequestBody requestBody = ByteBuffersAsyncRequestBody.of((long) (helloWorld.length + goodbyeWorld.length), + ByteBuffer.wrap(helloWorld), + ByteBuffer.wrap(goodbyeWorld)); + + TestSubscriber subscriber = new TestSubscriber(); + requestBody.subscribe(subscriber); + subscriber.request(2); + + assertEquals(2, subscriber.publishedResults.size()); + assertTrue(subscriber.onCompleteCalled); + assertArrayEquals(helloWorld, BinaryUtils.copyAllBytesFrom(subscriber.publishedResults.get(0))); + assertArrayEquals(goodbyeWorld, BinaryUtils.copyAllBytesFrom(subscriber.publishedResults.get(1))); + } + + @Test + public void multipleSubscribersReceiveTheSameResults() { + ByteBuffer sourceBuffer = ByteBuffer.wrap("Hello World!".getBytes(StandardCharsets.UTF_8)); + AsyncRequestBody requestBody = ByteBuffersAsyncRequestBody.of(sourceBuffer); + + TestSubscriber subscriber = new TestSubscriber(); + requestBody.subscribe(subscriber); + subscriber.request(1); + TestSubscriber otherSubscriber = new TestSubscriber(); + requestBody.subscribe(otherSubscriber); + otherSubscriber.request(1); + + ByteBuffer publishedBuffer = subscriber.publishedResults.get(0); + ByteBuffer otherPublishedBuffer = otherSubscriber.publishedResults.get(0); + + assertEquals(publishedBuffer, otherPublishedBuffer); + } + + @Test + public void canceledSubscriberDoesNotReturnNewResults() { + AsyncRequestBody requestBody = 
ByteBuffersAsyncRequestBody.of(ByteBuffer.wrap(new byte[0])); + + TestSubscriber subscriber = new TestSubscriber(); + requestBody.subscribe(subscriber); + + subscriber.subscription.cancel(); + subscriber.request(1); + + assertTrue(subscriber.publishedResults.isEmpty()); + } + + // Pending discussions on https://github.com/aws/aws-sdk-java-v2/issues/3928 + @Test + public void directBuffersAreCoppiedToNonDirectBuffers() { + byte[] bytes = "Hello World!".getBytes(StandardCharsets.UTF_8); + ByteBuffer buffer = ByteBuffer.allocateDirect(bytes.length) + .put(bytes); + buffer.flip(); + AsyncRequestBody requestBody = ByteBuffersAsyncRequestBody.of(buffer); + + TestSubscriber subscriber = new TestSubscriber(); + requestBody.subscribe(subscriber); + subscriber.request(1); + + ByteBuffer publishedBuffer = subscriber.publishedResults.get(0); + assertFalse(publishedBuffer.isDirect()); + byte[] publishedBytes = new byte[publishedBuffer.remaining()]; + publishedBuffer.get(publishedBytes); + assertArrayEquals(bytes, publishedBytes); + } + + @Test + public void staticOfByteBufferConstructorSetsLengthBasedOnBufferRemaining() { + ByteBuffer bb1 = ByteBuffer.allocate(2); + ByteBuffer bb2 = ByteBuffer.allocate(2); + bb2.position(1); + ByteBuffersAsyncRequestBody body = ByteBuffersAsyncRequestBody.of(bb1, bb2); + assertTrue(body.contentLength().isPresent()); + assertEquals(bb1.remaining() + bb2.remaining(), body.contentLength().get()); + } + + @Test + public void staticFromBytesConstructorSetsLengthBasedOnArrayLength() { + byte[] bytes = new byte[2]; + ByteBuffersAsyncRequestBody body = ByteBuffersAsyncRequestBody.from(bytes); + assertTrue(body.contentLength().isPresent()); + assertEquals(bytes.length, body.contentLength().get()); + } + +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeAsyncHttpRequestStageTest.java 
b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeAsyncHttpRequestStageTest.java index 092682e54998..df8126db2343 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeAsyncHttpRequestStageTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeAsyncHttpRequestStageTest.java @@ -16,11 +16,14 @@ package software.amazon.awssdk.core.internal.http.pipeline.stages; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -31,6 +34,9 @@ import java.time.Duration; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; @@ -47,6 +53,7 @@ import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.internal.http.HttpClientDependencies; import software.amazon.awssdk.core.internal.http.RequestExecutionContext; +import software.amazon.awssdk.core.internal.http.TransformingAsyncResponseHandler; import software.amazon.awssdk.core.internal.http.timers.ClientExecutionAndRequestTimerTestUtils; import software.amazon.awssdk.core.internal.util.AsyncResponseHandlerTestUtils; import software.amazon.awssdk.http.SdkHttpFullRequest; @@ -54,6 +61,8 
@@ import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.utils.CompletableFutureUtils; +import software.amazon.awssdk.utils.ThreadFactoryBuilder; import utils.ValidSdkObjects; @RunWith(MockitoJUnitRunner.class) @@ -152,6 +161,82 @@ public void testExecute_contextContainsMetricCollector_addsChildToExecuteRequest } } + @Test + public void execute_handlerFutureCompletedNormally_futureCompletionExecutorRejectsWhenCompleteAsync_futureCompletedSynchronously() { + ExecutorService mockExecutor = mock(ExecutorService.class); + doThrow(new RejectedExecutionException("Busy")).when(mockExecutor).execute(any(Runnable.class)); + + SdkClientConfiguration config = + SdkClientConfiguration.builder() + .option(SdkAdvancedAsyncClientOption.FUTURE_COMPLETION_EXECUTOR, mockExecutor) + .option(ASYNC_HTTP_CLIENT, sdkAsyncHttpClient) + .build(); + HttpClientDependencies dependencies = HttpClientDependencies.builder().clientConfiguration(config).build(); + + TransformingAsyncResponseHandler mockHandler = mock(TransformingAsyncResponseHandler.class); + CompletableFuture prepareFuture = new CompletableFuture(); + when(mockHandler.prepare()).thenReturn(prepareFuture); + + stage = new MakeAsyncHttpRequestStage<>(mockHandler, dependencies); + + CompletableFuture requestFuture = CompletableFuture.completedFuture( + ValidSdkObjects.sdkHttpFullRequest().build()); + + CompletableFuture executeFuture = stage.execute(requestFuture, requestContext()); + + long testThreadId = Thread.currentThread().getId(); + CompletableFuture afterWhenComplete = + executeFuture.whenComplete((r, t) -> assertThat(Thread.currentThread().getId()).isEqualTo(testThreadId)); + + prepareFuture.complete(null); + + afterWhenComplete.join(); + + verify(mockExecutor).execute(any(Runnable.class)); + } + + @Test + public void 
execute_handlerFutureCompletedExceptionally_doesNotAttemptSynchronousComplete() { + String threadNamePrefix = "async-handle-test"; + ExecutorService mockExecutor = Executors.newSingleThreadExecutor( + new ThreadFactoryBuilder().threadNamePrefix(threadNamePrefix).build()); + + SdkClientConfiguration config = + SdkClientConfiguration.builder() + .option(SdkAdvancedAsyncClientOption.FUTURE_COMPLETION_EXECUTOR, mockExecutor) + .option(ASYNC_HTTP_CLIENT, sdkAsyncHttpClient) + .build(); + HttpClientDependencies dependencies = HttpClientDependencies.builder().clientConfiguration(config).build(); + + TransformingAsyncResponseHandler mockHandler = mock(TransformingAsyncResponseHandler.class); + CompletableFuture prepareFuture = spy(new CompletableFuture()); + when(mockHandler.prepare()).thenReturn(prepareFuture); + + stage = new MakeAsyncHttpRequestStage<>(mockHandler, dependencies); + + CompletableFuture requestFuture = CompletableFuture.completedFuture( + ValidSdkObjects.sdkHttpFullRequest().build()); + + CompletableFuture executeFuture = stage.execute(requestFuture, requestContext()); + + try { + CompletableFuture afterHandle = + executeFuture.handle((r, t) -> assertThat(Thread.currentThread().getName()).startsWith(threadNamePrefix)); + + prepareFuture.completeExceptionally(new RuntimeException("parse error")); + + afterHandle.join(); + + assertThatThrownBy(executeFuture::join) + .hasCauseInstanceOf(RuntimeException.class) + .hasMessageContaining("parse error"); + + verify(prepareFuture, times(0)).whenComplete(any()); + } finally { + mockExecutor.shutdown(); + } + } + private HttpClientDependencies clientDependencies(Duration timeout) { SdkClientConfiguration configuration = SdkClientConfiguration.builder() .option(SdkAdvancedAsyncClientOption.FUTURE_COMPLETION_EXECUTOR, Runnable::run) diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/metrics/ErrorTypeTest.java 
b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/metrics/ErrorTypeTest.java new file mode 100644 index 000000000000..6c20c57d8804 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/metrics/ErrorTypeTest.java @@ -0,0 +1,111 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.metrics; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.io.IOException; +import java.util.stream.Stream; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.core.exception.ApiCallAttemptTimeoutException; +import software.amazon.awssdk.core.exception.ApiCallTimeoutException; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.exception.SdkServiceException; + +public class ErrorTypeTest { + + @ParameterizedTest + @MethodSource("testCases") + public void fromException_mapsToCorrectType(TestCase tc) { + assertThat(SdkErrorType.fromException(tc.thrown)).isEqualTo(tc.expectedType); + } + + private static Stream testCases() { + return Stream.of( + tc(new IOException("I/O"), SdkErrorType.IO), + + tc(TestServiceException.builder().build(), SdkErrorType.SERVER_ERROR), + tc(TestServiceException.builder().throttling(true).build(), SdkErrorType.THROTTLING), + + 
tc(ApiCallAttemptTimeoutException.builder().message("Attempt timeout").build(), SdkErrorType.CONFIGURED_TIMEOUT), + tc(ApiCallTimeoutException.builder().message("Call timeout").build(), SdkErrorType.CONFIGURED_TIMEOUT), + + tc(SdkClientException.create("Unmarshalling error"), SdkErrorType.OTHER), + + tc(new OutOfMemoryError("OOM"), SdkErrorType.OTHER) + ); + } + + private static TestCase tc(Throwable thrown, SdkErrorType expectedType) { + return new TestCase(thrown, expectedType); + } + + private static class TestCase { + private final Throwable thrown; + private final SdkErrorType expectedType; + + public TestCase(Throwable thrown, SdkErrorType expectedType) { + this.thrown = thrown; + this.expectedType = expectedType; + } + } + + private static class TestServiceException extends SdkServiceException { + private final boolean throttling; + + protected TestServiceException(BuilderImpl b) { + super(b); + this.throttling = b.throttling; + } + + @Override + public boolean isThrottlingException() { + return throttling; + } + + public static Builder builder() { + return new BuilderImpl(); + } + + public interface Builder extends SdkServiceException.Builder { + Builder throttling(Boolean throttling); + + @Override + TestServiceException build(); + } + + public static class BuilderImpl extends SdkServiceException.BuilderImpl implements Builder { + private boolean throttling; + + @Override + public boolean equalsBySdkFields(Object other) { + return super.equalsBySdkFields(other); + } + + @Override + public Builder throttling(Boolean throttling) { + this.throttling = throttling; + return this; + } + + @Override + public TestServiceException build() { + return new TestServiceException(this); + } + } + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/waiters/WaiterExecutorTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/waiters/WaiterExecutorTest.java new file mode 100644 index 000000000000..2df65de46e0b --- 
/dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/waiters/WaiterExecutorTest.java @@ -0,0 +1,53 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.waiters; + +import java.util.Arrays; +import java.util.concurrent.atomic.LongAdder; +import org.junit.jupiter.api.Test; +import org.testng.Assert; +import software.amazon.awssdk.core.retry.backoff.BackoffStrategy; +import software.amazon.awssdk.core.waiters.WaiterAcceptor; +import software.amazon.awssdk.core.waiters.WaiterOverrideConfiguration; + +class WaiterExecutorTest { + @Test + void largeMaxAttempts() { + + int expectedAttempts = 10_000; + + WaiterOverrideConfiguration conf = + WaiterOverrideConfiguration.builder() + .maxAttempts(expectedAttempts) + .backoffStrategy(BackoffStrategy.none()) + .build(); + + WaiterExecutor sut = + new WaiterExecutor<>(new WaiterConfiguration(conf), + Arrays.asList( + WaiterAcceptor.retryOnResponseAcceptor(c -> c < expectedAttempts), + WaiterAcceptor.successOnResponseAcceptor(c -> c == expectedAttempts) + )); + + LongAdder attemptCounter = new LongAdder(); + sut.execute(() -> { + attemptCounter.increment(); + return attemptCounter.intValue(); + }); + + Assert.assertEquals(attemptCounter.intValue(), expectedAttempts); + } +} \ No newline at end of file diff --git a/docs/design/core/metrics/MetricsList.md b/docs/design/core/metrics/MetricsList.md index 1046d913de46..7203e4a6e531 
100644 --- a/docs/design/core/metrics/MetricsList.md +++ b/docs/design/core/metrics/MetricsList.md @@ -29,7 +29,8 @@ class within `sdk-core`. | AwsExtendedRequestId | `String` | The extended request ID of the service request.| | UnmarshallingDuration | `Duration` | The duration of time taken to unmarshall the HTTP response to an SDK response. | | ServiceCallDuration | `Duration` | The duration of time taken to connect to the service (or acquire a connection from the connection pool), send the serialized request and receive the initial response (e.g. HTTP status code and headers). This DOES NOT include the time taken to read the entire response from the service. | -| `RetryCount` | `Integer` | The number of retries that the SDK performed in the execution of the request. 0 implies that the request worked the first time, and no retries were attempted. | +| RetryCount | `Integer` | The number of retries that the SDK performed in the execution of the request. 0 implies that the request worked the first time, and no retries were attempted. | +| ErrorType | `String` | The general type or category of error that was encountered for a failed API call attempt.
The following are possible values:
`Throttling` - The service responded with a throttling error.
`ServerError` - The service responded with an error other than throttling.
`ConfiguredTimeout` - A client timeout occurred, either at the API call level, or API call attempt level.
`IO` - An I/O error occurred.
`Other` - Catch-all for other errors that don't fall into the above categories.| ## HTTP Metrics diff --git a/http-client-spi/pom.xml b/http-client-spi/pom.xml index b330c46241e1..0cb525bb39bd 100644 --- a/http-client-spi/pom.xml +++ b/http-client-spi/pom.xml @@ -22,7 +22,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT http-client-spi AWS Java SDK :: HTTP Client Interface diff --git a/http-clients/apache-client/pom.xml b/http-clients/apache-client/pom.xml index e89d2ea099aa..00ffd95911ff 100644 --- a/http-clients/apache-client/pom.xml +++ b/http-clients/apache-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT apache-client diff --git a/http-clients/apache-client/src/main/java/software/amazon/awssdk/http/apache/internal/conn/IdleConnectionReaper.java b/http-clients/apache-client/src/main/java/software/amazon/awssdk/http/apache/internal/conn/IdleConnectionReaper.java index c40d7671f86d..af46691e5a19 100644 --- a/http-clients/apache-client/src/main/java/software/amazon/awssdk/http/apache/internal/conn/IdleConnectionReaper.java +++ b/http-clients/apache-client/src/main/java/software/amazon/awssdk/http/apache/internal/conn/IdleConnectionReaper.java @@ -16,8 +16,9 @@ package software.amazon.awssdk.http.apache.internal.conn; import java.time.Duration; +import java.util.Collections; import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; +import java.util.WeakHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; @@ -48,7 +49,7 @@ public final class IdleConnectionReaper { private volatile ReaperTask reaperTask; private IdleConnectionReaper() { - this.connectionManagers = new ConcurrentHashMap<>(); + this.connectionManagers = Collections.synchronizedMap(new WeakHashMap<>()); this.executorServiceSupplier = () -> { ExecutorService e = Executors.newSingleThreadExecutor(r -> { diff --git 
a/http-clients/aws-crt-client/pom.xml b/http-clients/aws-crt-client/pom.xml index bd5423befad0..041e4c41d6d5 100644 --- a/http-clients/aws-crt-client/pom.xml +++ b/http-clients/aws-crt-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 diff --git a/http-clients/netty-nio-client/pom.xml b/http-clients/netty-nio-client/pom.xml index 495e63a41356..c9ddeb1a60e3 100644 --- a/http-clients/netty-nio-client/pom.xml +++ b/http-clients/netty-nio-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 @@ -85,6 +85,15 @@ io.netty netty-transport-classes-epoll + + io.netty + netty-resolver + + + io.netty + netty-resolver-dns + true + diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.java index 78a3fa80fa87..c12aeab10180 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.java @@ -103,6 +103,7 @@ private NettyNioAsyncHttpClient(DefaultBuilder builder, AttributeMap serviceDefa .sdkEventLoopGroup(sdkEventLoopGroup) .sslProvider(resolveSslProvider(builder)) .proxyConfiguration(builder.proxyConfiguration) + .useNonBlockingDnsResolver(builder.useNonBlockingDnsResolver) .build(); } @@ -475,6 +476,15 @@ public interface Builder extends SdkAsyncHttpClient.Builder http2ConfigurationBuilderConsumer); + + /** + * Configure whether to use a non-blocking dns resolver or not. False by default, as netty's default dns resolver is + * blocking; it namely calls java.net.InetAddress.getByName. + *

+ * When enabled, a non-blocking dns resolver will be used instead, by modifying netty's bootstrap configuration. + * See https://netty.io/news/2016/05/26/4-1-0-Final.html + */ + Builder useNonBlockingDnsResolver(Boolean useNonBlockingDnsResolver); } /** @@ -492,6 +502,7 @@ private static final class DefaultBuilder implements Builder { private Http2Configuration http2Configuration; private SslProvider sslProvider; private ProxyConfiguration proxyConfiguration; + private Boolean useNonBlockingDnsResolver; private DefaultBuilder() { } @@ -716,6 +727,16 @@ public void setHttp2Configuration(Http2Configuration http2Configuration) { http2Configuration(http2Configuration); } + @Override + public Builder useNonBlockingDnsResolver(Boolean useNonBlockingDnsResolver) { + this.useNonBlockingDnsResolver = useNonBlockingDnsResolver; + return this; + } + + public void setUseNonBlockingDnsResolver(Boolean useNonBlockingDnsResolver) { + useNonBlockingDnsResolver(useNonBlockingDnsResolver); + } + @Override public SdkAsyncHttpClient buildWithDefaults(AttributeMap serviceDefaults) { if (standardOptions.get(SdkHttpConfigurationOption.TLS_NEGOTIATION_TIMEOUT) == null) { diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroup.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroup.java index abb665f2c39a..254211e9303f 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroup.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroup.java @@ -19,11 +19,13 @@ import io.netty.channel.ChannelFactory; import io.netty.channel.EventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.DatagramChannel; +import io.netty.channel.socket.nio.NioDatagramChannel; import io.netty.channel.socket.nio.NioSocketChannel; import java.util.Optional; import 
java.util.concurrent.ThreadFactory; import software.amazon.awssdk.annotations.SdkPublicApi; -import software.amazon.awssdk.http.nio.netty.internal.utils.SocketChannelResolver; +import software.amazon.awssdk.http.nio.netty.internal.utils.ChannelResolver; import software.amazon.awssdk.utils.ThreadFactoryBuilder; import software.amazon.awssdk.utils.Validate; @@ -39,7 +41,8 @@ * *

  • Using {@link #create(EventLoopGroup)} to provide a custom {@link EventLoopGroup}. {@link ChannelFactory} will * be resolved based on the type of {@link EventLoopGroup} provided via - * {@link SocketChannelResolver#resolveSocketChannelFactory(EventLoopGroup)} + * {@link ChannelResolver#resolveSocketChannelFactory(EventLoopGroup)} and + * {@link ChannelResolver#resolveDatagramChannelFactory(EventLoopGroup)} *
  • * *
  • Using {@link #create(EventLoopGroup, ChannelFactory)} to provide a custom {@link EventLoopGroup} and @@ -63,12 +66,14 @@ public final class SdkEventLoopGroup { private final EventLoopGroup eventLoopGroup; private final ChannelFactory channelFactory; + private final ChannelFactory datagramChannelFactory; SdkEventLoopGroup(EventLoopGroup eventLoopGroup, ChannelFactory channelFactory) { Validate.paramNotNull(eventLoopGroup, "eventLoopGroup"); Validate.paramNotNull(channelFactory, "channelFactory"); this.eventLoopGroup = eventLoopGroup; this.channelFactory = channelFactory; + this.datagramChannelFactory = ChannelResolver.resolveDatagramChannelFactory(eventLoopGroup); } /** @@ -76,7 +81,8 @@ public final class SdkEventLoopGroup { */ private SdkEventLoopGroup(DefaultBuilder builder) { this.eventLoopGroup = resolveEventLoopGroup(builder); - this.channelFactory = resolveChannelFactory(); + this.channelFactory = resolveSocketChannelFactory(builder); + this.datagramChannelFactory = resolveDatagramChannelFactory(builder); } /** @@ -93,6 +99,13 @@ public ChannelFactory channelFactory() { return channelFactory; } + /** + * @return the {@link ChannelFactory} for datagram channels to be used with Netty Http Client. + */ + public ChannelFactory datagramChannelFactory() { + return datagramChannelFactory; + } + /** * Creates a new instance of SdkEventLoopGroup with {@link EventLoopGroup} and {@link ChannelFactory} * to be used with {@link NettyNioAsyncHttpClient}. 
@@ -116,7 +129,7 @@ public static SdkEventLoopGroup create(EventLoopGroup eventLoopGroup, ChannelFac * @return a new instance of SdkEventLoopGroup */ public static SdkEventLoopGroup create(EventLoopGroup eventLoopGroup) { - return create(eventLoopGroup, SocketChannelResolver.resolveSocketChannelFactory(eventLoopGroup)); + return create(eventLoopGroup, ChannelResolver.resolveSocketChannelFactory(eventLoopGroup)); } public static Builder builder() { @@ -141,11 +154,22 @@ private EventLoopGroup resolveEventLoopGroup(DefaultBuilder builder) { }*/ } - private ChannelFactory resolveChannelFactory() { - // Currently we only support NioEventLoopGroup + private ChannelFactory resolveSocketChannelFactory(DefaultBuilder builder) { + return builder.channelFactory; + } + + private ChannelFactory resolveDatagramChannelFactory(DefaultBuilder builder) { + return builder.datagramChannelFactory; + } + + private static ChannelFactory defaultSocketChannelFactory() { return NioSocketChannel::new; } + private static ChannelFactory defaultDatagramChannelFactory() { + return NioDatagramChannel::new; + } + /** * A builder for {@link SdkEventLoopGroup}. * @@ -172,6 +196,24 @@ public interface Builder { */ Builder threadFactory(ThreadFactory threadFactory); + /** + * {@link ChannelFactory} to create socket channels used by the {@link EventLoopGroup}. If not set, + * NioSocketChannel is used. + * + * @param channelFactory ChannelFactory to use. + * @return This builder for method chaining. + */ + Builder channelFactory(ChannelFactory channelFactory); + + /** + * {@link ChannelFactory} to create datagram channels used by the {@link EventLoopGroup}. If not set, + * NioDatagramChannel is used. + * + * @param datagramChannelFactory ChannelFactory to use. + * @return This builder for method chaining. 
+ */ + Builder datagramChannelFactory(ChannelFactory datagramChannelFactory); + SdkEventLoopGroup build(); } @@ -179,6 +221,8 @@ private static final class DefaultBuilder implements Builder { private Integer numberOfThreads; private ThreadFactory threadFactory; + private ChannelFactory channelFactory = defaultSocketChannelFactory(); + private ChannelFactory datagramChannelFactory = defaultDatagramChannelFactory(); private DefaultBuilder() { } @@ -203,6 +247,26 @@ public void setThreadFactory(ThreadFactory threadFactory) { threadFactory(threadFactory); } + @Override + public Builder channelFactory(ChannelFactory channelFactory) { + this.channelFactory = channelFactory; + return this; + } + + public void setChannelFactory(ChannelFactory channelFactory) { + channelFactory(channelFactory); + } + + @Override + public Builder datagramChannelFactory(ChannelFactory datagramChannelFactory) { + this.datagramChannelFactory = datagramChannelFactory; + return this; + } + + public void setDatagramChannelFactory(ChannelFactory datagramChannelFactory) { + datagramChannelFactory(datagramChannelFactory); + } + @Override public SdkEventLoopGroup build() { return new SdkEventLoopGroup(this); diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMap.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMap.java index 1d55e1841aa2..fbd727239239 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMap.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMap.java @@ -83,6 +83,7 @@ public void channelCreated(Channel ch) throws Exception { private final ProxyConfiguration proxyConfiguration; private final BootstrapProvider bootstrapProvider; private final SslContextProvider sslContextProvider; + private final Boolean 
useNonBlockingDnsResolver; private AwaitCloseChannelPoolMap(Builder builder, Function createBootStrapProvider) { this.configuration = builder.configuration; @@ -94,6 +95,7 @@ private AwaitCloseChannelPoolMap(Builder builder, Function init(ChannelFactory datagramChannelFactory) { + try { + Class addressResolver = ClassLoaderHelper.loadClass(getAddressResolverGroup(), false, (Class) null); + Class dnsNameResolverBuilder = ClassLoaderHelper.loadClass(getDnsNameResolverBuilder(), false, (Class) null); + + Object dnsResolverObj = dnsNameResolverBuilder.newInstance(); + Method method = dnsResolverObj.getClass().getMethod("channelFactory", ChannelFactory.class); + method.invoke(dnsResolverObj, datagramChannelFactory); + + Object e = addressResolver.getConstructor(dnsNameResolverBuilder).newInstance(dnsResolverObj); + return (AddressResolverGroup) e; + } catch (ClassNotFoundException e) { + throw new IllegalStateException("Cannot find module io.netty.resolver.dns " + + " To use netty non blocking dns," + + " the 'netty-resolver-dns' module from io.netty must be on the class path. 
", e); + } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException | InstantiationException e) { + throw new IllegalStateException("Failed to create AddressResolverGroup", e); + } + } + + private static String getAddressResolverGroup() { + return "io.netty.resolver.dns.DnsAddressResolverGroup"; + } + + private static String getDnsNameResolverBuilder() { + return "io.netty.resolver.dns.DnsNameResolverBuilder"; + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/ChannelResolver.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/ChannelResolver.java new file mode 100644 index 000000000000..8770d683a679 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/ChannelResolver.java @@ -0,0 +1,112 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal.utils; + +import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelFactory; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.ReflectiveChannelFactory; +import io.netty.channel.epoll.EpollDatagramChannel; +import io.netty.channel.epoll.EpollEventLoopGroup; +import io.netty.channel.epoll.EpollSocketChannel; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.DatagramChannel; +import io.netty.channel.socket.nio.NioDatagramChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import java.util.HashMap; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.nio.netty.internal.DelegatingEventLoopGroup; + +@SdkInternalApi +public final class ChannelResolver { + + private static final Map KNOWN_EL_GROUPS_SOCKET_CHANNELS = new HashMap<>(); + private static final Map KNOWN_EL_GROUPS_DATAGRAM_CHANNELS = new HashMap<>(); + + static { + KNOWN_EL_GROUPS_SOCKET_CHANNELS.put("io.netty.channel.kqueue.KQueueEventLoopGroup", + "io.netty.channel.kqueue.KQueueSocketChannel"); + KNOWN_EL_GROUPS_SOCKET_CHANNELS.put("io.netty.channel.oio.OioEventLoopGroup", + "io.netty.channel.socket.oio.OioSocketChannel"); + + KNOWN_EL_GROUPS_DATAGRAM_CHANNELS.put("io.netty.channel.kqueue.KQueueEventLoopGroup", + "io.netty.channel.kqueue.KQueueDatagramChannel"); + KNOWN_EL_GROUPS_DATAGRAM_CHANNELS.put("io.netty.channel.oio.OioEventLoopGroup", + "io.netty.channel.socket.oio.OioDatagramChannel"); + } + + private ChannelResolver() { + } + + /** + * Attempts to determine the {@link ChannelFactory} class that corresponds to the given + * event loop group. + * + * @param eventLoopGroup the event loop group to determine the {@link ChannelFactory} for + * @return A {@link ChannelFactory} instance for the given event loop group. 
+ */ + @SuppressWarnings("unchecked") + public static ChannelFactory resolveSocketChannelFactory(EventLoopGroup eventLoopGroup) { + if (eventLoopGroup instanceof DelegatingEventLoopGroup) { + return resolveSocketChannelFactory(((DelegatingEventLoopGroup) eventLoopGroup).getDelegate()); + } + + if (eventLoopGroup instanceof NioEventLoopGroup) { + return NioSocketChannel::new; + } + if (eventLoopGroup instanceof EpollEventLoopGroup) { + return EpollSocketChannel::new; + } + + String socketFqcn = KNOWN_EL_GROUPS_SOCKET_CHANNELS.get(eventLoopGroup.getClass().getName()); + if (socketFqcn == null) { + throw new IllegalArgumentException("Unknown event loop group : " + eventLoopGroup.getClass()); + } + + return invokeSafely(() -> new ReflectiveChannelFactory(Class.forName(socketFqcn))); + } + + /** + * Attempts to determine the {@link ChannelFactory} class for datagram channels that corresponds to the given + * event loop group. + * + * @param eventLoopGroup the event loop group to determine the {@link ChannelFactory} for + * @return A {@link ChannelFactory} instance for the given event loop group. 
+ */ + @SuppressWarnings("unchecked") + public static ChannelFactory resolveDatagramChannelFactory(EventLoopGroup eventLoopGroup) { + if (eventLoopGroup instanceof DelegatingEventLoopGroup) { + return resolveDatagramChannelFactory(((DelegatingEventLoopGroup) eventLoopGroup).getDelegate()); + } + + if (eventLoopGroup instanceof NioEventLoopGroup) { + return NioDatagramChannel::new; + } + if (eventLoopGroup instanceof EpollEventLoopGroup) { + return EpollDatagramChannel::new; + } + + String datagramFqcn = KNOWN_EL_GROUPS_DATAGRAM_CHANNELS.get(eventLoopGroup.getClass().getName()); + if (datagramFqcn == null) { + throw new IllegalArgumentException("Unknown event loop group : " + eventLoopGroup.getClass()); + } + + return invokeSafely(() -> new ReflectiveChannelFactory(Class.forName(datagramFqcn))); + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/SocketChannelResolver.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/SocketChannelResolver.java deleted file mode 100644 index 1d80dad5850f..000000000000 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/SocketChannelResolver.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.http.nio.netty.internal.utils; - -import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; - -import io.netty.channel.Channel; -import io.netty.channel.ChannelFactory; -import io.netty.channel.EventLoopGroup; -import io.netty.channel.ReflectiveChannelFactory; -import io.netty.channel.epoll.EpollEventLoopGroup; -import io.netty.channel.epoll.EpollSocketChannel; -import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.socket.nio.NioSocketChannel; -import java.util.HashMap; -import java.util.Map; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.http.nio.netty.internal.DelegatingEventLoopGroup; - -@SdkInternalApi -public final class SocketChannelResolver { - - private static final Map KNOWN_EL_GROUPS = new HashMap<>(); - - static { - KNOWN_EL_GROUPS.put("io.netty.channel.kqueue.KQueueEventLoopGroup", "io.netty.channel.kqueue.KQueueSocketChannel"); - KNOWN_EL_GROUPS.put("io.netty.channel.oio.OioEventLoopGroup", "io.netty.channel.socket.oio.OioSocketChannel"); - } - - private SocketChannelResolver() { - } - - /** - * Attempts to determine the {@link ChannelFactory} class that corresponds to the given - * event loop group. - * - * @param eventLoopGroup the event loop group to determine the {@link ChannelFactory} for - * @return A {@link ChannelFactory} instance for the given event loop group. 
- */ - @SuppressWarnings("unchecked") - public static ChannelFactory resolveSocketChannelFactory(EventLoopGroup eventLoopGroup) { - if (eventLoopGroup instanceof DelegatingEventLoopGroup) { - return resolveSocketChannelFactory(((DelegatingEventLoopGroup) eventLoopGroup).getDelegate()); - } - - if (eventLoopGroup instanceof NioEventLoopGroup) { - return NioSocketChannel::new; - } - if (eventLoopGroup instanceof EpollEventLoopGroup) { - return EpollSocketChannel::new; - } - - String socketFqcn = KNOWN_EL_GROUPS.get(eventLoopGroup.getClass().getName()); - if (socketFqcn == null) { - throw new IllegalArgumentException("Unknown event loop group : " + eventLoopGroup.getClass()); - } - - return invokeSafely(() -> new ReflectiveChannelFactory(Class.forName(socketFqcn))); - } -} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyClientTlsAuthTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyClientTlsAuthTest.java index dc7c408c3c9f..f35c0914609d 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyClientTlsAuthTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyClientTlsAuthTest.java @@ -39,6 +39,7 @@ import software.amazon.awssdk.http.EmptyPublisher; import software.amazon.awssdk.http.FileStoreTlsKeyManagersProvider; import software.amazon.awssdk.http.HttpTestUtils; +import software.amazon.awssdk.http.SdkHttpConfigurationOption; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpMethod; import software.amazon.awssdk.http.TlsKeyManagersProvider; @@ -185,6 +186,24 @@ public void nonProxy_noKeyManagerGiven_shouldThrowException() { .hasRootCauseInstanceOf(SSLException.class); } + @Test + public void builderUsesProvidedKeyManagersProviderNonBlockingDns() { + TlsKeyManagersProvider mockKeyManagersProvider = 
mock(TlsKeyManagersProvider.class); + netty = NettyNioAsyncHttpClient.builder() + .useNonBlockingDnsResolver(true) + .proxyConfiguration(proxyCfg) + .tlsKeyManagersProvider(mockKeyManagersProvider) + .buildWithDefaults(AttributeMap.builder() + .put(TRUST_ALL_CERTIFICATES, true) + .build()); + + try { + sendRequest(netty, new RecordingResponseHandler()); + } catch (Exception ignored) { + } + verify(mockKeyManagersProvider).keyManagers(); + } + private void sendRequest(SdkAsyncHttpClient client, SdkAsyncHttpResponseHandler responseHandler) { AsyncExecuteRequest req = AsyncExecuteRequest.builder() .request(testSdkRequest()) diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientNonBlockingDnsTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientNonBlockingDnsTest.java new file mode 100644 index 000000000000..9535c41c2b0a --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientNonBlockingDnsTest.java @@ -0,0 +1,171 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.any; +import static com.github.tomakehurst.wiremock.client.WireMock.equalTo; +import static com.github.tomakehurst.wiremock.client.WireMock.postRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; +import static com.github.tomakehurst.wiremock.client.WireMock.verify; +import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; +import static java.util.Collections.singletonMap; +import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic; +import static org.apache.commons.lang3.StringUtils.reverse; +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClientTestUtils.assertCanReceiveBasicRequest; +import static software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClientTestUtils.createProvider; +import static software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClientTestUtils.createRequest; +import static software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClientTestUtils.makeSimpleRequest; + +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.io.IOException; +import java.net.URI; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.assertj.core.api.Condition; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.http.SdkHttpConfigurationOption; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import 
software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.utils.AttributeMap; + +@RunWith(MockitoJUnitRunner.class) +public class NettyNioAsyncHttpClientNonBlockingDnsTest { + + private final RecordingNetworkTrafficListener wiremockTrafficListener = new RecordingNetworkTrafficListener(); + + private static final SdkAsyncHttpClient client = NettyNioAsyncHttpClient.builder() + .useNonBlockingDnsResolver(true) + .buildWithDefaults( + AttributeMap.builder() + .put(SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES, true) + .build()); + @Rule + public WireMockRule mockServer = new WireMockRule(wireMockConfig() + .dynamicPort() + .dynamicHttpsPort() + .networkTrafficListener(wiremockTrafficListener)); + + @Before + public void methodSetup() { + wiremockTrafficListener.reset(); + } + + @AfterClass + public static void tearDown() throws Exception { + client.close(); + } + + @Test + public void canSendContentAndGetThatContentBackNonBlockingDns() throws Exception { + String body = randomAlphabetic(50); + stubFor(any(urlEqualTo("/echo?reversed=true")) + .withRequestBody(equalTo(body)) + .willReturn(aResponse().withBody(reverse(body)))); + URI uri = URI.create("http://localhost:" + mockServer.port()); + + SdkHttpRequest request = createRequest(uri, "/echo", body, SdkHttpMethod.POST, singletonMap("reversed", "true")); + + RecordingResponseHandler recorder = new RecordingResponseHandler(); + + client.execute(AsyncExecuteRequest.builder().request(request).requestContentPublisher(createProvider(body)).responseHandler(recorder).build()); + + recorder.completeFuture.get(5, TimeUnit.SECONDS); + + verify(1, postRequestedFor(urlEqualTo("/echo?reversed=true"))); + + assertThat(recorder.fullResponseAsString()).isEqualTo(reverse(body)); + } + + @Test + public void defaultThreadFactoryUsesHelpfulName() throws Exception { + // Make a request to ensure 
a thread is primed + makeSimpleRequest(client, mockServer); + + String expectedPattern = "aws-java-sdk-NettyEventLoop-\\d+-\\d+"; + assertThat(Thread.getAllStackTraces().keySet()) + .areAtLeast(1, new Condition<>(t -> t.getName().matches(expectedPattern), + "Matches default thread pattern: `%s`", expectedPattern)); + } + + @Test + public void canMakeBasicRequestOverHttp() throws Exception { + String smallBody = randomAlphabetic(10); + URI uri = URI.create("http://localhost:" + mockServer.port()); + + assertCanReceiveBasicRequest(client, uri, smallBody); + } + + @Test + public void canMakeBasicRequestOverHttps() throws Exception { + String smallBody = randomAlphabetic(10); + URI uri = URI.create("https://localhost:" + mockServer.httpsPort()); + + assertCanReceiveBasicRequest(client, uri, smallBody); + } + + @Test + public void canHandleLargerPayloadsOverHttp() throws Exception { + String largishBody = randomAlphabetic(25000); + + URI uri = URI.create("http://localhost:" + mockServer.port()); + + assertCanReceiveBasicRequest(client, uri, largishBody); + } + + @Test + public void canHandleLargerPayloadsOverHttps() throws Exception { + String largishBody = randomAlphabetic(25000); + + URI uri = URI.create("https://localhost:" + mockServer.httpsPort()); + + assertCanReceiveBasicRequest(client, uri, largishBody); + } + + @Test + public void requestContentOnlyEqualToContentLengthHeaderFromProvider() throws InterruptedException, ExecutionException, TimeoutException, IOException { + final String content = randomAlphabetic(32); + final String streamContent = content + reverse(content); + stubFor(any(urlEqualTo("/echo?reversed=true")) + .withRequestBody(equalTo(content)) + .willReturn(aResponse().withBody(reverse(content)))); + URI uri = URI.create("http://localhost:" + mockServer.port()); + + SdkHttpFullRequest request = createRequest(uri, "/echo", streamContent, SdkHttpMethod.POST, singletonMap("reversed", "true")); + request = 
request.toBuilder().putHeader("Content-Length", Integer.toString(content.length())).build(); + RecordingResponseHandler recorder = new RecordingResponseHandler(); + + client.execute(AsyncExecuteRequest.builder().request(request).requestContentPublisher(createProvider(streamContent)).responseHandler(recorder).build()); + + recorder.completeFuture.get(5, TimeUnit.SECONDS); + + // HTTP servers will stop processing the request as soon as it reads + // bytes equal to 'Content-Length' so we need to inspect the raw + // traffic to ensure that there wasn't anything after that. + assertThat(wiremockTrafficListener.requests().toString()).endsWith(content); + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientTestUtils.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientTestUtils.java new file mode 100644 index 000000000000..04f9a906ee04 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientTestUtils.java @@ -0,0 +1,148 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.any; +import static com.github.tomakehurst.wiremock.client.WireMock.getRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlMatching; +import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; +import static com.github.tomakehurst.wiremock.client.WireMock.verify; +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Collections.emptyMap; +import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic; +import static org.apache.commons.lang3.StringUtils.isBlank; +import static org.assertj.core.api.Assertions.assertThat; + +import com.github.tomakehurst.wiremock.WireMockServer; +import java.net.URI; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.async.SdkHttpContentPublisher; + +public class NettyNioAsyncHttpClientTestUtils { + + /** + * Make a simple async request and wait for it to finish. + * + * @param client Client to make request with. 
+ */ + public static void makeSimpleRequest(SdkAsyncHttpClient client, WireMockServer mockServer) throws Exception { + String body = randomAlphabetic(10); + URI uri = URI.create("http://localhost:" + mockServer.port()); + stubFor(any(urlPathEqualTo("/")).willReturn(aResponse().withBody(body))); + SdkHttpRequest request = createRequest(uri); + RecordingResponseHandler recorder = new RecordingResponseHandler(); + client.execute(AsyncExecuteRequest.builder().request(request).requestContentPublisher(createProvider("")).responseHandler(recorder).build()); + recorder.completeFuture.get(5, TimeUnit.SECONDS); + } + + public static SdkHttpContentPublisher createProvider(String body) { + Stream chunks = splitStringBySize(body).stream() + .map(chunk -> ByteBuffer.wrap(chunk.getBytes(UTF_8))); + return new SdkHttpContentPublisher() { + + @Override + public Optional contentLength() { + return Optional.of(Long.valueOf(body.length())); + } + + @Override + public void subscribe(Subscriber s) { + s.onSubscribe(new Subscription() { + @Override + public void request(long n) { + chunks.forEach(s::onNext); + s.onComplete(); + } + + @Override + public void cancel() { + + } + }); + } + }; + } + + public static SdkHttpFullRequest createRequest(URI uri) { + return createRequest(uri, "/", null, SdkHttpMethod.GET, emptyMap()); + } + + public static SdkHttpFullRequest createRequest(URI uri, + String resourcePath, + String body, + SdkHttpMethod method, + Map params) { + String contentLength = body == null ? 
null : String.valueOf(body.getBytes(UTF_8).length); + return SdkHttpFullRequest.builder() + .uri(uri) + .method(method) + .encodedPath(resourcePath) + .applyMutation(b -> params.forEach(b::putRawQueryParameter)) + .applyMutation(b -> { + b.putHeader("Host", uri.getHost()); + if (contentLength != null) { + b.putHeader("Content-Length", contentLength); + } + }).build(); + } + + public static void assertCanReceiveBasicRequest(SdkAsyncHttpClient client, URI uri, String body) throws Exception { + stubFor(any(urlPathEqualTo("/")).willReturn(aResponse().withHeader("Some-Header", "With Value").withBody(body))); + + SdkHttpRequest request = createRequest(uri); + + RecordingResponseHandler recorder = new RecordingResponseHandler(); + client.execute(AsyncExecuteRequest.builder().request(request).requestContentPublisher(createProvider("")).responseHandler(recorder).build()); + + recorder.completeFuture.get(5, TimeUnit.SECONDS); + + assertThat(recorder.responses).hasOnlyOneElementSatisfying( + headerResponse -> { + assertThat(headerResponse.headers()).containsKey("Some-Header"); + assertThat(headerResponse.statusCode()).isEqualTo(200); + }); + + assertThat(recorder.fullResponseAsString()).isEqualTo(body); + verify(1, getRequestedFor(urlMatching("/"))); + } + + private static Collection splitStringBySize(String str) { + if (isBlank(str)) { + return Collections.emptyList(); + } + ArrayList split = new ArrayList<>(); + for (int i = 0; i <= str.length() / 1000; i++) { + split.add(str.substring(i * 1000, Math.min((i + 1) * 1000, str.length()))); + } + return split; + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientWireMockTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientWireMockTest.java index 9a1121e201f5..116119d36ea5 100644 --- 
a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientWireMockTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientWireMockTest.java @@ -18,19 +18,14 @@ import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; import static com.github.tomakehurst.wiremock.client.WireMock.any; import static com.github.tomakehurst.wiremock.client.WireMock.equalTo; -import static com.github.tomakehurst.wiremock.client.WireMock.getRequestedFor; import static com.github.tomakehurst.wiremock.client.WireMock.postRequestedFor; import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; -import static com.github.tomakehurst.wiremock.client.WireMock.urlMatching; import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; import static com.github.tomakehurst.wiremock.client.WireMock.verify; import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; -import static java.nio.charset.StandardCharsets.UTF_8; -import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic; -import static org.apache.commons.lang3.StringUtils.isBlank; import static org.apache.commons.lang3.StringUtils.reverse; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; @@ -40,6 +35,10 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.when; +import static software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClientTestUtils.assertCanReceiveBasicRequest; +import static software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClientTestUtils.createProvider; +import static 
software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClientTestUtils.createRequest; +import static software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClientTestUtils.makeSimpleRequest; import com.github.tomakehurst.wiremock.WireMockServer; import com.github.tomakehurst.wiremock.http.Fault; @@ -49,25 +48,22 @@ import io.netty.channel.ChannelFuture; import io.netty.channel.EventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.DatagramChannel; +import io.netty.channel.socket.nio.NioDatagramChannel; import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.handler.ssl.SslProvider; import io.netty.util.AttributeKey; import java.io.IOException; import java.net.URI; -import java.nio.ByteBuffer; import java.time.Duration; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; import java.util.List; -import java.util.Map; -import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import java.util.stream.Stream; import javax.net.ssl.TrustManagerFactory; import org.assertj.core.api.Condition; import org.junit.AfterClass; @@ -78,8 +74,6 @@ import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; import org.mockito.stubbing.Answer; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; import software.amazon.awssdk.http.HttpMetric; import software.amazon.awssdk.http.HttpTestUtils; import software.amazon.awssdk.http.SdkHttpConfigurationOption; @@ -88,7 +82,6 @@ import software.amazon.awssdk.http.SdkHttpRequest; import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; -import software.amazon.awssdk.http.async.SdkHttpContentPublisher; import 
software.amazon.awssdk.http.nio.netty.internal.NettyConfiguration; import software.amazon.awssdk.http.nio.netty.internal.SdkChannelPool; import software.amazon.awssdk.http.nio.netty.internal.SdkChannelPoolMap; @@ -183,7 +176,8 @@ public void invalidMaxPendingConnectionAcquireConfig_shouldPropagateException() .maxConcurrency(1) .maxPendingConnectionAcquires(0) .build()) { - assertThatThrownBy(() -> makeSimpleRequest(customClient)).hasMessageContaining("java.lang.IllegalArgumentException: maxPendingAcquires: 0 (expected: >= 1)"); + assertThatThrownBy(() -> makeSimpleRequest(customClient, mockServer)).hasMessageContaining("java.lang" + + ".IllegalArgumentException: maxPendingAcquires: 0 (expected: >= 1)"); } } @@ -196,7 +190,7 @@ public void customFactoryIsUsed() throws Exception { .threadFactory(threadFactory)) .build(); - makeSimpleRequest(customClient); + makeSimpleRequest(customClient, mockServer); customClient.close(); Mockito.verify(threadFactory, atLeastOnce()).newThread(Mockito.any()); @@ -208,7 +202,7 @@ public void openSslBeingUsed() throws Exception { NettyNioAsyncHttpClient.builder() .sslProvider(SslProvider.OPENSSL) .build()) { - makeSimpleRequest(customClient); + makeSimpleRequest(customClient, mockServer); } } @@ -218,7 +212,7 @@ public void defaultJdkSslProvider() throws Exception { NettyNioAsyncHttpClient.builder() .sslProvider(SslProvider.JDK) .build()) { - makeSimpleRequest(customClient); + makeSimpleRequest(customClient, mockServer); customClient.close(); } } @@ -226,7 +220,7 @@ public void defaultJdkSslProvider() throws Exception { @Test public void defaultThreadFactoryUsesHelpfulName() throws Exception { // Make a request to ensure a thread is primed - makeSimpleRequest(client); + makeSimpleRequest(client, mockServer); String expectedPattern = "aws-java-sdk-NettyEventLoop-\\d+-\\d+"; assertThat(Thread.getAllStackTraces().keySet()) @@ -247,7 +241,7 @@ public void customThreadCountIsRespected() throws Exception { // Have to make enough requests to 
prime the threads for (int i = 0; i < threadCount + 1; i++) { - makeSimpleRequest(customClient); + makeSimpleRequest(customClient, mockServer); } customClient.close(); @@ -267,7 +261,7 @@ public void customEventLoopGroup_NotClosedWhenClientIsClosed() throws Exception .eventLoopGroup(SdkEventLoopGroup.create(eventLoopGroup, NioSocketChannel::new)) .build(); - makeSimpleRequest(customClient); + makeSimpleRequest(customClient, mockServer); customClient.close(); Mockito.verify(threadFactory, atLeastOnce()).newThread(Mockito.any()); @@ -287,7 +281,7 @@ public void customChannelFactoryIsUsed() throws Exception { .eventLoopGroup(SdkEventLoopGroup.create(customEventLoopGroup, channelFactory)) .build(); - makeSimpleRequest(customClient); + makeSimpleRequest(customClient, mockServer); customClient.close(); Mockito.verify(channelFactory, atLeastOnce()).newChannel(); @@ -335,7 +329,7 @@ public void responseConnectionReused_shouldReleaseChannel() throws Exception { .maxConcurrency(1) .build(); - makeSimpleRequest(customClient); + makeSimpleRequest(customClient, mockServer); verifyChannelRelease(channel); assertThat(channel.isShutdown()).isFalse(); @@ -446,27 +440,12 @@ public void builderUsesProvidedTrustManagersProvider() throws Exception { } } - /** - * Make a simple async request and wait for it to fiish. - * - * @param client Client to make request with. 
- */ - private void makeSimpleRequest(SdkAsyncHttpClient client) throws Exception { - String body = randomAlphabetic(10); - URI uri = URI.create("http://localhost:" + mockServer.port()); - stubFor(any(urlPathEqualTo("/")).willReturn(aResponse().withBody(body))); - SdkHttpRequest request = createRequest(uri); - RecordingResponseHandler recorder = new RecordingResponseHandler(); - client.execute(AsyncExecuteRequest.builder().request(request).requestContentPublisher(createProvider("")).responseHandler(recorder).build()); - recorder.completeFuture.get(5, TimeUnit.SECONDS); - } - @Test public void canMakeBasicRequestOverHttp() throws Exception { String smallBody = randomAlphabetic(10); URI uri = URI.create("http://localhost:" + mockServer.port()); - assertCanReceiveBasicRequest(uri, smallBody); + assertCanReceiveBasicRequest(client, uri, smallBody); } @Test @@ -474,7 +453,7 @@ public void canMakeBasicRequestOverHttps() throws Exception { String smallBody = randomAlphabetic(10); URI uri = URI.create("https://localhost:" + mockServer.httpsPort()); - assertCanReceiveBasicRequest(uri, smallBody); + assertCanReceiveBasicRequest(client, uri, smallBody); } @Test @@ -483,7 +462,7 @@ public void canHandleLargerPayloadsOverHttp() throws Exception { URI uri = URI.create("http://localhost:" + mockServer.port()); - assertCanReceiveBasicRequest(uri, largishBody); + assertCanReceiveBasicRequest(client, uri, largishBody); } @Test @@ -492,7 +471,7 @@ public void canHandleLargerPayloadsOverHttps() throws Exception { URI uri = URI.create("https://localhost:" + mockServer.httpsPort()); - assertCanReceiveBasicRequest(uri, largishBody); + assertCanReceiveBasicRequest(client, uri, largishBody); } @Test @@ -579,88 +558,6 @@ public ChannelFuture close() { assertThat(channelClosedFuture.get(5, TimeUnit.SECONDS)).isTrue(); } - private void assertCanReceiveBasicRequest(URI uri, String body) throws Exception { - stubFor(any(urlPathEqualTo("/")).willReturn(aResponse().withHeader("Some-Header", "With 
Value").withBody(body))); - - SdkHttpRequest request = createRequest(uri); - - RecordingResponseHandler recorder = new RecordingResponseHandler(); - client.execute(AsyncExecuteRequest.builder().request(request).requestContentPublisher(createProvider("")).responseHandler(recorder).build()); - - recorder.completeFuture.get(5, TimeUnit.SECONDS); - - assertThat(recorder.responses).hasOnlyOneElementSatisfying( - headerResponse -> { - assertThat(headerResponse.headers()).containsKey("Some-Header"); - assertThat(headerResponse.statusCode()).isEqualTo(200); - }); - - assertThat(recorder.fullResponseAsString()).isEqualTo(body); - verify(1, getRequestedFor(urlMatching("/"))); - } - - private SdkHttpContentPublisher createProvider(String body) { - Stream chunks = splitStringBySize(body).stream() - .map(chunk -> ByteBuffer.wrap(chunk.getBytes(UTF_8))); - return new SdkHttpContentPublisher() { - - @Override - public Optional contentLength() { - return Optional.of(Long.valueOf(body.length())); - } - - @Override - public void subscribe(Subscriber s) { - s.onSubscribe(new Subscription() { - @Override - public void request(long n) { - chunks.forEach(s::onNext); - s.onComplete(); - } - - @Override - public void cancel() { - - } - }); - } - }; - } - - private SdkHttpFullRequest createRequest(URI uri) { - return createRequest(uri, "/", null, SdkHttpMethod.GET, emptyMap()); - } - - private SdkHttpFullRequest createRequest(URI uri, - String resourcePath, - String body, - SdkHttpMethod method, - Map params) { - String contentLength = body == null ? 
null : String.valueOf(body.getBytes(UTF_8).length); - return SdkHttpFullRequest.builder() - .uri(uri) - .method(method) - .encodedPath(resourcePath) - .applyMutation(b -> params.forEach(b::putRawQueryParameter)) - .applyMutation(b -> { - b.putHeader("Host", uri.getHost()); - if (contentLength != null) { - b.putHeader("Content-Length", contentLength); - } - }).build(); - } - - private static Collection splitStringBySize(String str) { - if (isBlank(str)) { - return Collections.emptyList(); - } - ArrayList split = new ArrayList<>(); - for (int i = 0; i <= str.length() / 1000; i++) { - split.add(str.substring(i * 1000, Math.min((i + 1) * 1000, str.length()))); - } - return split; - } - // Needs to be a non-anon class in order to spy public static class CustomThreadFactory implements ThreadFactory { @Override @@ -719,7 +616,7 @@ public void createNettyClient_ReadWriteTimeoutCanBeZero() throws Exception { .writeTimeout(Duration.ZERO) .build(); - makeSimpleRequest(customClient); + makeSimpleRequest(customClient, mockServer); customClient.close(); } diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/ProxyWireMockTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/ProxyWireMockTest.java index f797a760fdf7..438d65e1f9fc 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/ProxyWireMockTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/ProxyWireMockTest.java @@ -126,6 +126,30 @@ public void proxyConfigured_hostInNonProxySet_doesNotConnect() { assertThat(responseHandler.fullResponseAsString()).isEqualTo("hello"); } + @Test + public void proxyConfigured_hostInNonProxySet_nonBlockingDns_doesNotConnect() { + RecordingResponseHandler responseHandler = new RecordingResponseHandler(); + AsyncExecuteRequest req = AsyncExecuteRequest.builder() + .request(testSdkRequest()) + .responseHandler(responseHandler) + 
.requestContentPublisher(new EmptyPublisher()) + .build(); + + ProxyConfiguration cfg = proxyCfg.toBuilder() + .nonProxyHosts(Stream.of("localhost").collect(Collectors.toSet())) + .build(); + + client = NettyNioAsyncHttpClient.builder() + .proxyConfiguration(cfg) + .useNonBlockingDnsResolver(true) + .build(); + + client.execute(req).join(); + + responseHandler.completeFuture.join(); + assertThat(responseHandler.fullResponseAsString()).isEqualTo("hello"); + } + private SdkHttpFullRequest testSdkRequest() { return SdkHttpFullRequest.builder() .method(SdkHttpMethod.GET) diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroupTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroupTest.java index a3ae76469359..bb2598345cff 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroupTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroupTest.java @@ -18,8 +18,15 @@ import static org.assertj.core.api.Assertions.assertThat; import io.netty.channel.DefaultEventLoopGroup; +import io.netty.channel.epoll.EpollDatagramChannel; +import io.netty.channel.epoll.EpollEventLoopGroup; +import io.netty.channel.epoll.EpollSocketChannel; import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.oio.OioEventLoopGroup; +import io.netty.channel.socket.nio.NioDatagramChannel; import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.channel.socket.oio.OioDatagramChannel; +import io.netty.channel.socket.oio.OioSocketChannel; import org.junit.Test; public class SdkEventLoopGroupTest { @@ -28,13 +35,24 @@ public class SdkEventLoopGroupTest { public void creatingUsingBuilder() { SdkEventLoopGroup sdkEventLoopGroup = SdkEventLoopGroup.builder().numberOfThreads(1).build(); assertThat(sdkEventLoopGroup.channelFactory()).isNotNull(); + 
assertThat(sdkEventLoopGroup.datagramChannelFactory()).isNotNull(); assertThat(sdkEventLoopGroup.eventLoopGroup()).isNotNull(); } @Test - public void creatingUsingStaticMethod() { + public void creatingUsingStaticMethod_A() { SdkEventLoopGroup sdkEventLoopGroup = SdkEventLoopGroup.create(new NioEventLoopGroup(), NioSocketChannel::new); assertThat(sdkEventLoopGroup.channelFactory()).isNotNull(); + assertThat(sdkEventLoopGroup.datagramChannelFactory().newChannel()).isInstanceOf(NioDatagramChannel.class); + assertThat(sdkEventLoopGroup.eventLoopGroup()).isNotNull(); + } + + @Test + public void creatingUsingStaticMethod_B() { + SdkEventLoopGroup sdkEventLoopGroup = SdkEventLoopGroup.create(new OioEventLoopGroup(), OioSocketChannel::new); + assertThat(sdkEventLoopGroup.channelFactory()).isNotNull(); + assertThat(sdkEventLoopGroup.datagramChannelFactory()).isNotNull(); + assertThat(sdkEventLoopGroup.datagramChannelFactory().newChannel()).isInstanceOf(OioDatagramChannel.class); assertThat(sdkEventLoopGroup.eventLoopGroup()).isNotNull(); } @@ -43,6 +61,7 @@ public void notProvidingChannelFactory_channelFactoryResolved() { SdkEventLoopGroup sdkEventLoopGroup = SdkEventLoopGroup.create(new NioEventLoopGroup()); assertThat(sdkEventLoopGroup.channelFactory()).isNotNull(); + assertThat(sdkEventLoopGroup.datagramChannelFactory().newChannel()).isInstanceOf(NioDatagramChannel.class); } @Test(expected = IllegalArgumentException.class) diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMapTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMapTest.java index 3b72f71be4db..17289d1ca3b3 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMapTest.java +++ 
b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMapTest.java @@ -118,7 +118,7 @@ public void get_callsInjectedBootstrapProviderCorrectly() { channelPoolMap = new AwaitCloseChannelPoolMap(builder, null, bootstrapProvider); channelPoolMap.get(targetUri); - verify(bootstrapProvider).createBootstrap("some-awesome-service-1234.amazonaws.com", 8080); + verify(bootstrapProvider).createBootstrap("some-awesome-service-1234.amazonaws.com", 8080, null); } @Test @@ -151,7 +151,7 @@ public void get_usingProxy_callsInjectedBootstrapProviderCorrectly() { channelPoolMap = new AwaitCloseChannelPoolMap(builder, shouldProxyCache, bootstrapProvider); channelPoolMap.get(targetUri); - verify(bootstrapProvider).createBootstrap("localhost", mockProxy.port()); + verify(bootstrapProvider).createBootstrap("localhost", mockProxy.port(), null); } @Test diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/BootstrapProviderTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/BootstrapProviderTest.java index 337cb7ba2ec2..914587b85df3 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/BootstrapProviderTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/BootstrapProviderTest.java @@ -42,7 +42,19 @@ public class BootstrapProviderTest { // connection attempt and not cached between connection attempts. 
@Test public void createBootstrap_usesUnresolvedInetSocketAddress() { - Bootstrap bootstrap = bootstrapProvider.createBootstrap("some-awesome-service-1234.amazonaws.com", 443); + Bootstrap bootstrap = bootstrapProvider.createBootstrap("some-awesome-service-1234.amazonaws.com", 443, false); + + SocketAddress socketAddress = bootstrap.config().remoteAddress(); + + assertThat(socketAddress).isInstanceOf(InetSocketAddress.class); + InetSocketAddress inetSocketAddress = (InetSocketAddress)socketAddress; + + assertThat(inetSocketAddress.isUnresolved()).isTrue(); + } + + @Test + public void createBootstrapNonBlockingDns_usesUnresolvedInetSocketAddress() { + Bootstrap bootstrap = bootstrapProvider.createBootstrap("some-awesome-service-1234.amazonaws.com", 443, true); SocketAddress socketAddress = bootstrap.config().remoteAddress(); @@ -54,7 +66,7 @@ public void createBootstrap_usesUnresolvedInetSocketAddress() { @Test public void createBootstrap_defaultConfiguration_tcpKeepAliveShouldBeFalse() { - Bootstrap bootstrap = bootstrapProvider.createBootstrap("some-awesome-service-1234.amazonaws.com", 443); + Bootstrap bootstrap = bootstrapProvider.createBootstrap("some-awesome-service-1234.amazonaws.com", 443, false); Boolean keepAlive = (Boolean) bootstrap.config().options().get(ChannelOption.SO_KEEPALIVE); assertThat(keepAlive).isFalse(); @@ -70,7 +82,7 @@ public void createBootstrap_tcpKeepAliveTrue_shouldApply() { nettyConfiguration, new SdkChannelOptions()); - Bootstrap bootstrap = provider.createBootstrap("some-awesome-service-1234.amazonaws.com", 443); + Bootstrap bootstrap = provider.createBootstrap("some-awesome-service-1234.amazonaws.com", 443, false); Boolean keepAlive = (Boolean) bootstrap.config().options().get(ChannelOption.SO_KEEPALIVE); assertThat(keepAlive).isTrue(); } diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/DnsResolverLoaderTest.java 
b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/DnsResolverLoaderTest.java new file mode 100644 index 000000000000..40db804aacaf --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/DnsResolverLoaderTest.java @@ -0,0 +1,34 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.netty.channel.epoll.EpollDatagramChannel; +import io.netty.channel.socket.nio.NioDatagramChannel; +import io.netty.channel.socket.oio.OioDatagramChannel; +import io.netty.resolver.dns.DnsAddressResolverGroup; +import org.junit.jupiter.api.Test; + +public class DnsResolverLoaderTest { + + @Test + public void canResolveChannelFactory() { + assertThat(DnsResolverLoader.init(NioDatagramChannel::new)).isInstanceOf(DnsAddressResolverGroup.class); + assertThat(DnsResolverLoader.init(EpollDatagramChannel::new)).isInstanceOf(DnsAddressResolverGroup.class); + assertThat(DnsResolverLoader.init(OioDatagramChannel::new)).isInstanceOf(DnsAddressResolverGroup.class); + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/SocketChannelResolverTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/ChannelResolverTest.java similarity index 
70% rename from http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/SocketChannelResolverTest.java rename to http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/ChannelResolverTest.java index 472c417d4485..45edd2b81bb1 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/SocketChannelResolverTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/ChannelResolverTest.java @@ -16,39 +16,47 @@ package software.amazon.awssdk.http.nio.netty.internal.utils; import static org.assertj.core.api.Assertions.assertThat; -import static software.amazon.awssdk.http.nio.netty.internal.utils.SocketChannelResolver.resolveSocketChannelFactory; +import static software.amazon.awssdk.http.nio.netty.internal.utils.ChannelResolver.resolveDatagramChannelFactory; +import static software.amazon.awssdk.http.nio.netty.internal.utils.ChannelResolver.resolveSocketChannelFactory; import io.netty.channel.epoll.Epoll; +import io.netty.channel.epoll.EpollDatagramChannel; import io.netty.channel.epoll.EpollEventLoopGroup; import io.netty.channel.epoll.EpollSocketChannel; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.oio.OioEventLoopGroup; +import io.netty.channel.socket.nio.NioDatagramChannel; import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.channel.socket.oio.OioDatagramChannel; import io.netty.channel.socket.oio.OioSocketChannel; import org.junit.jupiter.api.Assumptions; import org.junit.jupiter.api.Test; import software.amazon.awssdk.http.nio.netty.internal.DelegatingEventLoopGroup; -public class SocketChannelResolverTest { +public class ChannelResolverTest { @Test public void canDetectFactoryForStandardNioEventLoopGroup() { assertThat(resolveSocketChannelFactory(new NioEventLoopGroup()).newChannel()).isInstanceOf(NioSocketChannel.class); + 
assertThat(resolveDatagramChannelFactory(new NioEventLoopGroup()).newChannel()).isInstanceOf(NioDatagramChannel.class); } @Test public void canDetectEpollEventLoopGroupFactory() { Assumptions.assumeTrue(Epoll.isAvailable()); assertThat(resolveSocketChannelFactory(new EpollEventLoopGroup()).newChannel()).isInstanceOf(EpollSocketChannel.class); + assertThat(resolveDatagramChannelFactory(new EpollEventLoopGroup()).newChannel()).isInstanceOf(EpollDatagramChannel.class); } @Test public void worksWithDelegateEventLoopGroupsFactory() { assertThat(resolveSocketChannelFactory(new DelegatingEventLoopGroup(new NioEventLoopGroup()) {}).newChannel()).isInstanceOf(NioSocketChannel.class); + assertThat(resolveDatagramChannelFactory(new DelegatingEventLoopGroup(new NioEventLoopGroup()) {}).newChannel()).isInstanceOf(NioDatagramChannel.class); } @Test public void worksWithOioEventLoopGroupFactory() { assertThat(resolveSocketChannelFactory(new OioEventLoopGroup()).newChannel()).isInstanceOf(OioSocketChannel.class); + assertThat(resolveDatagramChannelFactory(new OioEventLoopGroup()).newChannel()).isInstanceOf(OioDatagramChannel.class); } } diff --git a/http-clients/pom.xml b/http-clients/pom.xml index 215c88e3b607..6eb6972bcab5 100644 --- a/http-clients/pom.xml +++ b/http-clients/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 diff --git a/http-clients/url-connection-client/pom.xml b/http-clients/url-connection-client/pom.xml index ce4a9e4bcdb4..8e86b6129823 100644 --- a/http-clients/url-connection-client/pom.xml +++ b/http-clients/url-connection-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 diff --git a/metric-publishers/cloudwatch-metric-publisher/pom.xml b/metric-publishers/cloudwatch-metric-publisher/pom.xml index bfbbefcd15da..5fa3f415b02b 100644 --- a/metric-publishers/cloudwatch-metric-publisher/pom.xml +++ 
b/metric-publishers/cloudwatch-metric-publisher/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk metric-publishers - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT cloudwatch-metric-publisher diff --git a/metric-publishers/pom.xml b/metric-publishers/pom.xml index ed1280e30fb4..cd9b49acaebd 100644 --- a/metric-publishers/pom.xml +++ b/metric-publishers/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT metric-publishers diff --git a/pom.xml b/pom.xml index eef73675ef92..b91ae696a996 100644 --- a/pom.xml +++ b/pom.xml @@ -20,7 +20,7 @@ 4.0.0 software.amazon.awssdk aws-sdk-java-pom - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT pom AWS Java SDK :: Parent The Amazon Web Services SDK for Java provides Java APIs @@ -87,10 +87,11 @@ ${scm.github.url} + ${scm.github.connection} ${project.version} - 2.20.67 + 2.20.92 2.13.2 2.13.4.2 2.13.2 @@ -114,7 +115,7 @@ 2.2.21 1.15 1.29 - 0.21.12 + 0.22.2 5.8.1 @@ -173,7 +174,8 @@ ${skipTests} ${project.basedir}/src/it/java ${session.executionRootDirectory} - https://github.com/aws/aws-sdk-java-v2.git + https://github.com/aws/aws-sdk-java-v2 + scm:git:git://github.com/aws/aws-sdk-java-v2.git @@ -693,9 +695,6 @@ publishing - - https://github.com/aws/aws-sdk-java-v2/tree/release - diff --git a/release-scripts/pom.xml b/release-scripts/pom.xml index 550cc0900420..98ec1d277261 100644 --- a/release-scripts/pom.xml +++ b/release-scripts/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ../pom.xml release-scripts diff --git a/services-custom/dynamodb-enhanced/pom.xml b/services-custom/dynamodb-enhanced/pom.xml index a303372d4725..fe0749340b44 100644 --- a/services-custom/dynamodb-enhanced/pom.xml +++ b/services-custom/dynamodb-enhanced/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services-custom - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT dynamodb-enhanced AWS Java SDK :: DynamoDB :: Enhanced Client diff --git 
a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DefaultAttributeConverterProvider.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DefaultAttributeConverterProvider.java index 45db89f5283c..88cfbe39e82f 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DefaultAttributeConverterProvider.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DefaultAttributeConverterProvider.java @@ -43,7 +43,6 @@ import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.DocumentAttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.DoubleAttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.DurationAttributeConverter; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnumAttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.FloatAttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.InstantAsStringAttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.IntegerAttributeConverter; diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbEnhancedClient.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbEnhancedClient.java index 256cd3e9fdba..5c2448a985d7 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbEnhancedClient.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbEnhancedClient.java @@ -269,7 +269,7 @@ default BatchWriteResult batchWriteItem(Consumer @@ -297,6 +297,7 @@ default BatchWriteResult batchWriteItem(Consumer + * See 
{@link DynamoDbClient#transactGetItems(Consumer)} to learn more about {@code TransactGetItems}. * * @param request A {@link TransactGetItemsEnhancedRequest} containing keys with table references. * @return a list of {@link Document} with the results. @@ -307,7 +308,7 @@ default List transactGetItems(TransactGetItemsEnhancedRequest request) /** * Retrieves multiple items from one or more tables in a single atomic transaction. TransactGetItem is a composite operation - * where the request contains a set of up to 25 get requests, each containing a table reference and a + * where the request contains a set of get requests, each containing a table reference and a * {@link GetItemEnhancedRequest}. The list of results correspond to the ordering of the request definitions; for example * the third addGetItem() call on the request builder will match the third result (index 2) of the result. *

    @@ -336,9 +337,12 @@ default List transactGetItems(TransactGetItemsEnhancedRequest request) * MyItem item = results.get(3).getItem(secondItemTable); * } * + *

    + * See {@link DynamoDbClient#transactGetItems(Consumer)} to learn more about {@code TransactGetItems}. * * @param requestConsumer a {@link Consumer} of {@link TransactGetItemsEnhancedRequest} containing keys with table references. * @return a list of {@link Document} with the results. + * */ default List transactGetItems(Consumer requestConsumer) { throw new UnsupportedOperationException(); @@ -346,7 +350,7 @@ default List transactGetItems(Consumer *

  • Condition check of item - {@link ConditionCheck}
  • @@ -384,6 +388,7 @@ default List transactGetItems(Consumer + * See {@link DynamoDbClient#transactWriteItems(Consumer)} to learn more about {@code TransactWriteItems}. * * @param request A {@link BatchWriteItemEnhancedRequest} containing keys grouped by tables. */ @@ -393,7 +398,7 @@ default Void transactWriteItems(TransactWriteItemsEnhancedRequest request) { /** * Writes and/or modifies multiple items from one or more tables in a single atomic transaction. TransactGetItem is a - * composite operation where the request contains a set of up to 25 action requests, each containing a table reference and + * composite operation where the request contains a set of action requests, each containing a table reference and * one of the following requests: *
      *
    • Condition check of item - {@link ConditionCheck}
    • @@ -427,6 +432,7 @@ default Void transactWriteItems(TransactWriteItemsEnhancedRequest request) { * .addUpdateItem(secondItemTable, i -> i.item(item4))); * } * + * See {@link DynamoDbClient#transactWriteItems(Consumer)} to learn more about {@code TransactWriteItems}. * * @param requestConsumer a {@link Consumer} of {@link TransactWriteItemsEnhancedRequest} containing keys and items grouped * by tables. diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/EnumAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/EnumAttributeConverter.java new file mode 100644 index 000000000000..a44a5e2070f0 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/EnumAttributeConverter.java @@ -0,0 +1,138 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.utils.Validate; + +/** + * A converter between an {@link Enum} and {@link AttributeValue}. + * + *

      + * This stores values in DynamoDB as a string. + * + *

      + * Use EnumAttributeConverter::create in order to use Enum::toString as the enum identifier + * + *

      + * Use EnumAttributeConverter::createWithNameAsKeys in order to use Enum::name as the enum identifier + * + *

      + * This can be created via {@link #create(Class)}. + */ +@SdkPublicApi +public final class EnumAttributeConverter> implements AttributeConverter { + + private final Class enumClass; + private final Map enumValueMap; + + private final Function keyExtractor; + + private EnumAttributeConverter(Class enumClass, Function keyExtractor) { + this.enumClass = enumClass; + this.keyExtractor = keyExtractor; + + Map mutableEnumValueMap = new LinkedHashMap<>(); + Arrays.stream(enumClass.getEnumConstants()) + .forEach(enumConstant -> mutableEnumValueMap.put(keyExtractor.apply(enumConstant), enumConstant)); + + this.enumValueMap = Collections.unmodifiableMap(mutableEnumValueMap); + } + + /** + * Creates an EnumAttributeConverter for an {@link Enum}. + * + *

      + * Uses Enum::toString as the enum identifier. + * + * @param enumClass The enum class to be used + * @return an EnumAttributeConverter + * @param the enum subclass + */ + public static > EnumAttributeConverter create(Class enumClass) { + return new EnumAttributeConverter<>(enumClass, Enum::toString); + } + + /** + * Creates an EnumAttributeConverter for an {@link Enum}. + * + *

      + * Uses Enum::name as the enum identifier. + * + * @param enumClass The enum class to be used + * @return an EnumAttributeConverter + * @param the enum subclass + */ + public static > EnumAttributeConverter createWithNameAsKeys(Class enumClass) { + return new EnumAttributeConverter<>(enumClass, Enum::name); + } + + /** + * Returns the proper {@link AttributeValue} for the given enum type. + * + * @param input the enum type to be converted + * @return AttributeValue + */ + @Override + public AttributeValue transformFrom(T input) { + return AttributeValue.builder().s(keyExtractor.apply(input)).build(); + } + + /** + * Returns the proper enum type for the given {@link AttributeValue} input. + * + * @param input the AttributeValue to be converted + * @return an enum type + */ + @Override + public T transformTo(AttributeValue input) { + Validate.isTrue(input.s() != null, "Cannot convert non-string value to enum."); + T returnValue = enumValueMap.get(input.s()); + + if (returnValue == null) { + throw new IllegalArgumentException(String.format("Unable to convert string value '%s' to enum type '%s'", + input.s(), enumClass)); + } + + return returnValue; + } + + /** + * Returns the {@link EnhancedType} of the converter. + * + * @return EnhancedType + */ + @Override + public EnhancedType type() { + return EnhancedType.of(enumClass); + } + + /** + * Returns the {@link AttributeValueType} of the converter. 
+ * + * @return AttributeValueType + */ + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/TableSchema.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/TableSchema.java index bdf4ee35cbdb..2aa9d100d2c2 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/TableSchema.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/TableSchema.java @@ -53,6 +53,18 @@ static StaticTableSchema.Builder builder(Class itemClass) { return StaticTableSchema.builder(itemClass); } + /** + * Returns a builder for the {@link StaticTableSchema} implementation of this interface which allows all attributes, + * tags and table structure to be directly declared in the builder. + * + * @param itemType The {@link EnhancedType} of the item this {@link TableSchema} will map records to. + * @param The type of the item this {@link TableSchema} will map records to. + * @return A newly initialized {@link StaticTableSchema.Builder}. + */ + static StaticTableSchema.Builder builder(EnhancedType itemType) { + return StaticTableSchema.builder(itemType); + } + /** * Returns a builder for the {@link StaticImmutableTableSchema} implementation of this interface which allows all * attributes, tags and table structure to be directly declared in the builder. @@ -69,6 +81,22 @@ static StaticImmutableTableSchema.Builder builder(Class immutabl return StaticImmutableTableSchema.builder(immutableItemClass, immutableBuilderClass); } + /** + * Returns a builder for the {@link StaticImmutableTableSchema} implementation of this interface which allows all + * attributes, tags and table structure to be directly declared in the builder. 
+ * + * @param immutableItemType The {@link EnhancedType} of the immutable item this {@link TableSchema} will map records to. + * @param immutableBuilderType The {@link EnhancedType} of the class that can be used to construct immutable items this + * {@link TableSchema} maps records to. + * @param The type of the immutable item this {@link TableSchema} will map records to. + * @param The type of the builder used by this {@link TableSchema} to construct immutable items with. + * @return A newly initialized {@link StaticImmutableTableSchema.Builder} + */ + static StaticImmutableTableSchema.Builder builder(EnhancedType immutableItemType, + EnhancedType immutableBuilderType) { + return StaticImmutableTableSchema.builder(immutableItemType, immutableBuilderType); + } + /** * Scans a bean class that has been annotated with DynamoDb bean annotations and then returns a * {@link BeanTableSchema} implementation of this interface that can map records to and from items of that bean diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTable.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTable.java index 6c9f0f69265e..8b6d8412969b 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTable.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTable.java @@ -15,13 +15,20 @@ package software.amazon.awssdk.enhanced.dynamodb.internal.client; +import static java.util.Collections.emptyList; import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.createKeyFromItem; +import java.util.Collection; +import java.util.List; +import java.util.Map; import java.util.function.Consumer; +import java.util.stream.Collectors; import 
software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.IndexMetadata; import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.KeyAttributeMetadata; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.CreateTableOperation; @@ -39,6 +46,8 @@ import software.amazon.awssdk.enhanced.dynamodb.model.DeleteItemEnhancedRequest; import software.amazon.awssdk.enhanced.dynamodb.model.DeleteItemEnhancedResponse; import software.amazon.awssdk.enhanced.dynamodb.model.DescribeTableEnhancedResponse; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedGlobalSecondaryIndex; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedLocalSecondaryIndex; import software.amazon.awssdk.enhanced.dynamodb.model.GetItemEnhancedRequest; import software.amazon.awssdk.enhanced.dynamodb.model.PageIterable; import software.amazon.awssdk.enhanced.dynamodb.model.PutItemEnhancedRequest; @@ -51,6 +60,7 @@ import software.amazon.awssdk.services.dynamodb.DynamoDbClient; import software.amazon.awssdk.services.dynamodb.model.DescribeTableRequest; import software.amazon.awssdk.services.dynamodb.model.DescribeTableResponse; +import software.amazon.awssdk.services.dynamodb.model.ProjectionType; @SdkInternalApi public class DefaultDynamoDbTable implements DynamoDbTable { @@ -115,7 +125,51 @@ public void createTable(Consumer requestCons @Override public void createTable() { - createTable(CreateTableEnhancedRequest.builder().build()); + Map> indexGroups = splitSecondaryIndicesToLocalAndGlobalOnes(); + createTable(CreateTableEnhancedRequest.builder() + .localSecondaryIndices(extractLocalSecondaryIndices(indexGroups)) + 
.globalSecondaryIndices(extractGlobalSecondaryIndices(indexGroups)) + .build()); + } + + private Map> splitSecondaryIndicesToLocalAndGlobalOnes() { + String primaryPartitionKeyName = tableSchema.tableMetadata().primaryPartitionKey(); + Collection indices = tableSchema.tableMetadata().indices(); + return indices.stream() + .filter(index -> !TableMetadata.primaryIndexName().equals(index.name())) + .collect(Collectors.groupingBy(metadata -> { + String partitionKeyName = metadata.partitionKey().map(KeyAttributeMetadata::name).orElse(null); + if (partitionKeyName == null || primaryPartitionKeyName.equals(partitionKeyName)) { + return IndexType.LSI; + } + return IndexType.GSI; + })); + } + + private List extractLocalSecondaryIndices(Map> indicesGroups) { + return indicesGroups.getOrDefault(IndexType.LSI, emptyList()).stream() + .map(this::mapIndexMetadataToEnhancedLocalSecondaryIndex) + .collect(Collectors.toList()); + } + + private EnhancedLocalSecondaryIndex mapIndexMetadataToEnhancedLocalSecondaryIndex(IndexMetadata indexMetadata) { + return EnhancedLocalSecondaryIndex.builder() + .indexName(indexMetadata.name()) + .projection(pb -> pb.projectionType(ProjectionType.ALL)) + .build(); + } + + private List extractGlobalSecondaryIndices(Map> indicesGroups) { + return indicesGroups.getOrDefault(IndexType.GSI, emptyList()).stream() + .map(this::mapIndexMetadataToEnhancedGlobalSecondaryIndex) + .collect(Collectors.toList()); + } + + private EnhancedGlobalSecondaryIndex mapIndexMetadataToEnhancedGlobalSecondaryIndex(IndexMetadata indexMetadata) { + return EnhancedGlobalSecondaryIndex.builder() + .indexName(indexMetadata.name()) + .projection(pb -> pb.projectionType(ProjectionType.ALL)) + .build(); } @Override diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/IndexType.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/IndexType.java new file mode 
100644 index 000000000000..0fd1fc28cd82 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/IndexType.java @@ -0,0 +1,27 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.client; + +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * Enum collecting types of secondary indexes + */ +@SdkInternalApi +public enum IndexType { + LSI, + GSI +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/EnumAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/EnumAttributeConverter.java deleted file mode 100644 index 18395a82656b..000000000000 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/EnumAttributeConverter.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; - -import java.util.Arrays; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.Map; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; -import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; -import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.utils.Validate; - -/** - * A converter between an {@link Enum} and {@link AttributeValue}. - * - *

      - * This stores values in DynamoDB as a string. - * - *

      - * This can be created via {@link #create(Class)}. - */ -@SdkInternalApi -public class EnumAttributeConverter> implements AttributeConverter { - - private final Class enumClass; - private final Map enumValueMap; - - private EnumAttributeConverter(Class enumClass) { - this.enumClass = enumClass; - - Map mutableEnumValueMap = new LinkedHashMap<>(); - Arrays.stream(enumClass.getEnumConstants()) - .forEach(enumConstant -> mutableEnumValueMap.put(enumConstant.toString(), enumConstant)); - - this.enumValueMap = Collections.unmodifiableMap(mutableEnumValueMap); - } - - public static > EnumAttributeConverter create(Class enumClass) { - return new EnumAttributeConverter<>(enumClass); - } - - @Override - public AttributeValue transformFrom(T input) { - return AttributeValue.builder().s(input.toString()).build(); - } - - @Override - public T transformTo(AttributeValue input) { - Validate.isTrue(input.s() != null, "Cannot convert non-string value to enum."); - T returnValue = enumValueMap.get(input.s()); - - if (returnValue == null) { - throw new IllegalArgumentException(String.format("Unable to convert string value '%s' to enum type '%s'", - input.s(), enumClass)); - } - - return returnValue; - } - - @Override - public EnhancedType type() { - return EnhancedType.of(enumClass); - } - - @Override - public AttributeValueType attributeValueType() { - return AttributeValueType.S; - } -} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperation.java index 9bfe3f2528f7..055b685dbe4d 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperation.java +++ 
b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperation.java @@ -74,7 +74,7 @@ public CreateTableRequest generateRequest(TableSchema tableSchema, List sdkGlobalSecondaryIndices = null; List sdkLocalSecondaryIndices = null; - if (this.request.globalSecondaryIndices() != null) { + if (this.request.globalSecondaryIndices() != null && !this.request.globalSecondaryIndices().isEmpty()) { sdkGlobalSecondaryIndices = this.request.globalSecondaryIndices().stream().map(gsi -> { String indexPartitionKey = tableSchema.tableMetadata().indexPartitionKey(gsi.indexName()); @@ -92,7 +92,7 @@ public CreateTableRequest generateRequest(TableSchema tableSchema, }).collect(Collectors.toList()); } - if (this.request.localSecondaryIndices() != null) { + if (this.request.localSecondaryIndices() != null && !this.request.localSecondaryIndices().isEmpty()) { sdkLocalSecondaryIndices = this.request.localSecondaryIndices().stream().map(lsi -> { Optional indexSortKey = tableSchema.tableMetadata().indexSortKey(lsi.indexName()); diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/ImmutableAttribute.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/ImmutableAttribute.java index d22f87af61a9..d5622bfc6df6 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/ImmutableAttribute.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/ImmutableAttribute.java @@ -91,6 +91,19 @@ public static Builder builder(Class itemClass, return new Builder<>(attributeType); } + /** + * Constructs a new builder for this class using supplied types. + * @param itemType The {@link EnhancedType} of the immutable item that this attribute composes. 
+ * @param builderType The {@link EnhancedType} of the builder for the immutable item that this attribute composes. + * @param attributeType A {@link EnhancedType} that represents the type of the value this attribute stores. + * @return A new typed builder for an attribute. + */ + public static Builder builder(EnhancedType itemType, + EnhancedType builderType, + EnhancedType attributeType) { + return new Builder<>(attributeType); + } + /** * Constructs a new builder for this class using supplied types. * @param itemClass The class of the item that this attribute composes. diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttribute.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttribute.java index 2957311d7417..5071869347c8 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttribute.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttribute.java @@ -69,7 +69,17 @@ private StaticAttribute(Builder builder) { * @return A new typed builder for an attribute. */ public static Builder builder(Class itemClass, EnhancedType attributeType) { - return new Builder<>(itemClass, attributeType); + return new Builder<>(EnhancedType.of(itemClass), attributeType); + } + + /** + * Constructs a new builder for this class using supplied types. + * @param itemType The {@link EnhancedType} of the item that this attribute composes. + * @param attributeType A {@link EnhancedType} that represents the type of the value this attribute stores. + * @return A new typed builder for an attribute. 
+ */ + public static Builder builder(EnhancedType itemType, EnhancedType attributeType) { + return new Builder<>(itemType, attributeType); } /** @@ -79,7 +89,7 @@ public static Builder builder(Class itemClass, EnhancedType a * @return A new typed builder for an attribute. */ public static Builder builder(Class itemClass, Class attributeClass) { - return new Builder<>(itemClass, EnhancedType.of(attributeClass)); + return new Builder<>(EnhancedType.of(itemClass), EnhancedType.of(attributeClass)); } /** @@ -146,8 +156,8 @@ ImmutableAttribute toImmutableAttribute() { public static final class Builder { private final ImmutableAttribute.Builder delegateBuilder; - private Builder(Class itemClass, EnhancedType type) { - this.delegateBuilder = ImmutableAttribute.builder(itemClass, itemClass, type); + private Builder(EnhancedType itemType, EnhancedType type) { + this.delegateBuilder = ImmutableAttribute.builder(itemType, itemType, type); } private Builder(ImmutableAttribute.Builder delegateBuilder) { diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchema.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchema.java index 5d08ee4a3ae3..ea86ac9fcec4 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchema.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchema.java @@ -210,7 +210,7 @@ private StaticImmutableTableSchema(Builder builder) { this.newBuilderSupplier = builder.newBuilderSupplier; this.buildItemFunction = builder.buildItemFunction; this.tableMetadata = tableMetadataBuilder.build(); - this.itemType = EnhancedType.of(builder.itemClass); + this.itemType = builder.itemType; } /** @@ -220,7 +220,18 @@ private StaticImmutableTableSchema(Builder builder) { * @return A 
newly initialized builder */ public static Builder builder(Class itemClass, Class builderClass) { - return new Builder<>(itemClass, builderClass); + return new Builder<>(EnhancedType.of(itemClass), EnhancedType.of(builderClass)); + } + + /** + * Creates a builder for a {@link StaticImmutableTableSchema} typed to specific immutable data item class. + * @param itemType The {@link EnhancedType} of the immutable data item class object that the + * {@link StaticImmutableTableSchema} is to map to. + * @param builderType The builder {@link EnhancedType} that can be used to construct instances of the immutable data item. + * @return A newly initialized builder + */ + public static Builder builder(EnhancedType itemType, EnhancedType builderType) { + return new Builder<>(itemType, builderType); } /** @@ -230,8 +241,8 @@ public static Builder builder(Class itemClass, Class builderC */ @NotThreadSafe public static final class Builder { - private final Class itemClass; - private final Class builderClass; + private final EnhancedType itemType; + private final EnhancedType builderType; private final List> additionalAttributes = new ArrayList<>(); private final List> flattenedMappers = new ArrayList<>(); @@ -242,9 +253,9 @@ public static final class Builder { private List attributeConverterProviders = Collections.singletonList(ConverterProviderResolver.defaultConverterProvider()); - private Builder(Class itemClass, Class builderClass) { - this.itemClass = itemClass; - this.builderClass = builderClass; + private Builder(EnhancedType itemType, EnhancedType builderType) { + this.itemType = itemType; + this.builderType = builderType; } /** @@ -285,7 +296,7 @@ public Builder addAttribute(EnhancedType attributeType, Consumer> immutableAttribute) { ImmutableAttribute.Builder builder = - ImmutableAttribute.builder(itemClass, builderClass, attributeType); + ImmutableAttribute.builder(itemType, builderType, attributeType); immutableAttribute.accept(builder); return 
addAttribute(builder.build()); } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchema.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchema.java index 5d8dbfd94b76..6dc6b2d4f211 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchema.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchema.java @@ -75,7 +75,16 @@ private StaticTableSchema(Builder builder) { * @return A newly initialized builder */ public static Builder builder(Class itemClass) { - return new Builder<>(itemClass); + return new Builder<>(EnhancedType.of(itemClass)); + } + + /** + * Creates a builder for a {@link StaticTableSchema} typed to specific data item class. + * @param itemType The {@link EnhancedType} of the data item class object that the {@link StaticTableSchema} is to map to. 
+ * @return A newly initialized builder + */ + public static Builder builder(EnhancedType itemType) { + return new Builder<>(itemType); } /** @@ -85,11 +94,11 @@ public static Builder builder(Class itemClass) { @NotThreadSafe public static final class Builder { private final StaticImmutableTableSchema.Builder delegateBuilder; - private final Class itemClass; + private final EnhancedType itemType; - private Builder(Class itemClass) { - this.delegateBuilder = StaticImmutableTableSchema.builder(itemClass, itemClass); - this.itemClass = itemClass; + private Builder(EnhancedType itemType) { + this.delegateBuilder = StaticImmutableTableSchema.builder(itemType, itemType); + this.itemType = itemType; } /** @@ -130,7 +139,7 @@ public Builder attributes(Collection> staticAttributes) */ public Builder addAttribute(EnhancedType attributeType, Consumer> staticAttribute) { - StaticAttribute.Builder builder = StaticAttribute.builder(itemClass, attributeType); + StaticAttribute.Builder builder = StaticAttribute.builder(itemType, attributeType); staticAttribute.accept(builder); this.delegateBuilder.addAttribute(builder.build().toImmutableAttribute()); return this; @@ -142,7 +151,7 @@ public Builder addAttribute(EnhancedType attributeType, */ public Builder addAttribute(Class attributeClass, Consumer> staticAttribute) { - StaticAttribute.Builder builder = StaticAttribute.builder(itemClass, attributeClass); + StaticAttribute.Builder builder = StaticAttribute.builder(itemType, EnhancedType.of(attributeClass)); staticAttribute.accept(builder); this.delegateBuilder.addAttribute(builder.build().toImmutableAttribute()); return this; diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/TableSchemaTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/TableSchemaTest.java index d42296dfe110..daac73362923 100644 --- 
a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/TableSchemaTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/TableSchemaTest.java @@ -23,6 +23,7 @@ import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; import software.amazon.awssdk.enhanced.dynamodb.mapper.BeanTableSchema; import software.amazon.awssdk.enhanced.dynamodb.mapper.ImmutableTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticImmutableTableSchema; import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.InvalidBean; import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.SimpleBean; @@ -33,11 +34,31 @@ public class TableSchemaTest { public ExpectedException exception = ExpectedException.none(); @Test - public void builder_constructsStaticTableSchemaBuilder() { + public void builder_constructsStaticTableSchemaBuilder_fromClass() { StaticTableSchema.Builder builder = TableSchema.builder(FakeItem.class); assertThat(builder).isNotNull(); } + @Test + public void builder_constructsStaticTableSchemaBuilder_fromEnhancedType() { + StaticTableSchema.Builder builder = TableSchema.builder(EnhancedType.of(FakeItem.class)); + assertThat(builder).isNotNull(); + } + + @Test + public void builder_constructsStaticImmutableTableSchemaBuilder_fromClass() { + StaticImmutableTableSchema.Builder builder = + TableSchema.builder(SimpleImmutable.class, SimpleImmutable.Builder.class); + assertThat(builder).isNotNull(); + } + + @Test + public void builder_constructsStaticImmutableTableSchemaBuilder_fromEnhancedType() { + StaticImmutableTableSchema.Builder builder = + TableSchema.builder(EnhancedType.of(SimpleImmutable.class), EnhancedType.of(SimpleImmutable.Builder.class)); + assertThat(builder).isNotNull(); + } + @Test public void fromBean_constructsBeanTableSchema() { BeanTableSchema 
beanBeanTableSchema = TableSchema.fromBean(SimpleBean.class); diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/EnumAttributeConverterTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/EnumAttributeConverterTest.java new file mode 100644 index 000000000000..fe17f3050533 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/EnumAttributeConverterTest.java @@ -0,0 +1,113 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.enhanced.dynamodb.EnumAttributeConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +import static org.assertj.core.api.Assertions.assertThat; + +public class EnumAttributeConverterTest { + + @Test + public void transformFromDefault_returnsToString() { + EnumAttributeConverter vehicleConverter = EnumAttributeConverter.create(Vehicle.class); + AttributeValue attribute = vehicleConverter.transformFrom(Vehicle.TRUCK); + + assertThat(attribute.s()).isEqualTo("TRUCK"); + } + + @Test + public void transformToDefault_returnsEnum() { + EnumAttributeConverter vehicleConverter = EnumAttributeConverter.create(Vehicle.class); + + Vehicle bike = vehicleConverter.transformTo(AttributeValue.fromS("BIKE")); + + assertThat(bike).isEqualTo(Vehicle.BIKE); + } + + @Test + public void transformFromDefault_returnsToString_2() { + EnumAttributeConverter animalConverter = EnumAttributeConverter.create(Animal.class); + AttributeValue attribute = animalConverter.transformFrom(Animal.CAT); + + assertThat(attribute.s()).isEqualTo("I am a Cat!"); + } + + @Test + public void transformToDefault_returnsEnum_2() { + EnumAttributeConverter animalConverter = EnumAttributeConverter.create(Animal.class); + + Animal dog = animalConverter.transformTo(AttributeValue.fromS("I am a Dog!")); + + assertThat(dog).isEqualTo(Animal.DOG); + } + + @Test + public void transformFromWithNames_returnsName() { + EnumAttributeConverter personConverter = EnumAttributeConverter.createWithNameAsKeys(Person.class); + AttributeValue attribute = personConverter.transformFrom(Person.JANE); + + assertThat(attribute.s()).isEqualTo("JANE"); + + assertThat(Person.JANE.toString()).isEqualTo("I am a cool person"); + } + + @Test + public void transformToWithNames_returnsEnum() { + EnumAttributeConverter personConverter = 
EnumAttributeConverter.createWithNameAsKeys(Person.class); + + Person john = personConverter.transformTo(AttributeValue.fromS("JOHN")); + + assertThat(Person.JOHN.toString()).isEqualTo("I am a cool person"); + + assertThat(john).isEqualTo(Person.JOHN); + } + + private static enum Vehicle { + CAR, + BIKE, + TRUCK + } + + private static enum Animal { + DOG, + CAT; + + @Override + public String toString() { + switch (this) { + case DOG: + return "I am a Dog!"; + case CAT: + return "I am a Cat!"; + default: + return null; + } + } + } + + private static enum Person { + JOHN, + JANE; + + @Override + public String toString() { + return "I am a cool person"; + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTableTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTableTest.java index b268f2928855..e78c4ea36207 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTableTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTableTest.java @@ -16,20 +16,30 @@ package software.amazon.awssdk.enhanced.dynamodb.internal.client; import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Mockito.verify; import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; +import java.util.Iterator; +import java.util.List; import java.util.Optional; +import java.util.stream.Collectors; import org.junit.Test; import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; import org.mockito.Mock; +import org.mockito.Mockito; import 
org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.Key; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithIndices; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; +import software.amazon.awssdk.enhanced.dynamodb.model.CreateTableEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedGlobalSecondaryIndex; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedLocalSecondaryIndex; import software.amazon.awssdk.services.dynamodb.DynamoDbClient; @RunWith(MockitoJUnitRunner.class) @@ -113,4 +123,50 @@ public void keyFrom_primaryIndex_partitionAndNullSort() { assertThat(key.partitionKeyValue(), is(stringValue(item.getId()))); assertThat(key.sortKeyValue(), is(Optional.empty())); } + + @Test + public void createTable_doesNotTreatPrimaryIndexAsAnyOfSecondaryIndexes() { + DefaultDynamoDbTable dynamoDbMappedIndex = + Mockito.spy(new DefaultDynamoDbTable<>(mockDynamoDbClient, + mockDynamoDbEnhancedClientExtension, + FakeItem.getTableSchema(), + "test_table")); + + dynamoDbMappedIndex.createTable(); + + CreateTableEnhancedRequest request = captureCreateTableRequest(dynamoDbMappedIndex); + + assertThat(request.localSecondaryIndices().size(), is(0)); + assertThat(request.globalSecondaryIndices().size(), is(0)); + } + + @Test + public void createTable_groupsSecondaryIndexesExistingInTableSchema() { + DefaultDynamoDbTable dynamoDbMappedIndex = + Mockito.spy(new DefaultDynamoDbTable<>(mockDynamoDbClient, + mockDynamoDbEnhancedClientExtension, + FakeItemWithIndices.getTableSchema(), + "test_table")); + + dynamoDbMappedIndex.createTable(); + + CreateTableEnhancedRequest request = captureCreateTableRequest(dynamoDbMappedIndex); + + assertThat(request.localSecondaryIndices().size(), 
is(1)); + Iterator lsiIterator = request.localSecondaryIndices().iterator(); + assertThat(lsiIterator.next().indexName(), is("lsi_1")); + + assertThat(request.globalSecondaryIndices().size(), is(2)); + List globalIndicesNames = request.globalSecondaryIndices().stream() + .map(EnhancedGlobalSecondaryIndex::indexName) + .collect(Collectors.toList()); + assertThat(globalIndicesNames, containsInAnyOrder("gsi_1", "gsi_2")); + } + + private static CreateTableEnhancedRequest captureCreateTableRequest(DefaultDynamoDbTable index) { + ArgumentCaptor createTableOperationCaptor = + ArgumentCaptor.forClass(CreateTableEnhancedRequest.class); + verify(index).createTable(createTableOperationCaptor.capture()); + return createTableOperationCaptor.getValue(); + } } diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchemaTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchemaTest.java index 2df2c4b052e1..5c1b8b2a4d11 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchemaTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchemaTest.java @@ -59,6 +59,7 @@ import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemComposedClass; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testimmutables.EntityEnvelopeImmutable; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; @RunWith(MockitoJUnitRunner.class) @@ -801,6 +802,17 @@ public void itemType_returnsCorrectClass() { assertThat(FakeItem.getTableSchema().itemType(), is(equalTo(EnhancedType.of(FakeItem.class)))); } 
+ @Test + public void itemType_returnsCorrectClassWhenBuiltWithEnhancedType() { + StaticImmutableTableSchema tableSchema = + StaticImmutableTableSchema.builder(EnhancedType.of(FakeMappedItem.class), + EnhancedType.of(FakeMappedItem.Builder.class)) + .newItemBuilder(FakeMappedItem::builder, FakeMappedItem.Builder::build) + .build(); + + assertThat(tableSchema.itemType(), is(equalTo(EnhancedType.of(FakeMappedItem.class)))); + } + @Test public void getTableMetadata_hasCorrectFields() { TableMetadata tableMetadata = FakeItemWithSort.getTableSchema().tableMetadata(); @@ -1538,6 +1550,27 @@ public void noConverterProvider_handlesCorrectly_whenAttributeConvertersAreSuppl assertThat(resultMap.get("aString").s(), is(expectedString)); } + @Test + public void builder_canBuildForGenericClassType() { + StaticImmutableTableSchema, EntityEnvelopeImmutable.Builder> envelopeTableSchema = + StaticImmutableTableSchema.builder(new EnhancedType>() {}, + new EnhancedType>() {}) + .newItemBuilder(EntityEnvelopeImmutable.Builder::new, EntityEnvelopeImmutable.Builder::build) + .addAttribute(String.class, + a -> a.name("entity") + .getter(EntityEnvelopeImmutable::entity) + .setter(EntityEnvelopeImmutable.Builder::setEntity)) + .build(); + + EntityEnvelopeImmutable testEnvelope = new EntityEnvelopeImmutable<>("test-value"); + + Map expectedMap = + Collections.singletonMap("entity", AttributeValue.fromS("test-value")); + + assertThat(envelopeTableSchema.itemToMap(testEnvelope, false), equalTo(expectedMap)); + assertThat(envelopeTableSchema.mapToItem(expectedMap).entity(), equalTo("test-value")); + } + private void verifyAttribute(EnhancedType attributeType, Consumer> staticAttribute, FakeMappedItem fakeMappedItem, diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchemaTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchemaTest.java index 
fc43da907b08..368ef26b9648 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchemaTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchemaTest.java @@ -57,6 +57,7 @@ import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemComposedClass; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.EntityEnvelopeBean; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; @RunWith(MockitoJUnitRunner.class) @@ -799,6 +800,16 @@ public void itemType_returnsCorrectClass() { assertThat(FakeItem.getTableSchema().itemType(), is(equalTo(EnhancedType.of(FakeItem.class)))); } + @Test + public void itemType_returnsCorrectClassWhenBuiltWithEnhancedType() { + StaticTableSchema tableSchema = StaticTableSchema.builder(EnhancedType.of(FakeMappedItem.class)) + .newItemSupplier(FakeMappedItem::new) + .attributes(ATTRIBUTES) + .build(); + + assertThat(tableSchema.itemType(), is(equalTo(EnhancedType.of(FakeMappedItem.class)))); + } + @Test public void getTableMetadata_hasCorrectFields() { TableMetadata tableMetadata = FakeItemWithSort.getTableSchema().tableMetadata(); @@ -1485,6 +1496,27 @@ public void noConverterProvider_handlesCorrectly_whenAttributeConvertersAreSuppl assertThat(resultMap.get("aString").s(), is(expectedString)); } + @Test + public void builder_canBuildForGenericClassType() { + StaticTableSchema> envelopeTableSchema = + StaticTableSchema.builder(new EnhancedType>() {}) + .newItemSupplier(EntityEnvelopeBean::new) + .addAttribute(String.class, + a -> a.name("entity") + .getter(EntityEnvelopeBean::getEntity) + .setter(EntityEnvelopeBean::setEntity)) + .build(); + + EntityEnvelopeBean testEnvelope = new 
EntityEnvelopeBean<>(); + testEnvelope.setEntity("test-value"); + + Map expectedMap = + Collections.singletonMap("entity", AttributeValue.fromS("test-value")); + + assertThat(envelopeTableSchema.itemToMap(testEnvelope, false), equalTo(expectedMap)); + assertThat(envelopeTableSchema.mapToItem(expectedMap).getEntity(), equalTo("test-value")); + } + private void verifyAttribute(EnhancedType attributeType, Consumer> staticAttribute, FakeMappedItem fakeMappedItem, diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/EntityEnvelopeBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/EntityEnvelopeBean.java new file mode 100644 index 000000000000..5097ae8d6747 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/EntityEnvelopeBean.java @@ -0,0 +1,28 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +public class EntityEnvelopeBean { + private T entity; + + public T getEntity() { + return this.entity; + } + + public void setEntity(T entity) { + this.entity = entity; + } +} \ No newline at end of file diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testimmutables/EntityEnvelopeImmutable.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testimmutables/EntityEnvelopeImmutable.java new file mode 100644 index 000000000000..8be0b00f0d70 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testimmutables/EntityEnvelopeImmutable.java @@ -0,0 +1,41 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testimmutables; + +public class EntityEnvelopeImmutable { + private final T entity; + + public EntityEnvelopeImmutable(T entity) { + this.entity = entity; + } + + public T entity() { + return this.entity; + } + + public static class Builder { + private T entity; + + public void setEntity(T entity) { + this.entity = entity; + } + + public EntityEnvelopeImmutable build() { + return new EntityEnvelopeImmutable<>(this.entity); + } + } +} + diff --git a/services-custom/pom.xml b/services-custom/pom.xml index 752c3cfeebbc..6c09968e5420 100644 --- a/services-custom/pom.xml +++ b/services-custom/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT services-custom AWS Java SDK :: Custom Services diff --git a/services-custom/s3-transfer-manager/pom.xml b/services-custom/s3-transfer-manager/pom.xml index 00c3deaeb613..5a6696eddb02 100644 --- a/services-custom/s3-transfer-manager/pom.xml +++ b/services-custom/s3-transfer-manager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ../../pom.xml s3-transfer-manager diff --git a/services-custom/s3-transfer-manager/src/it/java/software/amazon/awssdk/transfer/s3/S3IntegrationTestBase.java b/services-custom/s3-transfer-manager/src/it/java/software/amazon/awssdk/transfer/s3/S3IntegrationTestBase.java index 28f054ebd02a..94aaaf03b6dc 100644 --- a/services-custom/s3-transfer-manager/src/it/java/software/amazon/awssdk/transfer/s3/S3IntegrationTestBase.java +++ b/services-custom/s3-transfer-manager/src/it/java/software/amazon/awssdk/transfer/s3/S3IntegrationTestBase.java @@ -118,7 +118,7 @@ private static void createBucket(String bucketName, int retryCount) { if (e.awsErrorDetails().errorCode().equals("BucketAlreadyOwnedByYou")) { System.err.printf("%s bucket already exists, likely leaked by a previous run\n", bucketName); } else if 
(e.awsErrorDetails().errorCode().equals("TooManyBuckets")) { - System.err.println("Printing all buckets for debug:"); + System.err.println("Error: TooManyBuckets. Printing all buckets for debug:"); s3.listBuckets().buckets().forEach(System.err::println); if (retryCount < 2) { System.err.println("Retrying..."); diff --git a/services-custom/s3-transfer-manager/src/it/java/software/amazon/awssdk/transfer/s3/S3TransferManagerUploadPauseResumeIntegrationTest.java b/services-custom/s3-transfer-manager/src/it/java/software/amazon/awssdk/transfer/s3/S3TransferManagerUploadPauseResumeIntegrationTest.java index 460e72e392e4..b6afca397565 100644 --- a/services-custom/s3-transfer-manager/src/it/java/software/amazon/awssdk/transfer/s3/S3TransferManagerUploadPauseResumeIntegrationTest.java +++ b/services-custom/s3-transfer-manager/src/it/java/software/amazon/awssdk/transfer/s3/S3TransferManagerUploadPauseResumeIntegrationTest.java @@ -24,10 +24,14 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.time.Duration; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import software.amazon.awssdk.core.retry.backoff.FixedDelayBackoffStrategy; +import software.amazon.awssdk.core.waiters.AsyncWaiter; import software.amazon.awssdk.core.waiters.Waiter; import software.amazon.awssdk.core.waiters.WaiterAcceptor; import software.amazon.awssdk.services.s3.model.ListMultipartUploadsResponse; @@ -48,12 +52,14 @@ public class S3TransferManagerUploadPauseResumeIntegrationTest extends S3Integra private static final long OBJ_SIZE = 24 * MB; private static File largeFile; private static File smallFile; + private static ScheduledExecutorService executorService; @BeforeAll public static void setup() throws Exception { createBucket(BUCKET); largeFile = new 
RandomTempFile(OBJ_SIZE); smallFile = new RandomTempFile(2 * MB); + executorService = Executors.newScheduledThreadPool(3); } @AfterAll @@ -61,6 +67,7 @@ public static void cleanup() { deleteBucketAndAllContents(BUCKET); largeFile.delete(); smallFile.delete(); + executorService.shutdown(); } @Test @@ -151,8 +158,13 @@ private void verifyMultipartUploadIdExists(ResumableFileUpload resumableFileUplo private void verifyMultipartUploadIdNotExist(ResumableFileUpload resumableFileUpload) { String multipartUploadId = resumableFileUpload.multipartUploadId().get(); - assertThatThrownBy(() -> s3Async.listParts(r -> r.uploadId(multipartUploadId).bucket(BUCKET).key(KEY)).join()) - .hasCauseInstanceOf(NoSuchUploadException.class); + AsyncWaiter waiter = AsyncWaiter.builder(ListPartsResponse.class) + .addAcceptor(WaiterAcceptor.successOnExceptionAcceptor(e -> e instanceof NoSuchUploadException)) + .addAcceptor(WaiterAcceptor.retryOnResponseAcceptor(r -> true)) + .overrideConfiguration(o -> o.waitTimeout(Duration.ofMinutes(1))) + .scheduledExecutorService(executorService) + .build(); + waiter.runAsync(() -> s3Async.listParts(r -> r.uploadId(multipartUploadId).bucket(BUCKET).key(KEY))); } private static void waitUntilMultipartUploadExists() { diff --git a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/S3TransferManager.java b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/S3TransferManager.java index 5bc253c910f4..1dc5adb75072 100644 --- a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/S3TransferManager.java +++ b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/S3TransferManager.java @@ -28,7 +28,7 @@ import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.UploadPartCopyRequest; -import 
software.amazon.awssdk.transfer.s3.internal.DefaultS3TransferManager; +import software.amazon.awssdk.transfer.s3.internal.TransferManagerFactory; import software.amazon.awssdk.transfer.s3.model.CompletedDirectoryDownload; import software.amazon.awssdk.transfer.s3.model.CompletedDirectoryUpload; import software.amazon.awssdk.transfer.s3.model.Copy; @@ -678,7 +678,7 @@ static S3TransferManager create() { * Creates a default builder for {@link S3TransferManager}. */ static S3TransferManager.Builder builder() { - return DefaultS3TransferManager.builder(); + return new TransferManagerFactory.DefaultBuilder(); } /** diff --git a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/CrtS3TransferManager.java b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/CrtS3TransferManager.java new file mode 100644 index 000000000000..f118c84693f2 --- /dev/null +++ b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/CrtS3TransferManager.java @@ -0,0 +1,221 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.transfer.s3.internal; + +import static software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute.SDK_HTTP_EXECUTION_ATTRIBUTES; +import static software.amazon.awssdk.services.s3.internal.crt.S3InternalSdkHttpExecutionAttribute.CRT_PAUSE_RESUME_TOKEN; +import static software.amazon.awssdk.services.s3.internal.crt.S3InternalSdkHttpExecutionAttribute.METAREQUEST_PAUSE_OBSERVABLE; +import static software.amazon.awssdk.transfer.s3.internal.GenericS3TransferManager.DEFAULT_FILE_UPLOAD_CHUNK_SIZE; +import static software.amazon.awssdk.transfer.s3.internal.GenericS3TransferManager.assertNotUnsupportedArn; +import static software.amazon.awssdk.transfer.s3.internal.utils.FileUtils.fileNotModified; + +import java.util.concurrent.CompletableFuture; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.internal.async.FileAsyncRequestBody; +import software.amazon.awssdk.crt.s3.ResumeToken; +import software.amazon.awssdk.http.SdkHttpExecutionAttributes; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.internal.crt.S3MetaRequestPauseObservable; +import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.transfer.s3.S3TransferManager; +import software.amazon.awssdk.transfer.s3.internal.model.CrtFileUpload; +import software.amazon.awssdk.transfer.s3.internal.progress.TransferProgressUpdater; +import software.amazon.awssdk.transfer.s3.model.CompletedFileUpload; +import software.amazon.awssdk.transfer.s3.model.FileUpload; +import software.amazon.awssdk.transfer.s3.model.ResumableFileUpload; 
+import software.amazon.awssdk.transfer.s3.model.UploadFileRequest; +import software.amazon.awssdk.utils.CompletableFutureUtils; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; + +/** + * An implementation of {@link S3TransferManager} that uses CRT-based S3 client under the hood. + */ +@SdkInternalApi +class CrtS3TransferManager extends DelegatingS3TransferManager { + private static final Logger log = Logger.loggerFor(S3TransferManager.class); + private final S3AsyncClient s3AsyncClient; + + CrtS3TransferManager(TransferManagerConfiguration transferConfiguration, S3AsyncClient s3AsyncClient, + boolean isDefaultS3AsyncClient) { + super(new GenericS3TransferManager(transferConfiguration, s3AsyncClient, isDefaultS3AsyncClient)); + this.s3AsyncClient = s3AsyncClient; + } + + @Override + public FileUpload uploadFile(UploadFileRequest uploadFileRequest) { + Validate.paramNotNull(uploadFileRequest, "uploadFileRequest"); + S3MetaRequestPauseObservable observable = new S3MetaRequestPauseObservable(); + + AsyncRequestBody requestBody = + FileAsyncRequestBody.builder() + .path(uploadFileRequest.source()) + .chunkSizeInBytes(DEFAULT_FILE_UPLOAD_CHUNK_SIZE) + .build(); + + Consumer attachObservable = + b -> b.put(METAREQUEST_PAUSE_OBSERVABLE, observable); + + PutObjectRequest putObjectRequest = attachSdkAttribute(uploadFileRequest.putObjectRequest(), attachObservable); + + CompletableFuture returnFuture = new CompletableFuture<>(); + + TransferProgressUpdater progressUpdater = new TransferProgressUpdater(uploadFileRequest, requestBody); + progressUpdater.transferInitiated(); + requestBody = progressUpdater.wrapRequestBody(requestBody); + progressUpdater.registerCompletion(returnFuture); + + try { + assertNotUnsupportedArn(putObjectRequest.bucket(), "upload"); + + CompletableFuture crtFuture = + s3AsyncClient.putObject(putObjectRequest, requestBody); + + // Forward upload cancellation to CRT future + 
CompletableFutureUtils.forwardExceptionTo(returnFuture, crtFuture); + + CompletableFutureUtils.forwardTransformedResultTo(crtFuture, returnFuture, + r -> CompletedFileUpload.builder() + .response(r) + .build()); + } catch (Throwable throwable) { + returnFuture.completeExceptionally(throwable); + } + + + return new CrtFileUpload(returnFuture, progressUpdater.progress(), observable, uploadFileRequest); + } + + private FileUpload uploadFromBeginning(ResumableFileUpload resumableFileUpload, boolean fileModified, + boolean noResumeToken) { + UploadFileRequest uploadFileRequest = resumableFileUpload.uploadFileRequest(); + PutObjectRequest putObjectRequest = uploadFileRequest.putObjectRequest(); + if (fileModified) { + log.debug(() -> String.format("The file (%s) has been modified since " + + "the last pause. " + + "The SDK will upload the requested object in bucket" + + " (%s) with key (%s) from " + + "the " + + "beginning.", + uploadFileRequest.source(), + putObjectRequest.bucket(), + putObjectRequest.key())); + resumableFileUpload.multipartUploadId() + .ifPresent(id -> { + log.debug(() -> "Aborting previous upload with multipartUploadId: " + id); + s3AsyncClient.abortMultipartUpload( + AbortMultipartUploadRequest.builder() + .bucket(putObjectRequest.bucket()) + .key(putObjectRequest.key()) + .uploadId(id) + .build()) + .exceptionally(t -> { + log.warn(() -> String.format("Failed to abort previous multipart upload " + + "(id: %s)" + + ". You may need to call " + + "S3AsyncClient#abortMultiPartUpload to " + + "free all storage consumed by" + + " all parts. ", + id), t); + return null; + }); + }); + } + + if (noResumeToken) { + log.debug(() -> String.format("No resume token is found. 
" + + "The SDK will upload the requested object in bucket" + + " (%s) with key (%s) from " + + "the beginning.", + putObjectRequest.bucket(), + putObjectRequest.key())); + } + + + return uploadFile(uploadFileRequest); + } + + @Override + public FileUpload resumeUploadFile(ResumableFileUpload resumableFileUpload) { + Validate.paramNotNull(resumableFileUpload, "resumableFileUpload"); + + boolean fileModified = !fileNotModified(resumableFileUpload.fileLength(), + resumableFileUpload.fileLastModified(), + resumableFileUpload.uploadFileRequest().source()); + + boolean noResumeToken = !hasResumeToken(resumableFileUpload); + + if (fileModified || noResumeToken) { + return uploadFromBeginning(resumableFileUpload, fileModified, noResumeToken); + } + + return doResumeUpload(resumableFileUpload); + } + + private FileUpload doResumeUpload(ResumableFileUpload resumableFileUpload) { + UploadFileRequest uploadFileRequest = resumableFileUpload.uploadFileRequest(); + PutObjectRequest putObjectRequest = uploadFileRequest.putObjectRequest(); + ResumeToken resumeToken = crtResumeToken(resumableFileUpload); + + Consumer attachResumeToken = + b -> b.put(CRT_PAUSE_RESUME_TOKEN, resumeToken); + + PutObjectRequest modifiedPutObjectRequest = attachSdkAttribute(putObjectRequest, attachResumeToken); + + return uploadFile(uploadFileRequest.toBuilder() + .putObjectRequest(modifiedPutObjectRequest) + .build()); + } + + private static ResumeToken crtResumeToken(ResumableFileUpload resumableFileUpload) { + return new ResumeToken(new ResumeToken.PutResumeTokenBuilder() + .withNumPartsCompleted(resumableFileUpload.transferredParts().orElse(0L)) + .withTotalNumParts(resumableFileUpload.totalParts().orElse(0L)) + .withPartSize(resumableFileUpload.partSizeInBytes().getAsLong()) + .withUploadId(resumableFileUpload.multipartUploadId().orElse(null))); + } + + private boolean hasResumeToken(ResumableFileUpload resumableFileUpload) { + return resumableFileUpload.totalParts().isPresent() && 
resumableFileUpload.partSizeInBytes().isPresent(); + } + + private PutObjectRequest attachSdkAttribute(PutObjectRequest putObjectRequest, + Consumer builderMutation) { + SdkHttpExecutionAttributes modifiedAttributes = + putObjectRequest.overrideConfiguration().map(o -> o.executionAttributes().getAttribute(SDK_HTTP_EXECUTION_ATTRIBUTES)) + .map(b -> b.toBuilder().applyMutation(builderMutation).build()) + .orElseGet(() -> SdkHttpExecutionAttributes.builder().applyMutation(builderMutation).build()); + + Consumer attachSdkHttpAttributes = + b -> b.putExecutionAttribute(SDK_HTTP_EXECUTION_ATTRIBUTES, modifiedAttributes); + + AwsRequestOverrideConfiguration modifiedRequestOverrideConfig = + putObjectRequest.overrideConfiguration() + .map(o -> o.toBuilder().applyMutation(attachSdkHttpAttributes).build()) + .orElseGet(() -> AwsRequestOverrideConfiguration.builder() + .applyMutation(attachSdkHttpAttributes) + .build()); + + return putObjectRequest.toBuilder() + .overrideConfiguration(modifiedRequestOverrideConfig) + .build(); + } +} diff --git a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/DelegatingS3TransferManager.java b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/DelegatingS3TransferManager.java new file mode 100644 index 000000000000..f2929bf8f988 --- /dev/null +++ b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/DelegatingS3TransferManager.java @@ -0,0 +1,92 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.transfer.s3.internal; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.transfer.s3.S3TransferManager; +import software.amazon.awssdk.transfer.s3.model.Copy; +import software.amazon.awssdk.transfer.s3.model.CopyRequest; +import software.amazon.awssdk.transfer.s3.model.DirectoryDownload; +import software.amazon.awssdk.transfer.s3.model.DirectoryUpload; +import software.amazon.awssdk.transfer.s3.model.Download; +import software.amazon.awssdk.transfer.s3.model.DownloadDirectoryRequest; +import software.amazon.awssdk.transfer.s3.model.DownloadFileRequest; +import software.amazon.awssdk.transfer.s3.model.DownloadRequest; +import software.amazon.awssdk.transfer.s3.model.FileDownload; +import software.amazon.awssdk.transfer.s3.model.FileUpload; +import software.amazon.awssdk.transfer.s3.model.ResumableFileDownload; +import software.amazon.awssdk.transfer.s3.model.Upload; +import software.amazon.awssdk.transfer.s3.model.UploadDirectoryRequest; +import software.amazon.awssdk.transfer.s3.model.UploadFileRequest; +import software.amazon.awssdk.transfer.s3.model.UploadRequest; + + +/** + * An {@link S3TransferManager} that just delegates to another {@link S3TransferManager}. 
+ */ +@SdkInternalApi +abstract class DelegatingS3TransferManager implements S3TransferManager { + private final S3TransferManager delegate; + + protected DelegatingS3TransferManager(S3TransferManager delegate) { + this.delegate = delegate; + } + + @Override + public Upload upload(UploadRequest uploadRequest) { + return delegate.upload(uploadRequest); + } + + @Override + public FileUpload uploadFile(UploadFileRequest uploadFileRequest) { + return delegate.uploadFile(uploadFileRequest); + } + + @Override + public DirectoryUpload uploadDirectory(UploadDirectoryRequest uploadDirectoryRequest) { + return delegate.uploadDirectory(uploadDirectoryRequest); + } + + @Override + public Download download(DownloadRequest downloadRequest) { + return delegate.download(downloadRequest); + } + + @Override + public FileDownload downloadFile(DownloadFileRequest downloadRequest) { + return delegate.downloadFile(downloadRequest); + } + + @Override + public FileDownload resumeDownloadFile(ResumableFileDownload resumableFileDownload) { + return delegate.resumeDownloadFile(resumableFileDownload); + } + + @Override + public DirectoryDownload downloadDirectory(DownloadDirectoryRequest downloadDirectoryRequest) { + return delegate.downloadDirectory(downloadDirectoryRequest); + } + + @Override + public Copy copy(CopyRequest copyRequest) { + return delegate.copy(copyRequest); + } + + @Override + public void close() { + delegate.close(); + } +} diff --git a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/DefaultS3TransferManager.java b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/GenericS3TransferManager.java similarity index 60% rename from services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/DefaultS3TransferManager.java rename to services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/GenericS3TransferManager.java index 
7e4f302ee710..83d63a2a48ff 100644 --- a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/DefaultS3TransferManager.java +++ b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/GenericS3TransferManager.java @@ -15,38 +15,24 @@ package software.amazon.awssdk.transfer.s3.internal; -import static software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute.SDK_HTTP_EXECUTION_ATTRIBUTES; -import static software.amazon.awssdk.services.s3.internal.crt.S3InternalSdkHttpExecutionAttribute.CRT_PAUSE_RESUME_TOKEN; -import static software.amazon.awssdk.services.s3.internal.crt.S3InternalSdkHttpExecutionAttribute.METAREQUEST_PAUSE_OBSERVABLE; import static software.amazon.awssdk.transfer.s3.SizeConstant.MB; -import static software.amazon.awssdk.transfer.s3.internal.utils.FileUtils.fileNotModified; import static software.amazon.awssdk.transfer.s3.internal.utils.ResumableRequestConverter.toDownloadFileRequestAndTransformer; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; -import java.util.concurrent.Executor; -import java.util.function.Consumer; -import java.util.function.Supplier; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.annotations.SdkTestInternalApi; import software.amazon.awssdk.arns.Arn; -import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.core.FileTransformerConfiguration; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.core.internal.async.FileAsyncRequestBody; -import software.amazon.awssdk.core.internal.util.ClassLoaderHelper; -import software.amazon.awssdk.crt.s3.ResumeToken; -import 
software.amazon.awssdk.http.SdkHttpExecutionAttributes; import software.amazon.awssdk.services.s3.S3AsyncClient; -import software.amazon.awssdk.services.s3.internal.crt.S3CrtAsyncClient; -import software.amazon.awssdk.services.s3.internal.crt.S3MetaRequestPauseObservable; import software.amazon.awssdk.services.s3.internal.resource.S3AccessPointResource; import software.amazon.awssdk.services.s3.internal.resource.S3ArnConverter; import software.amazon.awssdk.services.s3.internal.resource.S3Resource; -import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CopyObjectResponse; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.GetObjectResponse; @@ -79,61 +65,43 @@ import software.amazon.awssdk.transfer.s3.model.FileDownload; import software.amazon.awssdk.transfer.s3.model.FileUpload; import software.amazon.awssdk.transfer.s3.model.ResumableFileDownload; -import software.amazon.awssdk.transfer.s3.model.ResumableFileUpload; import software.amazon.awssdk.transfer.s3.model.Upload; import software.amazon.awssdk.transfer.s3.model.UploadDirectoryRequest; import software.amazon.awssdk.transfer.s3.model.UploadFileRequest; import software.amazon.awssdk.transfer.s3.model.UploadRequest; import software.amazon.awssdk.transfer.s3.progress.TransferProgress; import software.amazon.awssdk.utils.CompletableFutureUtils; +import software.amazon.awssdk.utils.IoUtils; import software.amazon.awssdk.utils.Logger; import software.amazon.awssdk.utils.Pair; import software.amazon.awssdk.utils.Validate; @SdkInternalApi -public final class DefaultS3TransferManager implements S3TransferManager { +class GenericS3TransferManager implements S3TransferManager { + protected static final int DEFAULT_FILE_UPLOAD_CHUNK_SIZE = (int) (16 * MB); private static final Logger log = Logger.loggerFor(S3TransferManager.class); - private static final int DEFAULT_FILE_UPLOAD_CHUNK_SIZE 
= (int) (16 * MB); private final S3AsyncClient s3AsyncClient; - private final TransferManagerConfiguration transferConfiguration; private final UploadDirectoryHelper uploadDirectoryHelper; private final DownloadDirectoryHelper downloadDirectoryHelper; private final boolean isDefaultS3AsyncClient; - private final S3ClientType s3ClientType; - - public DefaultS3TransferManager(DefaultBuilder tmBuilder) { - transferConfiguration = resolveTransferManagerConfiguration(tmBuilder); - if (tmBuilder.s3AsyncClient == null) { - isDefaultS3AsyncClient = true; - s3AsyncClient = defaultS3AsyncClient().get(); - } else { - isDefaultS3AsyncClient = false; - s3AsyncClient = tmBuilder.s3AsyncClient; - } + private final TransferManagerConfiguration transferConfiguration; + GenericS3TransferManager(TransferManagerConfiguration transferConfiguration, + S3AsyncClient s3AsyncClient, + boolean isDefaultS3AsyncClient) { + this.s3AsyncClient = s3AsyncClient; + this.transferConfiguration = transferConfiguration; uploadDirectoryHelper = new UploadDirectoryHelper(transferConfiguration, this::uploadFile); ListObjectsHelper listObjectsHelper = new ListObjectsHelper(s3AsyncClient::listObjectsV2); downloadDirectoryHelper = new DownloadDirectoryHelper(transferConfiguration, listObjectsHelper, this::downloadFile); - - if (s3AsyncClient instanceof S3CrtAsyncClient) { - s3ClientType = S3ClientType.CRT_BASED; - } else if (s3AsyncClient.getClass().getName().equals("software.amazon.awssdk.services.s3.DefaultS3AsyncClient")) { - s3ClientType = S3ClientType.JAVA_BASED; - log.warn(() -> "The provided DefaultS3AsyncClient is not an instance of S3CrtAsyncClient, and thus multipart" - + " upload/download feature is not enabled and resumable file upload is not supported. 
To benefit " - + "from maximum throughput, consider using S3AsyncClient.crtBuilder().build() instead."); - } else { - s3ClientType = S3ClientType.OTHER; - log.debug(() -> "The provided S3AsyncClient is not an instance of S3CrtAsyncClient, and thus multipart" - + " upload/download feature may not be enabled and resumable file upload may not be supported."); - } + this.isDefaultS3AsyncClient = isDefaultS3AsyncClient; } @SdkTestInternalApi - DefaultS3TransferManager(S3AsyncClient s3CrtAsyncClient, + GenericS3TransferManager(S3AsyncClient s3CrtAsyncClient, UploadDirectoryHelper uploadDirectoryHelper, TransferManagerConfiguration configuration, DownloadDirectoryHelper downloadDirectoryHelper) { @@ -142,31 +110,6 @@ public DefaultS3TransferManager(DefaultBuilder tmBuilder) { this.transferConfiguration = configuration; this.uploadDirectoryHelper = uploadDirectoryHelper; this.downloadDirectoryHelper = downloadDirectoryHelper; - s3ClientType = s3CrtAsyncClient instanceof S3CrtAsyncClient ? S3ClientType.CRT_BASED : S3ClientType.JAVA_BASED; - } - - private static Supplier defaultS3AsyncClient() { - if (crtInClasspath()) { - return S3AsyncClient::crtCreate; - } - return S3AsyncClient::create; - } - - private static boolean crtInClasspath() { - try { - ClassLoaderHelper.loadClass("software.amazon.awssdk.crt.s3.S3Client", false); - } catch (ClassNotFoundException e) { - return false; - } - return true; - } - - private static TransferManagerConfiguration resolveTransferManagerConfiguration(DefaultBuilder tmBuilder) { - TransferManagerConfiguration.Builder transferConfigBuilder = TransferManagerConfiguration.builder(); - transferConfigBuilder.uploadDirectoryFollowSymbolicLinks(tmBuilder.uploadDirectoryFollowSymbolicLinks); - transferConfigBuilder.uploadDirectoryMaxDepth(tmBuilder.uploadDirectoryMaxDepth); - transferConfigBuilder.executor(tmBuilder.executor); - return transferConfigBuilder.build(); } @Override @@ -205,7 +148,6 @@ public Upload upload(UploadRequest uploadRequest) { 
@Override public FileUpload uploadFile(UploadFileRequest uploadFileRequest) { Validate.paramNotNull(uploadFileRequest, "uploadFileRequest"); - S3MetaRequestPauseObservable observable = new S3MetaRequestPauseObservable(); AsyncRequestBody requestBody = FileAsyncRequestBody.builder() @@ -213,10 +155,7 @@ public FileUpload uploadFile(UploadFileRequest uploadFileRequest) { .chunkSizeInBytes(DEFAULT_FILE_UPLOAD_CHUNK_SIZE) .build(); - Consumer attachObservable = - b -> b.put(METAREQUEST_PAUSE_OBSERVABLE, observable); - - PutObjectRequest putObjectRequest = attachSdkAttribute(uploadFileRequest.putObjectRequest(), attachObservable); + PutObjectRequest putObjectRequest = uploadFileRequest.putObjectRequest(); CompletableFuture returnFuture = new CompletableFuture<>(); @@ -228,13 +167,13 @@ public FileUpload uploadFile(UploadFileRequest uploadFileRequest) { try { assertNotUnsupportedArn(putObjectRequest.bucket(), "upload"); - CompletableFuture crtFuture = + CompletableFuture putObjectFuture = s3AsyncClient.putObject(putObjectRequest, requestBody); - // Forward upload cancellation to CRT future - CompletableFutureUtils.forwardExceptionTo(returnFuture, crtFuture); + // Forward upload cancellation to putObjectFuture + CompletableFutureUtils.forwardExceptionTo(returnFuture, putObjectFuture); - CompletableFutureUtils.forwardTransformedResultTo(crtFuture, returnFuture, + CompletableFutureUtils.forwardTransformedResultTo(putObjectFuture, returnFuture, r -> CompletedFileUpload.builder() .response(r) .build()); @@ -242,123 +181,7 @@ public FileUpload uploadFile(UploadFileRequest uploadFileRequest) { returnFuture.completeExceptionally(throwable); } - - return new DefaultFileUpload(returnFuture, progressUpdater.progress(), observable, uploadFileRequest, s3ClientType); - } - - @Override - public FileUpload resumeUploadFile(ResumableFileUpload resumableFileUpload) { - Validate.paramNotNull(resumableFileUpload, "resumableFileUpload"); - - boolean fileModified = 
!fileNotModified(resumableFileUpload.fileLength(), - resumableFileUpload.fileLastModified(), - resumableFileUpload.uploadFileRequest().source()); - - boolean noResumeToken = !hasResumeToken(resumableFileUpload); - - if (fileModified || noResumeToken) { - return uploadFromBeginning(resumableFileUpload, fileModified, noResumeToken); - } - - return doResumeUpload(resumableFileUpload); - } - - private FileUpload doResumeUpload(ResumableFileUpload resumableFileUpload) { - UploadFileRequest uploadFileRequest = resumableFileUpload.uploadFileRequest(); - PutObjectRequest putObjectRequest = uploadFileRequest.putObjectRequest(); - ResumeToken resumeToken = crtResumeToken(resumableFileUpload); - - Consumer attachResumeToken = - b -> b.put(CRT_PAUSE_RESUME_TOKEN, resumeToken); - - PutObjectRequest modifiedPutObjectRequest = attachSdkAttribute(putObjectRequest, attachResumeToken); - - return uploadFile(uploadFileRequest.toBuilder() - .putObjectRequest(modifiedPutObjectRequest) - .build()); - } - - private static ResumeToken crtResumeToken(ResumableFileUpload resumableFileUpload) { - return new ResumeToken(new ResumeToken.PutResumeTokenBuilder() - .withNumPartsCompleted(resumableFileUpload.transferredParts().orElse(0L)) - .withTotalNumParts(resumableFileUpload.totalParts().orElse(0L)) - .withPartSize(resumableFileUpload.partSizeInBytes().getAsLong()) - .withUploadId(resumableFileUpload.multipartUploadId().orElse(null))); - } - - private FileUpload uploadFromBeginning(ResumableFileUpload resumableFileUpload, boolean fileModified, - boolean noResumeToken) { - UploadFileRequest uploadFileRequest = resumableFileUpload.uploadFileRequest(); - PutObjectRequest putObjectRequest = uploadFileRequest.putObjectRequest(); - if (fileModified) { - log.debug(() -> String.format("The file (%s) has been modified since " - + "the last pause. 
" + - "The SDK will upload the requested object in bucket" - + " (%s) with key (%s) from " - + "the " - + "beginning.", - uploadFileRequest.source(), - putObjectRequest.bucket(), - putObjectRequest.key())); - resumableFileUpload.multipartUploadId() - .ifPresent(id -> { - log.debug(() -> "Aborting previous upload with multipartUploadId: " + id); - s3AsyncClient.abortMultipartUpload( - AbortMultipartUploadRequest.builder() - .bucket(putObjectRequest.bucket()) - .key(putObjectRequest.key()) - .uploadId(id) - .build()) - .exceptionally(t -> { - log.warn(() -> String.format("Failed to abort previous multipart upload " - + "(id: %s)" - + ". You may need to call " - + "S3AsyncClient#abortMultiPartUpload to " - + "free all storage consumed by" - + " all parts. ", - id), t); - return null; - }); - }); - } - - if (noResumeToken) { - log.debug(() -> String.format("No resume token is found. " + - "The SDK will upload the requested object in bucket" - + " (%s) with key (%s) from " - + "the beginning.", - putObjectRequest.bucket(), - putObjectRequest.key())); - } - - - return uploadFile(uploadFileRequest); - } - - private boolean hasResumeToken(ResumableFileUpload resumableFileUpload) { - return resumableFileUpload.totalParts().isPresent() && resumableFileUpload.partSizeInBytes().isPresent(); - } - - private PutObjectRequest attachSdkAttribute(PutObjectRequest putObjectRequest, - Consumer builderMutation) { - SdkHttpExecutionAttributes modifiedAttributes = - putObjectRequest.overrideConfiguration().map(o -> o.executionAttributes().getAttribute(SDK_HTTP_EXECUTION_ATTRIBUTES)) - .map(b -> b.toBuilder().applyMutation(builderMutation).build()) - .orElseGet(() -> SdkHttpExecutionAttributes.builder().applyMutation(builderMutation).build()); - - Consumer attachSdkHttpAttributes = - b -> b.putExecutionAttribute(SDK_HTTP_EXECUTION_ATTRIBUTES, modifiedAttributes); - - AwsRequestOverrideConfiguration modifiedRequestOverrideConfig = - putObjectRequest.overrideConfiguration() - .map(o -> 
o.toBuilder().applyMutation(attachSdkHttpAttributes).build()) - .orElseGet(() -> AwsRequestOverrideConfiguration.builder() - .applyMutation(attachSdkHttpAttributes) - .build()); - - return putObjectRequest.toBuilder() - .overrideConfiguration(modifiedRequestOverrideConfig) - .build(); + return new DefaultFileUpload(returnFuture, progressUpdater.progress(), uploadFileRequest); } @Override @@ -563,16 +386,12 @@ public Copy copy(CopyRequest copyRequest) { @Override public void close() { if (isDefaultS3AsyncClient) { - s3AsyncClient.close(); + IoUtils.closeQuietly(s3AsyncClient, log.logger()); } - transferConfiguration.close(); - } - - public static Builder builder() { - return new DefaultBuilder(); + IoUtils.closeQuietly(transferConfiguration, log.logger()); } - private static void assertNotUnsupportedArn(String bucket, String operation) { + protected static void assertNotUnsupportedArn(String bucket, String operation) { if (bucket == null) { return; } @@ -608,59 +427,4 @@ private static boolean isMrapArn(Arn arn) { return !s3EndpointResource.region().isPresent(); } - - private static final class DefaultBuilder implements S3TransferManager.Builder { - private S3AsyncClient s3AsyncClient; - private Executor executor; - private Boolean uploadDirectoryFollowSymbolicLinks; - private Integer uploadDirectoryMaxDepth; - - private DefaultBuilder() { - } - - @Override - public Builder s3Client(S3AsyncClient s3AsyncClient) { - this.s3AsyncClient = s3AsyncClient; - return this; - } - - @Override - public Builder executor(Executor executor) { - this.executor = executor; - return this; - } - - @Override - public Builder uploadDirectoryFollowSymbolicLinks(Boolean uploadDirectoryFollowSymbolicLinks) { - this.uploadDirectoryFollowSymbolicLinks = uploadDirectoryFollowSymbolicLinks; - return this; - } - - public void setUploadDirectoryFollowSymbolicLinks(Boolean followSymbolicLinks) { - uploadDirectoryFollowSymbolicLinks(followSymbolicLinks); - } - - public Boolean 
getUploadDirectoryFollowSymbolicLinks() { - return uploadDirectoryFollowSymbolicLinks; - } - - @Override - public Builder uploadDirectoryMaxDepth(Integer uploadDirectoryMaxDepth) { - this.uploadDirectoryMaxDepth = uploadDirectoryMaxDepth; - return this; - } - - public void setUploadDirectoryMaxDepth(Integer uploadDirectoryMaxDepth) { - uploadDirectoryMaxDepth(uploadDirectoryMaxDepth); - } - - public Integer getUploadDirectoryMaxDepth() { - return uploadDirectoryMaxDepth; - } - - @Override - public S3TransferManager build() { - return new DefaultS3TransferManager(this); - } - } } diff --git a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/TransferConfigurationOption.java b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/TransferConfigurationOption.java index c80478a66737..d96677d71d5d 100644 --- a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/TransferConfigurationOption.java +++ b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/TransferConfigurationOption.java @@ -20,7 +20,7 @@ import software.amazon.awssdk.utils.AttributeMap; /** - * A set of internal options required by the {@link DefaultS3TransferManager} via {@link TransferManagerConfiguration}. + * A set of internal options required by the {@link TransferManagerFactory} via {@link TransferManagerConfiguration}. 
* It contains the default settings * */ diff --git a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/TransferManagerConfiguration.java b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/TransferManagerConfiguration.java index d2888430ed4e..a8f71c7fb59b 100644 --- a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/TransferManagerConfiguration.java +++ b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/TransferManagerConfiguration.java @@ -24,7 +24,6 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.transfer.s3.S3TransferManager; import software.amazon.awssdk.transfer.s3.model.UploadDirectoryRequest; import software.amazon.awssdk.utils.AttributeMap; import software.amazon.awssdk.utils.ExecutorUtils; @@ -32,7 +31,7 @@ import software.amazon.awssdk.utils.ThreadFactoryBuilder; /** - * Contains resolved configuration settings for {@link S3TransferManager}. + * Contains resolved configuration settings for {@link GenericS3TransferManager}. * This configuration object can be {@link #close()}d to release all closeable resources configured within it. */ @SdkInternalApi diff --git a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/TransferManagerFactory.java b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/TransferManagerFactory.java new file mode 100644 index 000000000000..a87d8d6fcd60 --- /dev/null +++ b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/TransferManagerFactory.java @@ -0,0 +1,142 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.transfer.s3.internal; + +import java.util.concurrent.Executor; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.internal.util.ClassLoaderHelper; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.internal.crt.S3CrtAsyncClient; +import software.amazon.awssdk.transfer.s3.S3TransferManager; +import software.amazon.awssdk.utils.Logger; + + +/** + * An {@link S3TransferManager} factory that instantiate an {@link S3TransferManager} implementation based on the underlying + * {@link S3AsyncClient}. 
+ */ +@SdkInternalApi +public final class TransferManagerFactory { + private static final Logger log = Logger.loggerFor(S3TransferManager.class); + + private TransferManagerFactory() { + } + + public static S3TransferManager createTransferManager(DefaultBuilder tmBuilder) { + TransferManagerConfiguration transferConfiguration = resolveTransferManagerConfiguration(tmBuilder); + S3AsyncClient s3AsyncClient; + boolean isDefaultS3AsyncClient; + if (tmBuilder.s3AsyncClient == null) { + isDefaultS3AsyncClient = true; + s3AsyncClient = defaultS3AsyncClient().get(); + } else { + isDefaultS3AsyncClient = false; + s3AsyncClient = tmBuilder.s3AsyncClient; + } + + if (s3AsyncClient instanceof S3CrtAsyncClient) { + return new CrtS3TransferManager(transferConfiguration, s3AsyncClient, isDefaultS3AsyncClient); + } + + if (s3AsyncClient.getClass().getName().equals("software.amazon.awssdk.services.s3.DefaultS3AsyncClient")) { + log.warn(() -> "The provided DefaultS3AsyncClient is not an instance of S3CrtAsyncClient, and thus multipart" + + " upload/download feature is not enabled and resumable file upload is not supported. 
To benefit " + + "from maximum throughput, consider using S3AsyncClient.crtBuilder().build() instead."); + } else { + log.debug(() -> "The provided S3AsyncClient is not an instance of S3CrtAsyncClient, and thus multipart" + + " upload/download feature may not be enabled and resumable file upload may not be supported."); + } + + return new GenericS3TransferManager(transferConfiguration, s3AsyncClient, isDefaultS3AsyncClient); + } + + private static Supplier defaultS3AsyncClient() { + if (crtInClasspath()) { + return S3AsyncClient::crtCreate; + } + return S3AsyncClient::create; + } + + private static boolean crtInClasspath() { + try { + ClassLoaderHelper.loadClass("software.amazon.awssdk.crt.s3.S3Client", false); + } catch (ClassNotFoundException e) { + return false; + } + return true; + } + + private static TransferManagerConfiguration resolveTransferManagerConfiguration(DefaultBuilder tmBuilder) { + TransferManagerConfiguration.Builder transferConfigBuilder = TransferManagerConfiguration.builder(); + transferConfigBuilder.uploadDirectoryFollowSymbolicLinks(tmBuilder.uploadDirectoryFollowSymbolicLinks); + transferConfigBuilder.uploadDirectoryMaxDepth(tmBuilder.uploadDirectoryMaxDepth); + transferConfigBuilder.executor(tmBuilder.executor); + return transferConfigBuilder.build(); + } + + public static final class DefaultBuilder implements S3TransferManager.Builder { + private S3AsyncClient s3AsyncClient; + private Executor executor; + private Boolean uploadDirectoryFollowSymbolicLinks; + private Integer uploadDirectoryMaxDepth; + + @Override + public DefaultBuilder s3Client(S3AsyncClient s3AsyncClient) { + this.s3AsyncClient = s3AsyncClient; + return this; + } + + @Override + public DefaultBuilder executor(Executor executor) { + this.executor = executor; + return this; + } + + @Override + public DefaultBuilder uploadDirectoryFollowSymbolicLinks(Boolean uploadDirectoryFollowSymbolicLinks) { + this.uploadDirectoryFollowSymbolicLinks = uploadDirectoryFollowSymbolicLinks; 
+ return this; + } + + public void setUploadDirectoryFollowSymbolicLinks(Boolean followSymbolicLinks) { + uploadDirectoryFollowSymbolicLinks(followSymbolicLinks); + } + + public Boolean getUploadDirectoryFollowSymbolicLinks() { + return uploadDirectoryFollowSymbolicLinks; + } + + @Override + public DefaultBuilder uploadDirectoryMaxDepth(Integer uploadDirectoryMaxDepth) { + this.uploadDirectoryMaxDepth = uploadDirectoryMaxDepth; + return this; + } + + public void setUploadDirectoryMaxDepth(Integer uploadDirectoryMaxDepth) { + uploadDirectoryMaxDepth(uploadDirectoryMaxDepth); + } + + public Integer getUploadDirectoryMaxDepth() { + return uploadDirectoryMaxDepth; + } + + @Override + public S3TransferManager build() { + return createTransferManager(this); + } + } +} diff --git a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/model/CrtFileUpload.java b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/model/CrtFileUpload.java new file mode 100644 index 000000000000..4f7a4a757c2c --- /dev/null +++ b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/model/CrtFileUpload.java @@ -0,0 +1,157 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.transfer.s3.internal.model; + +import java.io.File; +import java.time.Instant; +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.crt.CrtRuntimeException; +import software.amazon.awssdk.crt.s3.ResumeToken; +import software.amazon.awssdk.services.s3.internal.crt.S3MetaRequestPauseObservable; +import software.amazon.awssdk.transfer.s3.model.CompletedFileUpload; +import software.amazon.awssdk.transfer.s3.model.FileUpload; +import software.amazon.awssdk.transfer.s3.model.ResumableFileUpload; +import software.amazon.awssdk.transfer.s3.model.UploadFileRequest; +import software.amazon.awssdk.transfer.s3.progress.TransferProgress; +import software.amazon.awssdk.utils.Lazy; +import software.amazon.awssdk.utils.ToString; +import software.amazon.awssdk.utils.Validate; + +@SdkInternalApi +public final class CrtFileUpload implements FileUpload { + private final Lazy resumableFileUpload; + private final CompletableFuture completionFuture; + private final TransferProgress progress; + private final UploadFileRequest request; + private final S3MetaRequestPauseObservable observable; + + public CrtFileUpload(CompletableFuture completionFuture, + TransferProgress progress, + S3MetaRequestPauseObservable observable, + UploadFileRequest request) { + this.completionFuture = Validate.paramNotNull(completionFuture, "completionFuture"); + this.progress = Validate.paramNotNull(progress, "progress"); + this.observable = Validate.paramNotNull(observable, "observable"); + this.request = Validate.paramNotNull(request, "request"); + this.resumableFileUpload = new Lazy<>(this::doPause); + } + + @Override + public ResumableFileUpload pause() { + return resumableFileUpload.getValue(); + } + + private ResumableFileUpload doPause() { + File sourceFile = request.source().toFile(); + if (completionFuture.isDone()) { + Instant fileLastModified = 
Instant.ofEpochMilli(sourceFile.lastModified()); + return ResumableFileUpload.builder() + .fileLastModified(fileLastModified) + .fileLength(sourceFile.length()) + .uploadFileRequest(request) + .build(); + } + + + Instant fileLastModified = Instant.ofEpochMilli(sourceFile + .lastModified()); + ResumeToken token = null; + try { + token = observable.pause(); + } catch (CrtRuntimeException exception) { + // CRT throws exception if it is a single part + if (!exception.errorName.equals("AWS_ERROR_UNSUPPORTED_OPERATION")) { + throw exception; + } + } + + completionFuture.cancel(true); + // Upload hasn't started yet, or it's a single object upload + if (token == null) { + return ResumableFileUpload.builder() + .fileLastModified(fileLastModified) + .fileLength(sourceFile.length()) + .uploadFileRequest(request) + .build(); + } + + return ResumableFileUpload.builder() + .multipartUploadId(token.getUploadId()) + .totalParts(token.getTotalNumParts()) + .transferredParts(token.getNumPartsCompleted()) + .partSizeInBytes(token.getPartSize()) + .fileLastModified(fileLastModified) + .fileLength(sourceFile.length()) + .uploadFileRequest(request) + .build(); + } + + @Override + public CompletableFuture completionFuture() { + return completionFuture; + } + + @Override + public TransferProgress progress() { + return progress; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CrtFileUpload that = (CrtFileUpload) o; + + if (!resumableFileUpload.equals(that.resumableFileUpload)) { + return false; + } + if (!completionFuture.equals(that.completionFuture)) { + return false; + } + if (!progress.equals(that.progress)) { + return false; + } + if (!request.equals(that.request)) { + return false; + } + return observable == that.observable; + } + + @Override + public int hashCode() { + int result = resumableFileUpload.hashCode(); + result = 31 * result + completionFuture.hashCode(); + 
result = 31 * result + progress.hashCode(); + result = 31 * result + request.hashCode(); + result = 31 * result + observable.hashCode(); + return result; + } + + @Override + public String toString() { + return ToString.builder("DefaultFileUpload") + .add("completionFuture", completionFuture) + .add("progress", progress) + .add("request", request) + .build(); + } +} diff --git a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/model/DefaultFileUpload.java b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/model/DefaultFileUpload.java index 1cfb2975a44e..1579c64dbdf1 100644 --- a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/model/DefaultFileUpload.java +++ b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/internal/model/DefaultFileUpload.java @@ -15,99 +15,35 @@ package software.amazon.awssdk.transfer.s3.internal.model; -import java.io.File; -import java.time.Instant; import java.util.concurrent.CompletableFuture; import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.crt.CrtRuntimeException; -import software.amazon.awssdk.crt.s3.ResumeToken; -import software.amazon.awssdk.services.s3.internal.crt.S3MetaRequestPauseObservable; -import software.amazon.awssdk.transfer.s3.internal.S3ClientType; import software.amazon.awssdk.transfer.s3.model.CompletedFileUpload; import software.amazon.awssdk.transfer.s3.model.FileUpload; import software.amazon.awssdk.transfer.s3.model.ResumableFileUpload; import software.amazon.awssdk.transfer.s3.model.UploadFileRequest; import software.amazon.awssdk.transfer.s3.progress.TransferProgress; -import software.amazon.awssdk.utils.Lazy; import software.amazon.awssdk.utils.ToString; import software.amazon.awssdk.utils.Validate; @SdkInternalApi public final class DefaultFileUpload implements FileUpload { - private final Lazy 
resumableFileUpload; private final CompletableFuture completionFuture; private final TransferProgress progress; private final UploadFileRequest request; - private final S3MetaRequestPauseObservable observable; - private final S3ClientType clientType; public DefaultFileUpload(CompletableFuture completionFuture, TransferProgress progress, - S3MetaRequestPauseObservable observable, - UploadFileRequest request, - S3ClientType clientType) { + UploadFileRequest request) { this.completionFuture = Validate.paramNotNull(completionFuture, "completionFuture"); this.progress = Validate.paramNotNull(progress, "progress"); - this.observable = Validate.paramNotNull(observable, "observable"); this.request = Validate.paramNotNull(request, "request"); - this.clientType = Validate.paramNotNull(clientType, "clientType"); - this.resumableFileUpload = new Lazy<>(this::doPause); } @Override public ResumableFileUpload pause() { - return resumableFileUpload.getValue(); - } - - private ResumableFileUpload doPause() { - if (clientType != S3ClientType.CRT_BASED) { - throw new UnsupportedOperationException("Pausing an upload is not supported in a non CRT-based S3 Client. 
For " - + "upload pause support, pass a CRT-based S3 client to S3TransferManager " - + "instead: S3AsyncClient.crtBuilder().build();"); - } - - File sourceFile = request.source().toFile(); - if (completionFuture.isDone()) { - Instant fileLastModified = Instant.ofEpochMilli(sourceFile.lastModified()); - return ResumableFileUpload.builder() - .fileLastModified(fileLastModified) - .fileLength(sourceFile.length()) - .uploadFileRequest(request) - .build(); - } - - - Instant fileLastModified = Instant.ofEpochMilli(sourceFile - .lastModified()); - ResumeToken token = null; - try { - token = observable.pause(); - } catch (CrtRuntimeException exception) { - // CRT throws exception if it is a single part - if (!exception.errorName.equals("AWS_ERROR_UNSUPPORTED_OPERATION")) { - throw exception; - } - } - - completionFuture.cancel(true); - // Upload hasn't started yet, or it's a single object upload - if (token == null) { - return ResumableFileUpload.builder() - .fileLastModified(fileLastModified) - .fileLength(sourceFile.length()) - .uploadFileRequest(request) - .build(); - } - - return ResumableFileUpload.builder() - .multipartUploadId(token.getUploadId()) - .totalParts(token.getTotalNumParts()) - .transferredParts(token.getNumPartsCompleted()) - .partSizeInBytes(token.getPartSize()) - .fileLastModified(fileLastModified) - .fileLength(sourceFile.length()) - .uploadFileRequest(request) - .build(); + throw new UnsupportedOperationException("Pausing an upload is not supported in a non CRT-based S3 Client. 
For " + + "upload pause support, pass an AWS CRT-based S3 client to S3TransferManager" + + "instead: S3AsyncClient.crtBuilder().build();"); } @Override @@ -131,32 +67,20 @@ public boolean equals(Object o) { DefaultFileUpload that = (DefaultFileUpload) o; - if (!resumableFileUpload.equals(that.resumableFileUpload)) { - return false; - } if (!completionFuture.equals(that.completionFuture)) { return false; } if (!progress.equals(that.progress)) { return false; } - if (!request.equals(that.request)) { - return false; - } - if (clientType != that.clientType) { - return false; - } - return observable == that.observable; + return request.equals(that.request); } @Override public int hashCode() { - int result = resumableFileUpload.hashCode(); - result = 31 * result + completionFuture.hashCode(); + int result = completionFuture.hashCode(); result = 31 * result + progress.hashCode(); result = 31 * result + request.hashCode(); - result = 31 * result + observable.hashCode(); - result = 31 * result + clientType.hashCode(); return result; } diff --git a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/CrtFileUploadTest.java b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/CrtFileUploadTest.java new file mode 100644 index 000000000000..9aecc1c05f29 --- /dev/null +++ b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/CrtFileUploadTest.java @@ -0,0 +1,233 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.transfer.s3.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.transfer.s3.SizeConstant.MB; + +import com.google.common.jimfs.Jimfs; +import java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileSystem; +import java.nio.file.Files; +import java.time.Instant; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import nl.jqno.equalsverifier.EqualsVerifier; +import org.apache.commons.lang3.RandomStringUtils; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; +import software.amazon.awssdk.crt.CrtRuntimeException; +import software.amazon.awssdk.crt.s3.ResumeToken; +import software.amazon.awssdk.crt.s3.S3MetaRequest; +import software.amazon.awssdk.services.s3.internal.crt.S3MetaRequestPauseObservable; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.transfer.s3.internal.model.CrtFileUpload; +import software.amazon.awssdk.transfer.s3.internal.progress.DefaultTransferProgressSnapshot; +import software.amazon.awssdk.transfer.s3.model.CompletedFileUpload; +import software.amazon.awssdk.transfer.s3.model.ResumableFileUpload; +import software.amazon.awssdk.transfer.s3.model.UploadFileRequest; +import software.amazon.awssdk.transfer.s3.progress.TransferProgress; + +class CrtFileUploadTest { + private static final int TOTAL_PARTS = 10; + private static final int NUM_OF_PARTS_COMPLETED = 5; + private static final long PART_SIZE_IN_BYTES = 8 * MB; + private static final String MULTIPART_UPLOAD_ID = "someId"; + 
private S3MetaRequest metaRequest; + private static FileSystem fileSystem; + private static File file; + private static ResumeToken token; + + @BeforeAll + public static void setUp() throws IOException { + fileSystem = Jimfs.newFileSystem(); + file = File.createTempFile("test", UUID.randomUUID().toString()); + Files.write(file.toPath(), RandomStringUtils.random(2000).getBytes(StandardCharsets.UTF_8)); + token = new ResumeToken(new ResumeToken.PutResumeTokenBuilder() + .withNumPartsCompleted(NUM_OF_PARTS_COMPLETED) + .withTotalNumParts(TOTAL_PARTS) + .withPartSize(PART_SIZE_IN_BYTES) + .withUploadId(MULTIPART_UPLOAD_ID)); + } + + @AfterAll + public static void tearDown() throws IOException { + file.delete(); + } + + @BeforeEach + void setUpBeforeEachTest() { + metaRequest = Mockito.mock(S3MetaRequest.class); + } + + @Test + void equals_hashcode() { + EqualsVerifier.forClass(CrtFileUpload.class) + .withNonnullFields("completionFuture", "progress", "request", "observable", "resumableFileUpload") + .withPrefabValues(S3MetaRequestPauseObservable.class, new S3MetaRequestPauseObservable(), + new S3MetaRequestPauseObservable()) + .verify(); + } + + @Test + void pause_futureCompleted_shouldReturnNormally() { + PutObjectResponse putObjectResponse = PutObjectResponse.builder() + .build(); + CompletableFuture future = + CompletableFuture.completedFuture(CompletedFileUpload.builder() + .response(putObjectResponse) + .build()); + TransferProgress transferProgress = Mockito.mock(TransferProgress.class); + when(transferProgress.snapshot()).thenReturn(DefaultTransferProgressSnapshot.builder() + .sdkResponse(putObjectResponse) + .transferredBytes(0L) + .build()); + S3MetaRequestPauseObservable observable = new S3MetaRequestPauseObservable(); + + UploadFileRequest request = uploadFileRequest(); + + CrtFileUpload fileUpload = + new CrtFileUpload(future, transferProgress, observable, request); + + observable.subscribe(metaRequest); + + ResumableFileUpload resumableFileUpload = 
fileUpload.pause(); + Mockito.verify(metaRequest, Mockito.never()).pause(); + assertThat(resumableFileUpload.totalParts()).isEmpty(); + assertThat(resumableFileUpload.partSizeInBytes()).isEmpty(); + assertThat(resumableFileUpload.multipartUploadId()).isEmpty(); + assertThat(resumableFileUpload.fileLength()).isEqualTo(file.length()); + assertThat(resumableFileUpload.uploadFileRequest()).isEqualTo(request); + assertThat(resumableFileUpload.fileLastModified()).isEqualTo(Instant.ofEpochMilli(file.lastModified())); + } + + @Test + void pauseTwice_shouldReturnTheSame() { + CompletableFuture future = + new CompletableFuture<>(); + TransferProgress transferProgress = Mockito.mock(TransferProgress.class); + when(transferProgress.snapshot()).thenReturn(DefaultTransferProgressSnapshot.builder() + .transferredBytes(1000L) + .build()); + UploadFileRequest request = uploadFileRequest(); + + S3MetaRequestPauseObservable observable = new S3MetaRequestPauseObservable(); + when(metaRequest.pause()).thenReturn(token); + observable.subscribe(metaRequest); + + CrtFileUpload fileUpload = + new CrtFileUpload(future, transferProgress, observable, request); + + ResumableFileUpload resumableFileUpload = fileUpload.pause(); + ResumableFileUpload resumableFileUpload2 = fileUpload.pause(); + + assertThat(resumableFileUpload).isEqualTo(resumableFileUpload2); + } + + @Test + void pause_crtThrowException_shouldPropogate() { + CompletableFuture future = + new CompletableFuture<>(); + TransferProgress transferProgress = Mockito.mock(TransferProgress.class); + when(transferProgress.snapshot()).thenReturn(DefaultTransferProgressSnapshot.builder() + .transferredBytes(1000L) + .build()); + UploadFileRequest request = uploadFileRequest(); + + S3MetaRequestPauseObservable observable = new S3MetaRequestPauseObservable(); + CrtRuntimeException exception = new CrtRuntimeException("exception"); + when(metaRequest.pause()).thenThrow(exception); + observable.subscribe(metaRequest); + + CrtFileUpload fileUpload 
= + new CrtFileUpload(future, transferProgress, observable, request); + + assertThatThrownBy(() -> fileUpload.pause()).isSameAs(exception); + } + + @Test + void pause_futureNotComplete_shouldPause() { + CompletableFuture future = + new CompletableFuture<>(); + TransferProgress transferProgress = Mockito.mock(TransferProgress.class); + when(transferProgress.snapshot()).thenReturn(DefaultTransferProgressSnapshot.builder() + .transferredBytes(0L) + .build()); + S3MetaRequestPauseObservable observable = new S3MetaRequestPauseObservable(); + when(metaRequest.pause()).thenReturn(token); + UploadFileRequest request = uploadFileRequest(); + + CrtFileUpload fileUpload = + new CrtFileUpload(future, transferProgress, observable, request); + + observable.subscribe(metaRequest); + + ResumableFileUpload resumableFileUpload = fileUpload.pause(); + Mockito.verify(metaRequest).pause(); + assertThat(resumableFileUpload.totalParts()).hasValue(TOTAL_PARTS); + assertThat(resumableFileUpload.partSizeInBytes()).hasValue(PART_SIZE_IN_BYTES); + assertThat(resumableFileUpload.multipartUploadId()).hasValue(MULTIPART_UPLOAD_ID); + assertThat(resumableFileUpload.transferredParts()).hasValue(NUM_OF_PARTS_COMPLETED); + assertThat(resumableFileUpload.fileLength()).isEqualTo(file.length()); + assertThat(resumableFileUpload.uploadFileRequest()).isEqualTo(request); + assertThat(resumableFileUpload.fileLastModified()).isEqualTo(Instant.ofEpochMilli(file.lastModified())); + } + + @Test + void pause_singlePart_shouldPause() { + PutObjectResponse putObjectResponse = PutObjectResponse.builder() + .build(); + CompletableFuture future = + new CompletableFuture<>(); + TransferProgress transferProgress = Mockito.mock(TransferProgress.class); + when(transferProgress.snapshot()).thenReturn(DefaultTransferProgressSnapshot.builder() + .sdkResponse(putObjectResponse) + .transferredBytes(0L) + .build()); + S3MetaRequestPauseObservable observable = new S3MetaRequestPauseObservable(); + 
when(metaRequest.pause()).thenThrow(new CrtRuntimeException(6)); + UploadFileRequest request = uploadFileRequest(); + + CrtFileUpload fileUpload = + new CrtFileUpload(future, transferProgress, observable, request); + + observable.subscribe(metaRequest); + + ResumableFileUpload resumableFileUpload = fileUpload.pause(); + Mockito.verify(metaRequest).pause(); + assertThat(resumableFileUpload.totalParts()).isEmpty(); + assertThat(resumableFileUpload.partSizeInBytes()).isEmpty(); + assertThat(resumableFileUpload.multipartUploadId()).isEmpty(); + assertThat(resumableFileUpload.fileLength()).isEqualTo(file.length()); + assertThat(resumableFileUpload.uploadFileRequest()).isEqualTo(request); + assertThat(resumableFileUpload.fileLastModified()).isEqualTo(Instant.ofEpochMilli(file.lastModified())); + } + + + private UploadFileRequest uploadFileRequest() { + return UploadFileRequest.builder() + .source(file) + .putObjectRequest(p -> p.key("test").bucket("bucket")) + .build(); + } +} \ No newline at end of file diff --git a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3TransferManagerPauseAndResumeTest.java b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/CrtTransferManagerPauseAndResumeTest.java similarity index 98% rename from services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3TransferManagerPauseAndResumeTest.java rename to services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/CrtTransferManagerPauseAndResumeTest.java index 181e0e723fab..8544e37a6470 100644 --- a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3TransferManagerPauseAndResumeTest.java +++ b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/CrtTransferManagerPauseAndResumeTest.java @@ -38,7 +38,6 @@ import 
software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.exception.SdkException; -import software.amazon.awssdk.core.exception.SdkServiceException; import software.amazon.awssdk.services.s3.internal.crt.S3CrtAsyncClient; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.GetObjectResponse; @@ -51,7 +50,7 @@ import software.amazon.awssdk.transfer.s3.model.ResumableFileDownload; import software.amazon.awssdk.utils.CompletableFutureUtils; -class S3TransferManagerPauseAndResumeTest { +class CrtTransferManagerPauseAndResumeTest { private S3CrtAsyncClient mockS3Crt; private S3TransferManager tm; private UploadDirectoryHelper uploadDirectoryHelper; @@ -67,7 +66,7 @@ public void methodSetup() throws IOException { uploadDirectoryHelper = mock(UploadDirectoryHelper.class); configuration = mock(TransferManagerConfiguration.class); downloadDirectoryHelper = mock(DownloadDirectoryHelper.class); - tm = new DefaultS3TransferManager(mockS3Crt, uploadDirectoryHelper, configuration, downloadDirectoryHelper); + tm = new CrtS3TransferManager(configuration, mockS3Crt, false); } @AfterEach diff --git a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/DefaultFileUploadTest.java b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/DefaultFileUploadTest.java index 41979405ae94..539433734920 100644 --- a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/DefaultFileUploadTest.java +++ b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/DefaultFileUploadTest.java @@ -15,236 +15,38 @@ package software.amazon.awssdk.transfer.s3.internal; -import static org.assertj.core.api.Assertions.assertThat; import static 
org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.Mockito.when; -import static software.amazon.awssdk.transfer.s3.SizeConstant.MB; -import com.google.common.jimfs.Jimfs; -import java.io.File; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.FileSystem; -import java.nio.file.Files; -import java.time.Instant; -import java.util.UUID; +import java.nio.file.Paths; import java.util.concurrent.CompletableFuture; import nl.jqno.equalsverifier.EqualsVerifier; -import org.apache.commons.lang3.RandomStringUtils; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.Mockito; -import software.amazon.awssdk.crt.CrtRuntimeException; -import software.amazon.awssdk.crt.s3.ResumeToken; -import software.amazon.awssdk.crt.s3.S3MetaRequest; -import software.amazon.awssdk.services.s3.internal.crt.S3MetaRequestPauseObservable; -import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.transfer.s3.S3TransferManager; import software.amazon.awssdk.transfer.s3.internal.model.DefaultFileUpload; -import software.amazon.awssdk.transfer.s3.internal.progress.DefaultTransferProgressSnapshot; -import software.amazon.awssdk.transfer.s3.model.CompletedFileUpload; -import software.amazon.awssdk.transfer.s3.model.ResumableFileUpload; +import software.amazon.awssdk.transfer.s3.model.FileUpload; import software.amazon.awssdk.transfer.s3.model.UploadFileRequest; import software.amazon.awssdk.transfer.s3.progress.TransferProgress; class DefaultFileUploadTest { - private static final int TOTAL_PARTS = 10; - private static final int NUM_OF_PARTS_COMPLETED = 5; - private static final long PART_SIZE_IN_BYTES = 8 * MB; - private static final String MULTIPART_UPLOAD_ID = "someId"; - private S3MetaRequest metaRequest; - private static FileSystem fileSystem; - private static File 
file; - private static ResumeToken token; - - @BeforeAll - public static void setUp() throws IOException { - fileSystem = Jimfs.newFileSystem(); - file = File.createTempFile("test", UUID.randomUUID().toString()); - Files.write(file.toPath(), RandomStringUtils.random(2000).getBytes(StandardCharsets.UTF_8)); - token = new ResumeToken(new ResumeToken.PutResumeTokenBuilder() - .withNumPartsCompleted(NUM_OF_PARTS_COMPLETED) - .withTotalNumParts(TOTAL_PARTS) - .withPartSize(PART_SIZE_IN_BYTES) - .withUploadId(MULTIPART_UPLOAD_ID)); - } - - @AfterAll - public static void tearDown() throws IOException { - file.delete(); - } - - @BeforeEach - void setUpBeforeEachTest() { - metaRequest = Mockito.mock(S3MetaRequest.class); - } - @Test void equals_hashcode() { EqualsVerifier.forClass(DefaultFileUpload.class) - .withNonnullFields("completionFuture", "progress", "request", "observable", "resumableFileUpload", - "clientType") - .withPrefabValues(S3MetaRequestPauseObservable.class, new S3MetaRequestPauseObservable(), - new S3MetaRequestPauseObservable()) + .withNonnullFields("completionFuture", "progress", "request") .verify(); } @Test - void pause_futureCompleted_shouldReturnNormally() { - PutObjectResponse putObjectResponse = PutObjectResponse.builder() - .build(); - CompletableFuture future = - CompletableFuture.completedFuture(CompletedFileUpload.builder() - .response(putObjectResponse) - .build()); - TransferProgress transferProgress = Mockito.mock(TransferProgress.class); - when(transferProgress.snapshot()).thenReturn(DefaultTransferProgressSnapshot.builder() - .sdkResponse(putObjectResponse) - .transferredBytes(0L) - .build()); - S3MetaRequestPauseObservable observable = new S3MetaRequestPauseObservable(); - - UploadFileRequest request = uploadFileRequest(); - - DefaultFileUpload fileUpload = - new DefaultFileUpload(future, transferProgress, observable, request, S3ClientType.CRT_BASED); - - observable.subscribe(metaRequest); - - ResumableFileUpload resumableFileUpload = 
fileUpload.pause(); - Mockito.verify(metaRequest, Mockito.never()).pause(); - assertThat(resumableFileUpload.totalParts()).isEmpty(); - assertThat(resumableFileUpload.partSizeInBytes()).isEmpty(); - assertThat(resumableFileUpload.multipartUploadId()).isEmpty(); - assertThat(resumableFileUpload.fileLength()).isEqualTo(file.length()); - assertThat(resumableFileUpload.uploadFileRequest()).isEqualTo(request); - assertThat(resumableFileUpload.fileLastModified()).isEqualTo(Instant.ofEpochMilli(file.lastModified())); - } - - @Test - void pauseTwice_shouldReturnTheSame() { - CompletableFuture future = - new CompletableFuture<>(); - TransferProgress transferProgress = Mockito.mock(TransferProgress.class); - when(transferProgress.snapshot()).thenReturn(DefaultTransferProgressSnapshot.builder() - .transferredBytes(1000L) - .build()); - UploadFileRequest request = uploadFileRequest(); - - S3MetaRequestPauseObservable observable = new S3MetaRequestPauseObservable(); - when(metaRequest.pause()).thenReturn(token); - observable.subscribe(metaRequest); - - DefaultFileUpload fileUpload = - new DefaultFileUpload(future, transferProgress, observable, request, S3ClientType.CRT_BASED); - - ResumableFileUpload resumableFileUpload = fileUpload.pause(); - ResumableFileUpload resumableFileUpload2 = fileUpload.pause(); - - assertThat(resumableFileUpload).isEqualTo(resumableFileUpload2); - } - - @Test - void pause_crtThrowException_shouldPropogate() { - CompletableFuture future = - new CompletableFuture<>(); - TransferProgress transferProgress = Mockito.mock(TransferProgress.class); - when(transferProgress.snapshot()).thenReturn(DefaultTransferProgressSnapshot.builder() - .transferredBytes(1000L) - .build()); - UploadFileRequest request = uploadFileRequest(); - - S3MetaRequestPauseObservable observable = new S3MetaRequestPauseObservable(); - CrtRuntimeException exception = new CrtRuntimeException("exception"); - when(metaRequest.pause()).thenThrow(exception); - 
observable.subscribe(metaRequest); - - DefaultFileUpload fileUpload = - new DefaultFileUpload(future, transferProgress, observable, request, S3ClientType.CRT_BASED); - - assertThatThrownBy(() -> fileUpload.pause()).isSameAs(exception); - } - - @Test - void pause_futureNotComplete_shouldPause() { - CompletableFuture future = - new CompletableFuture<>(); - TransferProgress transferProgress = Mockito.mock(TransferProgress.class); - when(transferProgress.snapshot()).thenReturn(DefaultTransferProgressSnapshot.builder() - .transferredBytes(0L) - .build()); - S3MetaRequestPauseObservable observable = new S3MetaRequestPauseObservable(); - when(metaRequest.pause()).thenReturn(token); - UploadFileRequest request = uploadFileRequest(); - - DefaultFileUpload fileUpload = - new DefaultFileUpload(future, transferProgress, observable, request, S3ClientType.CRT_BASED); - - observable.subscribe(metaRequest); - - ResumableFileUpload resumableFileUpload = fileUpload.pause(); - Mockito.verify(metaRequest).pause(); - assertThat(resumableFileUpload.totalParts()).hasValue(TOTAL_PARTS); - assertThat(resumableFileUpload.partSizeInBytes()).hasValue(PART_SIZE_IN_BYTES); - assertThat(resumableFileUpload.multipartUploadId()).hasValue(MULTIPART_UPLOAD_ID); - assertThat(resumableFileUpload.transferredParts()).hasValue(NUM_OF_PARTS_COMPLETED); - assertThat(resumableFileUpload.fileLength()).isEqualTo(file.length()); - assertThat(resumableFileUpload.uploadFileRequest()).isEqualTo(request); - assertThat(resumableFileUpload.fileLastModified()).isEqualTo(Instant.ofEpochMilli(file.lastModified())); - } - - @Test - void pause_singlePart_shouldPause() { - PutObjectResponse putObjectResponse = PutObjectResponse.builder() - .build(); - CompletableFuture future = - new CompletableFuture<>(); - TransferProgress transferProgress = Mockito.mock(TransferProgress.class); - when(transferProgress.snapshot()).thenReturn(DefaultTransferProgressSnapshot.builder() - .sdkResponse(putObjectResponse) - 
.transferredBytes(0L) - .build()); - S3MetaRequestPauseObservable observable = new S3MetaRequestPauseObservable(); - when(metaRequest.pause()).thenThrow(new CrtRuntimeException(6)); - UploadFileRequest request = uploadFileRequest(); - - DefaultFileUpload fileUpload = - new DefaultFileUpload(future, transferProgress, observable, request, S3ClientType.CRT_BASED); - - observable.subscribe(metaRequest); - - ResumableFileUpload resumableFileUpload = fileUpload.pause(); - Mockito.verify(metaRequest).pause(); - assertThat(resumableFileUpload.totalParts()).isEmpty(); - assertThat(resumableFileUpload.partSizeInBytes()).isEmpty(); - assertThat(resumableFileUpload.multipartUploadId()).isEmpty(); - assertThat(resumableFileUpload.fileLength()).isEqualTo(file.length()); - assertThat(resumableFileUpload.uploadFileRequest()).isEqualTo(request); - assertThat(resumableFileUpload.fileLastModified()).isEqualTo(Instant.ofEpochMilli(file.lastModified())); - } - - @Test - void pause_nonCrtBasedS3Client_shouldThrowUnsupportedException() { - CompletableFuture future = - new CompletableFuture<>(); + void pause_shouldThrowUnsupportedOperation() { TransferProgress transferProgress = Mockito.mock(TransferProgress.class); - when(transferProgress.snapshot()).thenReturn(DefaultTransferProgressSnapshot.builder() - .transferredBytes(1000L) - .build()); - UploadFileRequest request = uploadFileRequest(); - S3MetaRequestPauseObservable observable = new S3MetaRequestPauseObservable(); - - DefaultFileUpload fileUpload = - new DefaultFileUpload(future, transferProgress, observable, request, S3ClientType.JAVA_BASED); - - assertThatThrownBy(fileUpload::pause).isInstanceOf(UnsupportedOperationException.class); - } - - private UploadFileRequest uploadFileRequest() { - return UploadFileRequest.builder() - .source(file) - .putObjectRequest(p -> p.key("test").bucket("bucket")) - .build(); + UploadFileRequest request = UploadFileRequest.builder() + .source(Paths.get("test")) + .putObjectRequest(p -> 
p.key("test").bucket("bucket")) + .build(); + FileUpload fileUpload = new DefaultFileUpload(new CompletableFuture<>(), + transferProgress, + request); + + assertThatThrownBy(() -> fileUpload.pause()).isInstanceOf(UnsupportedOperationException.class); } } \ No newline at end of file diff --git a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3TransferManagerListenerTest.java b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3TransferManagerListenerTest.java index 4ae87772182e..a9b7529cd9c1 100644 --- a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3TransferManagerListenerTest.java +++ b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3TransferManagerListenerTest.java @@ -71,7 +71,7 @@ public class S3TransferManagerListenerTest { @BeforeEach public void methodSetup() { s3Crt = mock(S3CrtAsyncClient.class); - tm = new DefaultS3TransferManager(s3Crt, mock(UploadDirectoryHelper.class), mock(TransferManagerConfiguration.class), + tm = new GenericS3TransferManager(s3Crt, mock(UploadDirectoryHelper.class), mock(TransferManagerConfiguration.class), mock(DownloadDirectoryHelper.class)); contentLength = 1024L; when(s3Crt.putObject(any(PutObjectRequest.class), any(AsyncRequestBody.class))) diff --git a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3TransferManagerTest.java b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3TransferManagerTest.java index aa6738c7b31d..50e05abbafaf 100644 --- a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3TransferManagerTest.java +++ b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3TransferManagerTest.java @@ -23,7 +23,6 @@ import static org.mockito.Mockito.verify; 
import static org.mockito.Mockito.when; -import java.io.ByteArrayOutputStream; import java.nio.file.Paths; import java.util.concurrent.CompletableFuture; import org.junit.jupiter.api.AfterEach; @@ -32,7 +31,6 @@ import software.amazon.awssdk.core.ResponseBytes; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.async.AsyncResponseTransformer; -import software.amazon.awssdk.core.async.ResponsePublisher; import software.amazon.awssdk.services.s3.internal.crt.S3CrtAsyncClient; import software.amazon.awssdk.services.s3.model.CopyObjectRequest; import software.amazon.awssdk.services.s3.model.CopyObjectResponse; @@ -66,7 +64,7 @@ public void methodSetup() { uploadDirectoryHelper = mock(UploadDirectoryHelper.class); configuration = mock(TransferManagerConfiguration.class); downloadDirectoryHelper = mock(DownloadDirectoryHelper.class); - tm = new DefaultS3TransferManager(mockS3Crt, uploadDirectoryHelper, configuration, downloadDirectoryHelper); + tm = new GenericS3TransferManager(mockS3Crt, uploadDirectoryHelper, configuration, downloadDirectoryHelper); } @AfterEach @@ -343,7 +341,7 @@ void downloadDirectory_throwException_shouldCompleteFutureExceptionally() { @Test void close_shouldCloseUnderlyingResources() { - S3TransferManager transferManager = new DefaultS3TransferManager(mockS3Crt, uploadDirectoryHelper, configuration, downloadDirectoryHelper); + S3TransferManager transferManager = new GenericS3TransferManager(mockS3Crt, uploadDirectoryHelper, configuration, downloadDirectoryHelper); transferManager.close(); verify(mockS3Crt, times(0)).close(); verify(configuration).close(); @@ -352,7 +350,7 @@ void close_shouldCloseUnderlyingResources() { @Test void close_shouldNotCloseCloseS3AsyncClientPassedInBuilder_when_transferManagerClosed() { S3TransferManager transferManager = - DefaultS3TransferManager.builder().s3Client(mockS3Crt).build(); + S3TransferManager.builder().s3Client(mockS3Crt).build(); transferManager.close(); 
verify(mockS3Crt, times(0)).close(); } diff --git a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3TransferManagerUploadPauseAndResumeTest.java b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3TransferManagerUploadPauseAndResumeTest.java index ebe0a8178150..d1d998c055d8 100644 --- a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3TransferManagerUploadPauseAndResumeTest.java +++ b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/S3TransferManagerUploadPauseAndResumeTest.java @@ -65,7 +65,7 @@ public void methodSetup() throws IOException { uploadDirectoryHelper = mock(UploadDirectoryHelper.class); configuration = mock(TransferManagerConfiguration.class); downloadDirectoryHelper = mock(DownloadDirectoryHelper.class); - tm = new DefaultS3TransferManager(mockS3Crt, uploadDirectoryHelper, configuration, downloadDirectoryHelper); + tm = new CrtS3TransferManager(configuration, mockS3Crt, false); } @AfterEach diff --git a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/UploadDirectoryHelperTest.java b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/UploadDirectoryHelperTest.java index c2ced8bfd65b..aba7cc86ae0d 100644 --- a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/UploadDirectoryHelperTest.java +++ b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/internal/UploadDirectoryHelperTest.java @@ -443,10 +443,9 @@ private DefaultFileUpload completedUpload() { new DefaultTransferProgress(DefaultTransferProgressSnapshot.builder() .transferredBytes(0L) .build()), - new S3MetaRequestPauseObservable(), UploadFileRequest.builder() .source(Paths.get(".")).putObjectRequest(b -> b.bucket("bucket").key("key")) - .build(), 
S3ClientType.CRT_BASED); + .build()); } private FileUpload newUpload(CompletableFuture future) { @@ -454,11 +453,10 @@ private FileUpload newUpload(CompletableFuture future) { new DefaultTransferProgress(DefaultTransferProgressSnapshot.builder() .transferredBytes(0L) .build()), - new S3MetaRequestPauseObservable(), UploadFileRequest.builder() .putObjectRequest(p -> p.key("key").bucket("bucket")).source(Paths.get( "test.txt")) - .build(), S3ClientType.CRT_BASED); + .build()); } private Path createJimFsTestDirectory(FileSystem fileSystem) { diff --git a/services/accessanalyzer/pom.xml b/services/accessanalyzer/pom.xml index 5b9d6e377d21..312499815202 100644 --- a/services/accessanalyzer/pom.xml +++ b/services/accessanalyzer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT accessanalyzer AWS Java SDK :: Services :: AccessAnalyzer diff --git a/services/account/pom.xml b/services/account/pom.xml index 2be6bb1ecce5..64e3938bbc4e 100644 --- a/services/account/pom.xml +++ b/services/account/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT account AWS Java SDK :: Services :: Account diff --git a/services/account/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/account/src/main/resources/codegen-resources/endpoint-rule-set.json index bcb16c007371..e9b05bd2c3f8 100644 --- a/services/account/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/account/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -138,208 +138,40 @@ }, "aws" ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": 
"PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://account-fips.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://account-fips.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseFIPS" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } + false ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://account.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseDualStack" }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + false + ] + } + ], + 
"endpoint": { + "url": "https://account.us-east-1.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "account", + "signingRegion": "us-east-1" } ] }, - { - "conditions": [], - "endpoint": { - "url": "https://account.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "account", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] + "headers": {} + }, + "type": "endpoint" }, { "conditions": [ @@ -357,208 +189,40 @@ }, "aws-cn" ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://account-fips.{Region}.api.amazonwebservices.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": 
"https://account-fips.{Region}.amazonaws.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseFIPS" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } + false ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://account.{Region}.api.amazonwebservices.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseDualStack" }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + false + ] + } + ], + "endpoint": { + "url": "https://account.cn-northwest-1.amazonaws.com.cn", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "account", + "signingRegion": "cn-northwest-1" } ] }, - { - "conditions": [], - "endpoint": { - "url": "https://account.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "account", - "signingRegion": "cn-northwest-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] + "headers": {} + }, + "type": "endpoint" }, { "conditions": [ @@ -762,60 +426,6 @@ "conditions": [], "type": "tree", "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-global" - ] - } - ], - "endpoint": { - "url": "https://account.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "account", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - 
}, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-cn-global" - ] - } - ], - "endpoint": { - "url": "https://account.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "account", - "signingRegion": "cn-northwest-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, { "conditions": [], "endpoint": { diff --git a/services/account/src/main/resources/codegen-resources/endpoint-tests.json b/services/account/src/main/resources/codegen-resources/endpoint-tests.json index b1e600ee2323..ac318cb0f9c7 100644 --- a/services/account/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/account/src/main/resources/codegen-resources/endpoint-tests.json @@ -218,6 +218,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { @@ -231,6 +242,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { @@ -244,6 +266,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + 
} + }, { "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { @@ -257,6 +290,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { @@ -320,6 +364,12 @@ "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/account/src/main/resources/codegen-resources/paginators-1.json b/services/account/src/main/resources/codegen-resources/paginators-1.json index cdd3aae8c98f..5e75ec80cb62 100644 --- a/services/account/src/main/resources/codegen-resources/paginators-1.json +++ b/services/account/src/main/resources/codegen-resources/paginators-1.json @@ -3,7 +3,8 @@ "ListRegions": { "input_token": "NextToken", "output_token": "NextToken", - "limit_key": "MaxResults" + "limit_key": "MaxResults", + "result_key": "Regions" } } } diff --git a/services/acm/pom.xml b/services/acm/pom.xml index 9007e29da015..855a3b913294 100644 --- a/services/acm/pom.xml +++ b/services/acm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT acm AWS Java SDK :: Services :: AWS Certificate Manager diff --git a/services/acmpca/pom.xml b/services/acmpca/pom.xml index 997107e32db2..06e403ae2d37 100644 --- a/services/acmpca/pom.xml +++ b/services/acmpca/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT acmpca AWS Java SDK :: Services :: ACM PCA diff --git a/services/acmpca/src/main/resources/codegen-resources/endpoint-rule-set.json 
b/services/acmpca/src/main/resources/codegen-resources/endpoint-rule-set.json index 6979e0ffff82..ad3be5fe1b49 100644 --- a/services/acmpca/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/acmpca/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": 
"booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,199 +111,263 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] }, - "supportsDualStack" + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://acm-pca-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://acm-pca-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": 
"tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ - "aws-us-gov", + true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "name" + "supportsFIPS" ] } ] } ], - "endpoint": { - "url": "https://acm-pca.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-us-gov", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://acm-pca.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://acm-pca-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] }, { "conditions": [], - "endpoint": { - "url": "https://acm-pca-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + 
"ref": "UseDualStack" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://acm-pca.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": "https://acm-pca.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://acm-pca.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://acm-pca.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/acmpca/src/main/resources/codegen-resources/endpoint-tests.json b/services/acmpca/src/main/resources/codegen-resources/endpoint-tests.json index 0fda4c37f98a..d1aad24c9dda 100644 --- a/services/acmpca/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/acmpca/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,980 +1,31 @@ { "testCases": [ { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack enabled", - "expect": { - 
"endpoint": { - "url": "https://acm-pca-fips.ap-south-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.ap-south-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.ap-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.ap-south-1.amazonaws.com" - } - }, - "params": { - 
"UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-south-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-south-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-2", - "UseDualStack": true - } - }, - { - "documentation": 
"For region eu-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.us-gov-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.me-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.me-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and 
DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.me-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.me-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://acm-pca-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-central-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-central-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.us-west-1.api.aws" - } - }, - 
"params": { - "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For 
region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://acm-pca.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-2", - 
"UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with 
FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.ap-northeast-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.ap-northeast-3.amazonaws.com" + "url": "https://acm-pca.af-south-1.amazonaws.com" } }, "params": { + "Region": "af-south-1", "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.ap-northeast-2.amazonaws.com" + "url": "https://acm-pca.ap-east-1.amazonaws.com" } }, "params": { + "Region": "ap-east-1", "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS 
enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", "UseDualStack": false } }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": true - } - }, { "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { @@ -983,680 +34,524 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-northeast-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.me-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.me-south-1.api.aws" - } - }, - "params": { "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.me-south-1.amazonaws.com" - } - }, - 
"params": { - "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.sa-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "sa-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "sa-east-1", "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.sa-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.sa-east-1.amazonaws.com" + "url": "https://acm-pca.ap-northeast-2.amazonaws.com" } }, "params": { + "Region": "ap-northeast-2", "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-east-1", "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://acm-pca.ap-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.ap-east-1.amazonaws.com" + "url": "https://acm-pca.ap-northeast-3.amazonaws.com" } }, "params": { + "Region": "ap-northeast-3", "UseFIPS": false, - "Region": "ap-east-1", "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseFIPS": true, - "Region": "cn-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.cn-north-1.amazonaws.com.cn" + "url": "https://acm-pca.ap-south-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "cn-north-1", + "Region": "ap-south-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://acm-pca.ap-southeast-1.amazonaws.com" } }, "params": { + "Region": "ap-southeast-1", "UseFIPS": false, - "Region": "cn-north-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://acm-pca.cn-north-1.amazonaws.com.cn" + "url": "https://acm-pca.ap-southeast-2.amazonaws.com" } }, "params": { + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "cn-north-1", "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.us-gov-west-1.amazonaws.com" + "url": "https://acm-pca.ap-southeast-3.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "us-gov-west-1", + "Region": "ap-southeast-3", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.us-gov-west-1.api.aws" + "url": "https://acm-pca.ca-central-1.amazonaws.com" } }, "params": { + "Region": "ca-central-1", "UseFIPS": false, - "Region": "us-gov-west-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.us-gov-west-1.amazonaws.com" + "url": "https://acm-pca-fips.ca-central-1.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "us-gov-west-1", + "Region": "ca-central-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-central-1 
with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.ap-southeast-1.api.aws" + "url": "https://acm-pca.eu-central-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-1", - "UseDualStack": true + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.ap-southeast-1.amazonaws.com" + "url": "https://acm-pca.eu-north-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-1", + "Region": "eu-north-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.ap-southeast-1.api.aws" + "url": "https://acm-pca.eu-south-1.amazonaws.com" } }, "params": { + "Region": "eu-south-1", "UseFIPS": false, - "Region": "ap-southeast-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.ap-southeast-1.amazonaws.com" + "url": "https://acm-pca.eu-west-1.amazonaws.com" } }, "params": { + "Region": "eu-west-1", "UseFIPS": false, - "Region": "ap-southeast-1", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.ap-southeast-2.api.aws" + "url": "https://acm-pca.eu-west-2.amazonaws.com" } }, "params": { 
- "UseFIPS": true, - "Region": "ap-southeast-2", - "UseDualStack": true + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.ap-southeast-2.amazonaws.com" + "url": "https://acm-pca.eu-west-3.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-2", + "Region": "eu-west-3", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.ap-southeast-2.api.aws" + "url": "https://acm-pca.me-south-1.amazonaws.com" } }, "params": { + "Region": "me-south-1", "UseFIPS": false, - "Region": "ap-southeast-2", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.ap-southeast-2.amazonaws.com" + "url": "https://acm-pca.sa-east-1.amazonaws.com" } }, "params": { + "Region": "sa-east-1", "UseFIPS": false, - "Region": "ap-southeast-2", "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://acm-pca.us-east-1.amazonaws.com" + } }, "params": { - "UseFIPS": true, - "Region": "us-iso-east-1", - "UseDualStack": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - 
"documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://acm-pca-fips.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-iso-east-1", "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://acm-pca.us-east-2.amazonaws.com" + } }, "params": { + "Region": "us-east-2", "UseFIPS": false, - "Region": "us-iso-east-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.us-iso-east-1.c2s.ic.gov" + "url": "https://acm-pca-fips.us-east-2.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "us-iso-east-1", + "Region": "us-east-2", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.ap-southeast-3.api.aws" + "url": "https://acm-pca.us-west-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-3", - "UseDualStack": true + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", 
"expect": { "endpoint": { - "url": "https://acm-pca-fips.ap-southeast-3.amazonaws.com" + "url": "https://acm-pca-fips.us-west-1.amazonaws.com" } }, "params": { + "Region": "us-west-1", "UseFIPS": true, - "Region": "ap-southeast-3", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.ap-southeast-3.api.aws" + "url": "https://acm-pca.us-west-2.amazonaws.com" } }, "params": { + "Region": "us-west-2", "UseFIPS": false, - "Region": "ap-southeast-3", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.ap-southeast-3.amazonaws.com" + "url": "https://acm-pca-fips.us-west-2.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "ap-southeast-3", + "Region": "us-west-2", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.ap-southeast-4.api.aws" + "url": "https://acm-pca-fips.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "ap-southeast-4", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.ap-southeast-4.amazonaws.com" + "url": "https://acm-pca.us-east-1.api.aws" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-4", - "UseDualStack": false + "Region": "us-east-1", + 
"UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://acm-pca.ap-southeast-4.api.aws" + "url": "https://acm-pca-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseFIPS": false, - "Region": "ap-southeast-4", + "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.ap-southeast-4.amazonaws.com" + "url": "https://acm-pca-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseFIPS": false, - "Region": "ap-southeast-4", + "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.us-east-1.api.aws" + "url": "https://acm-pca.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseFIPS": true, - "Region": "us-east-1", + "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.us-east-1.amazonaws.com" + "url": "https://acm-pca.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseFIPS": true, - "Region": "us-east-1", + "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS 
disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.us-east-1.api.aws" + "url": "https://acm-pca.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-east-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.us-east-1.amazonaws.com" + "url": "https://acm-pca.us-gov-east-1.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "us-east-1", + "Region": "us-gov-east-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.us-east-2.api.aws" + "url": "https://acm-pca.us-gov-west-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "us-east-2", - "UseDualStack": true + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.us-east-2.amazonaws.com" + "url": "https://acm-pca.us-gov-west-1.amazonaws.com" } }, "params": { + "Region": "us-gov-west-1", "UseFIPS": true, - "Region": "us-east-2", "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://acm-pca.us-east-2.api.aws" + "url": "https://acm-pca-fips.us-gov-east-1.api.aws" } }, "params": { - "UseFIPS": false, - "Region": "us-east-2", + 
"Region": "us-gov-east-1", + "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://acm-pca.us-east-2.amazonaws.com" + "url": "https://acm-pca.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-east-2", - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://acm-pca-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "cn-northwest-1", "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.cn-northwest-1.amazonaws.com.cn" + "url": "https://acm-pca-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "cn-northwest-1", "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://acm-pca.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "cn-northwest-1", "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS 
disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.cn-northwest-1.amazonaws.com.cn" + "url": "https://acm-pca.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "cn-northwest-1", "UseDualStack": false } }, @@ -1666,8 +561,8 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -1679,8 +574,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -1690,8 +585,8 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -1703,21 +598,34 @@ } }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -1728,8 +636,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -1740,11 +648,17 @@ "error": 
"Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/acmpca/src/main/resources/codegen-resources/service-2.json b/services/acmpca/src/main/resources/codegen-resources/service-2.json index ee020a89da80..71e71b0a92bc 100644 --- a/services/acmpca/src/main/resources/codegen-resources/service-2.json +++ b/services/acmpca/src/main/resources/codegen-resources/service-2.json @@ -810,7 +810,7 @@ }, "KeyStorageSecurityStandard":{ "shape":"KeyStorageSecurityStandard", - "documentation":"

      Specifies a cryptographic key management compliance standard used for handling CA keys.

      Default: FIPS_140_2_LEVEL_3_OR_HIGHER

      Note: FIPS_140_2_LEVEL_3_OR_HIGHER is not supported in the following Regions:

      • ap-northeast-3

      • ap-southeast-3

      When creating a CA in these Regions, you must provide FIPS_140_2_LEVEL_2_OR_HIGHER as the argument for KeyStorageSecurityStandard. Failure to do this results in an InvalidArgsException with the message, \"A certificate authority cannot be created in this region with the specified security standard.\"

      " + "documentation":"

      Specifies a cryptographic key management compliance standard used for handling CA keys.

      Default: FIPS_140_2_LEVEL_3_OR_HIGHER

      Some Amazon Web Services Regions do not support the default. When creating a CA in these Regions, you must provide FIPS_140_2_LEVEL_2_OR_HIGHER as the argument for KeyStorageSecurityStandard. Failure to do this results in an InvalidArgsException with the message, \"A certificate authority cannot be created in this region with the specified security standard.\"

      For information about security standard support in various Regions, see Storage and security compliance of Amazon Web Services Private CA private keys.

      " }, "Tags":{ "shape":"TagList", @@ -1390,7 +1390,7 @@ }, "SigningAlgorithm":{ "shape":"SigningAlgorithm", - "documentation":"

      The name of the algorithm that will be used to sign the certificate to be issued.

      This parameter should not be confused with the SigningAlgorithm parameter used to sign a CSR in the CreateCertificateAuthority action.

      The specified signing algorithm family (RSA or ECDSA) much match the algorithm family of the CA's secret key.

      " + "documentation":"

      The name of the algorithm that will be used to sign the certificate to be issued.

      This parameter should not be confused with the SigningAlgorithm parameter used to sign a CSR in the CreateCertificateAuthority action.

      The specified signing algorithm family (RSA or ECDSA) must match the algorithm family of the CA's secret key.

      " }, "TemplateArn":{ "shape":"Arn", @@ -1402,7 +1402,7 @@ }, "ValidityNotBefore":{ "shape":"Validity", - "documentation":"

      Information describing the start of the validity period of the certificate. This parameter sets the “Not Before\" date for the certificate.

      By default, when issuing a certificate, Amazon Web Services Private CA sets the \"Not Before\" date to the issuance time minus 60 minutes. This compensates for clock inconsistencies across computer systems. The ValidityNotBefore parameter can be used to customize the “Not Before” value.

      Unlike the Validity parameter, the ValidityNotBefore parameter is optional.

      The ValidityNotBefore value is expressed as an explicit date and time, using the Validity type value ABSOLUTE. For more information, see Validity in this API reference and Validity in RFC 5280.

      " + "documentation":"

      Information describing the start of the validity period of the certificate. This parameter sets the “Not Before” date for the certificate.

      By default, when issuing a certificate, Amazon Web Services Private CA sets the \"Not Before\" date to the issuance time minus 60 minutes. This compensates for clock inconsistencies across computer systems. The ValidityNotBefore parameter can be used to customize the “Not Before” value.

      Unlike the Validity parameter, the ValidityNotBefore parameter is optional.

      The ValidityNotBefore value is expressed as an explicit date and time, using the Validity type value ABSOLUTE. For more information, see Validity in this API reference and Validity in RFC 5280.

      " }, "IdempotencyToken":{ "shape":"IdempotencyToken", diff --git a/services/alexaforbusiness/pom.xml b/services/alexaforbusiness/pom.xml index 2cb4c2d1a982..0d57dfeddd4f 100644 --- a/services/alexaforbusiness/pom.xml +++ b/services/alexaforbusiness/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 alexaforbusiness diff --git a/services/alexaforbusiness/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/alexaforbusiness/src/main/resources/codegen-resources/endpoint-rule-set.json index 3ebbb8c47b0d..da4c0c1adabd 100644 --- a/services/alexaforbusiness/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/alexaforbusiness/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, 
- { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://a4b-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": 
"tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://a4b-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://a4b-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://a4b.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - 
"rules": [ + }, { "conditions": [], "type": "tree", @@ -231,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://a4b-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://a4b.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -240,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://a4b.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://a4b.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/alexaforbusiness/src/main/resources/codegen-resources/endpoint-tests.json b/services/alexaforbusiness/src/main/resources/codegen-resources/endpoint-tests.json index 8d1bc5a284ae..de7a21af1d8d 100644 --- a/services/alexaforbusiness/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/alexaforbusiness/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,5 +1,18 @@ { "testCases": [ + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://a4b.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": 
false, + "UseDualStack": false + } + }, { "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { @@ -8,9 +21,9 @@ } }, "params": { - "UseDualStack": true, "Region": "us-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -21,9 +34,9 @@ } }, "params": { - "UseDualStack": false, "Region": "us-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -34,35 +47,235 @@ } }, "params": { - "UseDualStack": true, "Region": "us-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://a4b.us-east-1.amazonaws.com" + "url": "https://a4b-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://a4b-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://a4b.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://a4b.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + 
"endpoint": { + "url": "https://a4b-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://a4b-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://a4b.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://a4b.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://a4b-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS 
disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://a4b.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://a4b-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://a4b.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -72,9 +285,9 @@ "error": "Invalid 
Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -84,11 +297,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/alexaforbusiness/src/main/resources/codegen-resources/service-2.json b/services/alexaforbusiness/src/main/resources/codegen-resources/service-2.json index 57078847502f..9aa7a8702d9b 100644 --- a/services/alexaforbusiness/src/main/resources/codegen-resources/service-2.json +++ b/services/alexaforbusiness/src/main/resources/codegen-resources/service-2.json @@ -25,7 +25,9 @@ {"shape":"NotFoundException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Associates a skill with the organization under the customer's AWS account. If a skill is private, the user implicitly accepts access to this skill during enablement.

      " + "documentation":"

      Associates a skill with the organization under the customer's AWS account. If a skill is private, the user implicitly accepts access to this skill during enablement.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "AssociateContactWithAddressBook":{ "name":"AssociateContactWithAddressBook", @@ -38,7 +40,9 @@ "errors":[ {"shape":"LimitExceededException"} ], - "documentation":"

      Associates a contact with a given address book.

      " + "documentation":"

      Associates a contact with a given address book.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "AssociateDeviceWithNetworkProfile":{ "name":"AssociateDeviceWithNetworkProfile", @@ -53,7 +57,9 @@ {"shape":"ConcurrentModificationException"}, {"shape":"DeviceNotRegisteredException"} ], - "documentation":"

      Associates a device with the specified network profile.

      " + "documentation":"

      Associates a device with the specified network profile.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "AssociateDeviceWithRoom":{ "name":"AssociateDeviceWithRoom", @@ -68,7 +74,9 @@ {"shape":"ConcurrentModificationException"}, {"shape":"DeviceNotRegisteredException"} ], - "documentation":"

      Associates a device with a given room. This applies all the settings from the room profile to the device, and all the skills in any skill groups added to that room. This operation requires the device to be online, or else a manual sync is required.

      " + "documentation":"

      Associates a device with a given room. This applies all the settings from the room profile to the device, and all the skills in any skill groups added to that room. This operation requires the device to be online, or else a manual sync is required.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "AssociateSkillGroupWithRoom":{ "name":"AssociateSkillGroupWithRoom", @@ -81,7 +89,9 @@ "errors":[ {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Associates a skill group with a given room. This enables all skills in the associated skill group on all devices in the room.

      " + "documentation":"

      Associates a skill group with a given room. This enables all skills in the associated skill group on all devices in the room.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "AssociateSkillWithSkillGroup":{ "name":"AssociateSkillWithSkillGroup", @@ -96,7 +106,9 @@ {"shape":"NotFoundException"}, {"shape":"SkillNotLinkedException"} ], - "documentation":"

      Associates a skill with a skill group.

      " + "documentation":"

      Associates a skill with a skill group.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "AssociateSkillWithUsers":{ "name":"AssociateSkillWithUsers", @@ -110,7 +122,9 @@ {"shape":"ConcurrentModificationException"}, {"shape":"NotFoundException"} ], - "documentation":"

      Makes a private skill available for enrolled users to enable on their devices.

      " + "documentation":"

      Makes a private skill available for enrolled users to enable on their devices.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "CreateAddressBook":{ "name":"CreateAddressBook", @@ -124,7 +138,9 @@ {"shape":"AlreadyExistsException"}, {"shape":"LimitExceededException"} ], - "documentation":"

      Creates an address book with the specified details.

      " + "documentation":"

      Creates an address book with the specified details.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "CreateBusinessReportSchedule":{ "name":"CreateBusinessReportSchedule", @@ -137,7 +153,9 @@ "errors":[ {"shape":"AlreadyExistsException"} ], - "documentation":"

      Creates a recurring schedule for usage reports to deliver to the specified S3 location with a specified daily or weekly interval.

      " + "documentation":"

      Creates a recurring schedule for usage reports to deliver to the specified S3 location with a specified daily or weekly interval.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "CreateConferenceProvider":{ "name":"CreateConferenceProvider", @@ -150,7 +168,9 @@ "errors":[ {"shape":"AlreadyExistsException"} ], - "documentation":"

      Adds a new conference provider under the user's AWS account.

      " + "documentation":"

      Adds a new conference provider under the user's AWS account.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "CreateContact":{ "name":"CreateContact", @@ -164,7 +184,9 @@ {"shape":"AlreadyExistsException"}, {"shape":"LimitExceededException"} ], - "documentation":"

      Creates a contact with the specified details.

      " + "documentation":"

      Creates a contact with the specified details.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "CreateGatewayGroup":{ "name":"CreateGatewayGroup", @@ -178,7 +200,9 @@ {"shape":"AlreadyExistsException"}, {"shape":"LimitExceededException"} ], - "documentation":"

      Creates a gateway group with the specified details.

      " + "documentation":"

      Creates a gateway group with the specified details.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "CreateNetworkProfile":{ "name":"CreateNetworkProfile", @@ -195,7 +219,9 @@ {"shape":"InvalidCertificateAuthorityException"}, {"shape":"InvalidServiceLinkedRoleStateException"} ], - "documentation":"

      Creates a network profile with the specified details.

      " + "documentation":"

      Creates a network profile with the specified details.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "CreateProfile":{ "name":"CreateProfile", @@ -210,7 +236,9 @@ {"shape":"AlreadyExistsException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Creates a new room profile with the specified details.

      " + "documentation":"

      Creates a new room profile with the specified details.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "CreateRoom":{ "name":"CreateRoom", @@ -224,7 +252,9 @@ {"shape":"AlreadyExistsException"}, {"shape":"LimitExceededException"} ], - "documentation":"

      Creates a room with the specified details.

      " + "documentation":"

      Creates a room with the specified details.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "CreateSkillGroup":{ "name":"CreateSkillGroup", @@ -239,7 +269,9 @@ {"shape":"LimitExceededException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Creates a skill group with a specified name and description.

      " + "documentation":"

      Creates a skill group with a specified name and description.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "CreateUser":{ "name":"CreateUser", @@ -254,7 +286,9 @@ {"shape":"LimitExceededException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Creates a user.

      " + "documentation":"

      Creates a user.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "DeleteAddressBook":{ "name":"DeleteAddressBook", @@ -268,7 +302,9 @@ {"shape":"NotFoundException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Deletes an address book by the address book ARN.

      " + "documentation":"

      Deletes an address book by the address book ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "DeleteBusinessReportSchedule":{ "name":"DeleteBusinessReportSchedule", @@ -282,7 +318,9 @@ {"shape":"NotFoundException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Deletes the recurring report delivery schedule with the specified schedule ARN.

      " + "documentation":"

      Deletes the recurring report delivery schedule with the specified schedule ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "DeleteConferenceProvider":{ "name":"DeleteConferenceProvider", @@ -295,7 +333,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Deletes a conference provider.

      " + "documentation":"

      Deletes a conference provider.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "DeleteContact":{ "name":"DeleteContact", @@ -309,7 +349,9 @@ {"shape":"NotFoundException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Deletes a contact by the contact ARN.

      " + "documentation":"

      Deletes a contact by the contact ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "DeleteDevice":{ "name":"DeleteDevice", @@ -324,7 +366,9 @@ {"shape":"ConcurrentModificationException"}, {"shape":"InvalidCertificateAuthorityException"} ], - "documentation":"

      Removes a device from Alexa For Business.

      " + "documentation":"

      Removes a device from Alexa For Business.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "DeleteDeviceUsageData":{ "name":"DeleteDeviceUsageData", @@ -339,7 +383,9 @@ {"shape":"DeviceNotRegisteredException"}, {"shape":"LimitExceededException"} ], - "documentation":"

      When this action is called for a specified shared device, it allows authorized users to delete the device's entire previous history of voice input data and associated response data. This action can be called once every 24 hours for a specific shared device.

      " + "documentation":"

      When this action is called for a specified shared device, it allows authorized users to delete the device's entire previous history of voice input data and associated response data. This action can be called once every 24 hours for a specific shared device.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "DeleteGatewayGroup":{ "name":"DeleteGatewayGroup", @@ -352,7 +398,9 @@ "errors":[ {"shape":"ResourceAssociatedException"} ], - "documentation":"

      Deletes a gateway group.

      " + "documentation":"

      Deletes a gateway group.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "DeleteNetworkProfile":{ "name":"DeleteNetworkProfile", @@ -367,7 +415,9 @@ {"shape":"ConcurrentModificationException"}, {"shape":"NotFoundException"} ], - "documentation":"

      Deletes a network profile by the network profile ARN.

      " + "documentation":"

      Deletes a network profile by the network profile ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "DeleteProfile":{ "name":"DeleteProfile", @@ -381,7 +431,9 @@ {"shape":"NotFoundException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Deletes a room profile by the profile ARN.

      " + "documentation":"

      Deletes a room profile by the profile ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "DeleteRoom":{ "name":"DeleteRoom", @@ -395,7 +447,9 @@ {"shape":"NotFoundException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Deletes a room by the room ARN.

      " + "documentation":"

      Deletes a room by the room ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "DeleteRoomSkillParameter":{ "name":"DeleteRoomSkillParameter", @@ -408,7 +462,9 @@ "errors":[ {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Deletes room skill parameter details by room, skill, and parameter key ID.

      " + "documentation":"

      Deletes room skill parameter details by room, skill, and parameter key ID.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "DeleteSkillAuthorization":{ "name":"DeleteSkillAuthorization", @@ -422,7 +478,9 @@ {"shape":"NotFoundException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Unlinks a third-party account from a skill.

      " + "documentation":"

      Unlinks a third-party account from a skill.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "DeleteSkillGroup":{ "name":"DeleteSkillGroup", @@ -436,7 +494,9 @@ {"shape":"NotFoundException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Deletes a skill group by skill group ARN.

      " + "documentation":"

      Deletes a skill group by skill group ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "DeleteUser":{ "name":"DeleteUser", @@ -450,7 +510,9 @@ {"shape":"NotFoundException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Deletes a specified user by user ARN and enrollment ARN.

      " + "documentation":"

      Deletes a specified user by user ARN and enrollment ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "DisassociateContactFromAddressBook":{ "name":"DisassociateContactFromAddressBook", @@ -460,7 +522,9 @@ }, "input":{"shape":"DisassociateContactFromAddressBookRequest"}, "output":{"shape":"DisassociateContactFromAddressBookResponse"}, - "documentation":"

      Disassociates a contact from a given address book.

      " + "documentation":"

      Disassociates a contact from a given address book.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "DisassociateDeviceFromRoom":{ "name":"DisassociateDeviceFromRoom", @@ -474,7 +538,9 @@ {"shape":"ConcurrentModificationException"}, {"shape":"DeviceNotRegisteredException"} ], - "documentation":"

      Disassociates a device from its current room. The device continues to be connected to the Wi-Fi network and is still registered to the account. The device settings and skills are removed from the room.

      " + "documentation":"

      Disassociates a device from its current room. The device continues to be connected to the Wi-Fi network and is still registered to the account. The device settings and skills are removed from the room.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "DisassociateSkillFromSkillGroup":{ "name":"DisassociateSkillFromSkillGroup", @@ -488,7 +554,9 @@ {"shape":"ConcurrentModificationException"}, {"shape":"NotFoundException"} ], - "documentation":"

      Disassociates a skill from a skill group.

      " + "documentation":"

      Disassociates a skill from a skill group.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "DisassociateSkillFromUsers":{ "name":"DisassociateSkillFromUsers", @@ -502,7 +570,9 @@ {"shape":"ConcurrentModificationException"}, {"shape":"NotFoundException"} ], - "documentation":"

      Makes a private skill unavailable for enrolled users and prevents them from enabling it on their devices.

      " + "documentation":"

      Makes a private skill unavailable for enrolled users and prevents them from enabling it on their devices.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "DisassociateSkillGroupFromRoom":{ "name":"DisassociateSkillGroupFromRoom", @@ -515,7 +585,9 @@ "errors":[ {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Disassociates a skill group from a specified room. This disables all skills in the skill group on all devices in the room.

      " + "documentation":"

      Disassociates a skill group from a specified room. This disables all skills in the skill group on all devices in the room.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "ForgetSmartHomeAppliances":{ "name":"ForgetSmartHomeAppliances", @@ -528,7 +600,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Forgets smart home appliances associated to a room.

      " + "documentation":"

      Forgets smart home appliances associated to a room.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "GetAddressBook":{ "name":"GetAddressBook", @@ -541,7 +615,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Gets the address book details by the address book ARN.

      " + "documentation":"

      Gets the address book details by the address book ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "GetConferencePreference":{ "name":"GetConferencePreference", @@ -554,7 +630,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Retrieves the existing conference preferences.

      " + "documentation":"

      Retrieves the existing conference preferences.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "GetConferenceProvider":{ "name":"GetConferenceProvider", @@ -567,7 +645,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Gets details about a specific conference provider.

      " + "documentation":"

      Gets details about a specific conference provider.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "GetContact":{ "name":"GetContact", @@ -580,7 +660,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Gets the contact details by the contact ARN.

      " + "documentation":"

      Gets the contact details by the contact ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "GetDevice":{ "name":"GetDevice", @@ -593,7 +675,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Gets the details of a device by device ARN.

      " + "documentation":"

      Gets the details of a device by device ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "GetGateway":{ "name":"GetGateway", @@ -606,7 +690,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Retrieves the details of a gateway.

      " + "documentation":"

      Retrieves the details of a gateway.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "GetGatewayGroup":{ "name":"GetGatewayGroup", @@ -619,7 +705,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Retrieves the details of a gateway group.

      " + "documentation":"

      Retrieves the details of a gateway group.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "GetInvitationConfiguration":{ "name":"GetInvitationConfiguration", @@ -632,7 +720,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Retrieves the configured values for the user enrollment invitation email template.

      " + "documentation":"

      Retrieves the configured values for the user enrollment invitation email template.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "GetNetworkProfile":{ "name":"GetNetworkProfile", @@ -646,7 +736,9 @@ {"shape":"NotFoundException"}, {"shape":"InvalidSecretsManagerResourceException"} ], - "documentation":"

      Gets the network profile details by the network profile ARN.

      " + "documentation":"

      Gets the network profile details by the network profile ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "GetProfile":{ "name":"GetProfile", @@ -659,7 +751,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Gets the details of a room profile by profile ARN.

      " + "documentation":"

      Gets the details of a room profile by profile ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "GetRoom":{ "name":"GetRoom", @@ -672,7 +766,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Gets room details by room ARN.

      " + "documentation":"

      Gets room details by room ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "GetRoomSkillParameter":{ "name":"GetRoomSkillParameter", @@ -685,7 +781,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Gets room skill parameter details by room, skill, and parameter key ARN.

      " + "documentation":"

      Gets room skill parameter details by room, skill, and parameter key ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "GetSkillGroup":{ "name":"GetSkillGroup", @@ -698,7 +796,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Gets skill group details by skill group ARN.

      " + "documentation":"

      Gets skill group details by skill group ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "ListBusinessReportSchedules":{ "name":"ListBusinessReportSchedules", @@ -708,7 +808,9 @@ }, "input":{"shape":"ListBusinessReportSchedulesRequest"}, "output":{"shape":"ListBusinessReportSchedulesResponse"}, - "documentation":"

      Lists the details of the schedules that a user configured. A download URL of the report associated with each schedule is returned every time this action is called. A new download URL is returned each time, and is valid for 24 hours.

      " + "documentation":"

      Lists the details of the schedules that a user configured. A download URL of the report associated with each schedule is returned every time this action is called. A new download URL is returned each time, and is valid for 24 hours.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "ListConferenceProviders":{ "name":"ListConferenceProviders", @@ -718,7 +820,9 @@ }, "input":{"shape":"ListConferenceProvidersRequest"}, "output":{"shape":"ListConferenceProvidersResponse"}, - "documentation":"

      Lists conference providers under a specific AWS account.

      " + "documentation":"

      Lists conference providers under a specific AWS account.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "ListDeviceEvents":{ "name":"ListDeviceEvents", @@ -731,7 +835,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Lists the device event history, including device connection status, for up to 30 days.

      " + "documentation":"

      Lists the device event history, including device connection status, for up to 30 days.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "ListGatewayGroups":{ "name":"ListGatewayGroups", @@ -741,7 +847,9 @@ }, "input":{"shape":"ListGatewayGroupsRequest"}, "output":{"shape":"ListGatewayGroupsResponse"}, - "documentation":"

      Retrieves a list of gateway group summaries. Use GetGatewayGroup to retrieve details of a specific gateway group.

      " + "documentation":"

      Retrieves a list of gateway group summaries. Use GetGatewayGroup to retrieve details of a specific gateway group.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "ListGateways":{ "name":"ListGateways", @@ -751,7 +859,9 @@ }, "input":{"shape":"ListGatewaysRequest"}, "output":{"shape":"ListGatewaysResponse"}, - "documentation":"

      Retrieves a list of gateway summaries. Use GetGateway to retrieve details of a specific gateway. An optional gateway group ARN can be provided to only retrieve gateway summaries of gateways that are associated with that gateway group ARN.

      " + "documentation":"

      Retrieves a list of gateway summaries. Use GetGateway to retrieve details of a specific gateway. An optional gateway group ARN can be provided to only retrieve gateway summaries of gateways that are associated with that gateway group ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "ListSkills":{ "name":"ListSkills", @@ -761,7 +871,9 @@ }, "input":{"shape":"ListSkillsRequest"}, "output":{"shape":"ListSkillsResponse"}, - "documentation":"

      Lists all enabled skills in a specific skill group.

      " + "documentation":"

      Lists all enabled skills in a specific skill group.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "ListSkillsStoreCategories":{ "name":"ListSkillsStoreCategories", @@ -771,7 +883,9 @@ }, "input":{"shape":"ListSkillsStoreCategoriesRequest"}, "output":{"shape":"ListSkillsStoreCategoriesResponse"}, - "documentation":"

      Lists all categories in the Alexa skill store.

      " + "documentation":"

      Lists all categories in the Alexa skill store.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "ListSkillsStoreSkillsByCategory":{ "name":"ListSkillsStoreSkillsByCategory", @@ -781,7 +895,9 @@ }, "input":{"shape":"ListSkillsStoreSkillsByCategoryRequest"}, "output":{"shape":"ListSkillsStoreSkillsByCategoryResponse"}, - "documentation":"

      Lists all skills in the Alexa skill store by category.

      " + "documentation":"

      Lists all skills in the Alexa skill store by category.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "ListSmartHomeAppliances":{ "name":"ListSmartHomeAppliances", @@ -794,7 +910,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Lists all of the smart home appliances associated with a room.

      " + "documentation":"

      Lists all of the smart home appliances associated with a room.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "ListTags":{ "name":"ListTags", @@ -807,7 +925,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Lists all tags for the specified resource.

      " + "documentation":"

      Lists all tags for the specified resource.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "PutConferencePreference":{ "name":"PutConferencePreference", @@ -820,7 +940,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Sets the conference preferences on a specific conference provider at the account level.

      " + "documentation":"

      Sets the conference preferences on a specific conference provider at the account level.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "PutInvitationConfiguration":{ "name":"PutInvitationConfiguration", @@ -834,7 +956,9 @@ {"shape":"NotFoundException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Configures the email template for the user enrollment invitation with the specified attributes.

      " + "documentation":"

      Configures the email template for the user enrollment invitation with the specified attributes.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "PutRoomSkillParameter":{ "name":"PutRoomSkillParameter", @@ -847,7 +971,9 @@ "errors":[ {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Updates room skill parameter details by room, skill, and parameter key ID. Not all skills have a room skill parameter.

      " + "documentation":"

      Updates room skill parameter details by room, skill, and parameter key ID. Not all skills have a room skill parameter.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "PutSkillAuthorization":{ "name":"PutSkillAuthorization", @@ -861,7 +987,9 @@ {"shape":"UnauthorizedException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Links a user's account to a third-party skill provider. If this API operation is called by an assumed IAM role, the skill being linked must be a private skill. Also, the skill must be owned by the AWS account that assumed the IAM role.

      " + "documentation":"

      Links a user's account to a third-party skill provider. If this API operation is called by an assumed IAM role, the skill being linked must be a private skill. Also, the skill must be owned by the AWS account that assumed the IAM role.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "RegisterAVSDevice":{ "name":"RegisterAVSDevice", @@ -877,7 +1005,9 @@ {"shape":"NotFoundException"}, {"shape":"InvalidDeviceException"} ], - "documentation":"

      Registers an Alexa-enabled device built by an Original Equipment Manufacturer (OEM) using Alexa Voice Service (AVS).

      " + "documentation":"

      Registers an Alexa-enabled device built by an Original Equipment Manufacturer (OEM) using Alexa Voice Service (AVS).

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "RejectSkill":{ "name":"RejectSkill", @@ -891,7 +1021,9 @@ {"shape":"ConcurrentModificationException"}, {"shape":"NotFoundException"} ], - "documentation":"

      Disassociates a skill from the organization under a user's AWS account. If the skill is a private skill, it moves to an AcceptStatus of PENDING. Any private or public skill that is rejected can be added later by calling the ApproveSkill API.

      " + "documentation":"

      Disassociates a skill from the organization under a user's AWS account. If the skill is a private skill, it moves to an AcceptStatus of PENDING. Any private or public skill that is rejected can be added later by calling the ApproveSkill API.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "ResolveRoom":{ "name":"ResolveRoom", @@ -904,7 +1036,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Determines the details for the room from which a skill request was invoked. This operation is used by skill developers.

      To query ResolveRoom from an Alexa skill, the skill ID needs to be authorized. When the skill is using an AWS Lambda function, the skill is automatically authorized when you publish your skill as a private skill to your AWS account. Skills that are hosted using a custom web service must be manually authorized. To get your skill authorized, contact AWS Support with your AWS account ID that queries the ResolveRoom API and skill ID.

      " + "documentation":"

      Determines the details for the room from which a skill request was invoked. This operation is used by skill developers.

      To query ResolveRoom from an Alexa skill, the skill ID needs to be authorized. When the skill is using an AWS Lambda function, the skill is automatically authorized when you publish your skill as a private skill to your AWS account. Skills that are hosted using a custom web service must be manually authorized. To get your skill authorized, contact AWS Support with your AWS account ID that queries the ResolveRoom API and skill ID.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "RevokeInvitation":{ "name":"RevokeInvitation", @@ -918,7 +1052,9 @@ {"shape":"NotFoundException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Revokes an invitation and invalidates the enrollment URL.

      " + "documentation":"

      Revokes an invitation and invalidates the enrollment URL.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "SearchAddressBooks":{ "name":"SearchAddressBooks", @@ -928,7 +1064,9 @@ }, "input":{"shape":"SearchAddressBooksRequest"}, "output":{"shape":"SearchAddressBooksResponse"}, - "documentation":"

      Searches address books and lists the ones that meet a set of filter and sort criteria.

      " + "documentation":"

      Searches address books and lists the ones that meet a set of filter and sort criteria.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "SearchContacts":{ "name":"SearchContacts", @@ -938,7 +1076,9 @@ }, "input":{"shape":"SearchContactsRequest"}, "output":{"shape":"SearchContactsResponse"}, - "documentation":"

      Searches contacts and lists the ones that meet a set of filter and sort criteria.

      " + "documentation":"

      Searches contacts and lists the ones that meet a set of filter and sort criteria.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "SearchDevices":{ "name":"SearchDevices", @@ -948,7 +1088,9 @@ }, "input":{"shape":"SearchDevicesRequest"}, "output":{"shape":"SearchDevicesResponse"}, - "documentation":"

      Searches devices and lists the ones that meet a set of filter criteria.

      " + "documentation":"

      Searches devices and lists the ones that meet a set of filter criteria.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "SearchNetworkProfiles":{ "name":"SearchNetworkProfiles", @@ -958,7 +1100,9 @@ }, "input":{"shape":"SearchNetworkProfilesRequest"}, "output":{"shape":"SearchNetworkProfilesResponse"}, - "documentation":"

      Searches network profiles and lists the ones that meet a set of filter and sort criteria.

      " + "documentation":"

      Searches network profiles and lists the ones that meet a set of filter and sort criteria.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "SearchProfiles":{ "name":"SearchProfiles", @@ -968,7 +1112,9 @@ }, "input":{"shape":"SearchProfilesRequest"}, "output":{"shape":"SearchProfilesResponse"}, - "documentation":"

      Searches room profiles and lists the ones that meet a set of filter criteria.

      " + "documentation":"

      Searches room profiles and lists the ones that meet a set of filter criteria.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "SearchRooms":{ "name":"SearchRooms", @@ -978,7 +1124,9 @@ }, "input":{"shape":"SearchRoomsRequest"}, "output":{"shape":"SearchRoomsResponse"}, - "documentation":"

      Searches rooms and lists the ones that meet a set of filter and sort criteria.

      " + "documentation":"

      Searches rooms and lists the ones that meet a set of filter and sort criteria.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "SearchSkillGroups":{ "name":"SearchSkillGroups", @@ -988,7 +1136,9 @@ }, "input":{"shape":"SearchSkillGroupsRequest"}, "output":{"shape":"SearchSkillGroupsResponse"}, - "documentation":"

      Searches skill groups and lists the ones that meet a set of filter and sort criteria.

      " + "documentation":"

      Searches skill groups and lists the ones that meet a set of filter and sort criteria.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "SearchUsers":{ "name":"SearchUsers", @@ -998,7 +1148,9 @@ }, "input":{"shape":"SearchUsersRequest"}, "output":{"shape":"SearchUsersResponse"}, - "documentation":"

      Searches users and lists the ones that meet a set of filter and sort criteria.

      " + "documentation":"

      Searches users and lists the ones that meet a set of filter and sort criteria.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "SendAnnouncement":{ "name":"SendAnnouncement", @@ -1027,7 +1179,9 @@ {"shape":"InvalidUserStatusException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Sends an enrollment invitation email with a URL to a user. The URL is valid for 30 days or until you call this operation again, whichever comes first.

      " + "documentation":"

      Sends an enrollment invitation email with a URL to a user. The URL is valid for 30 days or until you call this operation again, whichever comes first.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "StartDeviceSync":{ "name":"StartDeviceSync", @@ -1040,7 +1194,9 @@ "errors":[ {"shape":"DeviceNotRegisteredException"} ], - "documentation":"

      Resets a device and its account to the known default settings. This clears all information and settings set by previous users in the following ways:

      • Bluetooth - This unpairs all bluetooth devices paired with your echo device.

      • Volume - This resets the echo device's volume to the default value.

      • Notifications - This clears all notifications from your echo device.

      • Lists - This clears all to-do items from your echo device.

      • Settings - This internally syncs the room's profile (if the device is assigned to a room), contacts, address books, delegation access for account linking, and communications (if enabled on the room profile).

      " + "documentation":"

      Resets a device and its account to the known default settings. This clears all information and settings set by previous users in the following ways:

      • Bluetooth - This unpairs all bluetooth devices paired with your echo device.

      • Volume - This resets the echo device's volume to the default value.

      • Notifications - This clears all notifications from your echo device.

      • Lists - This clears all to-do items from your echo device.

      • Settings - This internally syncs the room's profile (if the device is assigned to a room), contacts, address books, delegation access for account linking, and communications (if enabled on the room profile).

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "StartSmartHomeApplianceDiscovery":{ "name":"StartSmartHomeApplianceDiscovery", @@ -1053,7 +1209,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Initiates the discovery of any smart home appliances associated with the room.

      " + "documentation":"

      Initiates the discovery of any smart home appliances associated with the room.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "TagResource":{ "name":"TagResource", @@ -1066,7 +1224,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Adds metadata tags to a specified resource.

      " + "documentation":"

      Adds metadata tags to a specified resource.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "UntagResource":{ "name":"UntagResource", @@ -1079,7 +1239,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Removes metadata tags from a specified resource.

      " + "documentation":"

      Removes metadata tags from a specified resource.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "UpdateAddressBook":{ "name":"UpdateAddressBook", @@ -1094,7 +1256,9 @@ {"shape":"NameInUseException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Updates address book details by the address book ARN.

      " + "documentation":"

      Updates address book details by the address book ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "UpdateBusinessReportSchedule":{ "name":"UpdateBusinessReportSchedule", @@ -1108,7 +1272,9 @@ {"shape":"NotFoundException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Updates the configuration of the report delivery schedule with the specified schedule ARN.

      " + "documentation":"

      Updates the configuration of the report delivery schedule with the specified schedule ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "UpdateConferenceProvider":{ "name":"UpdateConferenceProvider", @@ -1121,7 +1287,9 @@ "errors":[ {"shape":"NotFoundException"} ], - "documentation":"

      Updates an existing conference provider's settings.

      " + "documentation":"

      Updates an existing conference provider's settings.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "UpdateContact":{ "name":"UpdateContact", @@ -1135,7 +1303,9 @@ {"shape":"NotFoundException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Updates the contact details by the contact ARN.

      " + "documentation":"

      Updates the contact details by the contact ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "UpdateDevice":{ "name":"UpdateDevice", @@ -1150,7 +1320,9 @@ {"shape":"ConcurrentModificationException"}, {"shape":"DeviceNotRegisteredException"} ], - "documentation":"

      Updates the device name by device ARN.

      " + "documentation":"

      Updates the device name by device ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "UpdateGateway":{ "name":"UpdateGateway", @@ -1164,7 +1336,9 @@ {"shape":"NotFoundException"}, {"shape":"NameInUseException"} ], - "documentation":"

      Updates the details of a gateway. If any optional field is not provided, the existing corresponding value is left unmodified.

      " + "documentation":"

      Updates the details of a gateway. If any optional field is not provided, the existing corresponding value is left unmodified.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "UpdateGatewayGroup":{ "name":"UpdateGatewayGroup", @@ -1178,7 +1352,9 @@ {"shape":"NotFoundException"}, {"shape":"NameInUseException"} ], - "documentation":"

      Updates the details of a gateway group. If any optional field is not provided, the existing corresponding value is left unmodified.

      " + "documentation":"

      Updates the details of a gateway group. If any optional field is not provided, the existing corresponding value is left unmodified.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "UpdateNetworkProfile":{ "name":"UpdateNetworkProfile", @@ -1195,7 +1371,9 @@ {"shape":"InvalidCertificateAuthorityException"}, {"shape":"InvalidSecretsManagerResourceException"} ], - "documentation":"

      Updates a network profile by the network profile ARN.

      " + "documentation":"

      Updates a network profile by the network profile ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "UpdateProfile":{ "name":"UpdateProfile", @@ -1210,7 +1388,9 @@ {"shape":"NameInUseException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Updates an existing room profile by room profile ARN.

      " + "documentation":"

      Updates an existing room profile by room profile ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "UpdateRoom":{ "name":"UpdateRoom", @@ -1224,7 +1404,9 @@ {"shape":"NotFoundException"}, {"shape":"NameInUseException"} ], - "documentation":"

      Updates room details by room ARN.

      " + "documentation":"

      Updates room details by room ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" }, "UpdateSkillGroup":{ "name":"UpdateSkillGroup", @@ -1239,7 +1421,9 @@ {"shape":"NameInUseException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Updates skill group details by skill group ARN.

      " + "documentation":"

      Updates skill group details by skill group ARN.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" } }, "shapes":{ @@ -2126,7 +2310,8 @@ "RequireCheckIn":{ "shape":"CreateRequireCheckIn", "documentation":"

      Settings for requiring a check in when a room is reserved. Alexa can cancel a room reservation if it's not checked into to make the room available for others. Users can check in by joining the meeting with Alexa or an AVS device, or by saying “Alexa, check in.”

      " - } + }, + "ProactiveJoin":{"shape":"CreateProactiveJoin"} }, "documentation":"

      Creates meeting room settings of a room profile.

      " }, @@ -2194,6 +2379,13 @@ } } }, + "CreateProactiveJoin":{ + "type":"structure", + "required":["EnabledByMotion"], + "members":{ + "EnabledByMotion":{"shape":"Boolean"} + } + }, "CreateProfileRequest":{ "type":"structure", "required":[ @@ -3040,7 +3232,7 @@ "type":"string", "max":128, "min":1, - "pattern":"([0-9a-zA-Z]([+-.\\w]*[0-9a-zA-Z])*@([0-9a-zA-Z]([-\\w]*[0-9a-zA-Z]+)*\\.)+[a-zA-Z]{2,9})" + "pattern":"\\w[+-.\\w]*@\\w[\\w\\.\\-]+\\.[0-9a-zA-Z]{2,24}" }, "EnablementType":{ "type":"string", @@ -3974,7 +4166,8 @@ "RequireCheckIn":{ "shape":"RequireCheckIn", "documentation":"

      Settings for requiring a check in when a room is reserved. Alexa can cancel a room reservation if it's not checked into. This makes the room available for others. Users can check in by joining the meeting with Alexa or an AVS device, or by saying “Alexa, check in.”

      " - } + }, + "ProactiveJoin":{"shape":"ProactiveJoin"} }, "documentation":"

      Meeting room settings of a room profile.

      " }, @@ -4220,6 +4413,12 @@ "sensitive":true }, "PrivacyPolicy":{"type":"string"}, + "ProactiveJoin":{ + "type":"structure", + "members":{ + "EnabledByMotion":{"shape":"Boolean"} + } + }, "ProductDescription":{"type":"string"}, "ProductId":{ "type":"string", @@ -5852,7 +6051,8 @@ "RequireCheckIn":{ "shape":"UpdateRequireCheckIn", "documentation":"

      Settings for requiring a check in when a room is reserved. Alexa can cancel a room reservation if it's not checked into to make the room available for others. Users can check in by joining the meeting with Alexa or an AVS device, or by saying “Alexa, check in.”

      " - } + }, + "ProactiveJoin":{"shape":"UpdateProactiveJoin"} }, "documentation":"

      Updates meeting room settings of a room profile.

      " }, @@ -5895,6 +6095,13 @@ "members":{ } }, + "UpdateProactiveJoin":{ + "type":"structure", + "required":["EnabledByMotion"], + "members":{ + "EnabledByMotion":{"shape":"Boolean"} + } + }, "UpdateProfileRequest":{ "type":"structure", "members":{ @@ -6105,5 +6312,7 @@ "pattern":"[a-zA-Z0-9@_+.-]*" } }, - "documentation":"

      Alexa for Business helps you use Alexa in your organization. Alexa for Business provides you with the tools to manage Alexa devices, enroll your users, and assign skills, at scale. You can build your own context-aware voice skills using the Alexa Skills Kit and the Alexa for Business API operations. You can also make these available as private skills for your organization. Alexa for Business makes it efficient to voice-enable your products and services, thus providing context-aware voice experiences for your customers. Device makers building with the Alexa Voice Service (AVS) can create fully integrated solutions, register their products with Alexa for Business, and manage them as shared devices in their organization.

      " + "documentation":"

      Alexa for Business has been retired and is no longer supported.

      ", + "deprecated":true, + "deprecatedMessage":"Alexa For Business is no longer supported" } diff --git a/services/amp/pom.xml b/services/amp/pom.xml index fd56a2401ad7..f10960f72908 100644 --- a/services/amp/pom.xml +++ b/services/amp/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT amp AWS Java SDK :: Services :: Amp diff --git a/services/amplify/pom.xml b/services/amplify/pom.xml index b0edfb58cbd2..4af412c09926 100644 --- a/services/amplify/pom.xml +++ b/services/amplify/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT amplify AWS Java SDK :: Services :: Amplify diff --git a/services/amplifybackend/pom.xml b/services/amplifybackend/pom.xml index 7340e1f2401d..9665d70cda75 100644 --- a/services/amplifybackend/pom.xml +++ b/services/amplifybackend/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT amplifybackend AWS Java SDK :: Services :: Amplify Backend diff --git a/services/amplifyuibuilder/pom.xml b/services/amplifyuibuilder/pom.xml index 8b280c774932..195eca8be3d0 100644 --- a/services/amplifyuibuilder/pom.xml +++ b/services/amplifyuibuilder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT amplifyuibuilder AWS Java SDK :: Services :: Amplify UI Builder diff --git a/services/amplifyuibuilder/src/main/resources/codegen-resources/endpoint-tests.json b/services/amplifyuibuilder/src/main/resources/codegen-resources/endpoint-tests.json index 5389e0b4f801..236a7ed1983c 100644 --- a/services/amplifyuibuilder/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/amplifyuibuilder/src/main/resources/codegen-resources/endpoint-tests.json @@ -9,8 +9,8 @@ }, "params": { "Region": "ap-northeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -22,8 +22,8 @@ }, "params": { "Region": "ap-northeast-2", - 
"UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -35,8 +35,8 @@ }, "params": { "Region": "ap-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -48,8 +48,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,8 +61,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -74,8 +74,8 @@ }, "params": { "Region": "ca-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -87,8 +87,8 @@ }, "params": { "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -100,8 +100,8 @@ }, "params": { "Region": "eu-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -113,8 +113,8 @@ }, "params": { "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -126,8 +126,8 @@ }, "params": { "Region": "eu-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -139,8 +139,8 @@ }, "params": { "Region": "eu-west-3", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -152,8 +152,8 @@ }, "params": { "Region": "me-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -165,8 +165,8 @@ }, "params": { "Region": "sa-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -178,8 +178,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -191,8 +191,8 @@ }, "params": { "Region": "us-east-2", - 
"UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -204,8 +204,8 @@ }, "params": { "Region": "us-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -217,8 +217,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -230,8 +230,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -243,8 +243,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -256,8 +256,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -269,8 +269,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -282,8 +282,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -295,8 +295,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -308,8 +308,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -321,8 +321,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -334,8 +334,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -347,8 +347,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -360,8 +360,8 @@ }, "params": { "Region": "us-gov-east-1", - 
"UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -371,8 +371,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -384,8 +384,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -395,8 +395,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -408,8 +408,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -419,8 +419,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -432,8 +432,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -443,8 +443,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -456,8 +456,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -469,8 +469,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -482,8 +482,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -494,8 +494,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -506,8 +506,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } }, diff --git 
a/services/amplifyuibuilder/src/main/resources/codegen-resources/paginators-1.json b/services/amplifyuibuilder/src/main/resources/codegen-resources/paginators-1.json index c3557207f8f0..52f5e312e195 100644 --- a/services/amplifyuibuilder/src/main/resources/codegen-resources/paginators-1.json +++ b/services/amplifyuibuilder/src/main/resources/codegen-resources/paginators-1.json @@ -15,6 +15,12 @@ "output_token": "nextToken", "result_key": "entities" }, + "ListCodegenJobs": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "entities" + }, "ListComponents": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/services/amplifyuibuilder/src/main/resources/codegen-resources/service-2.json b/services/amplifyuibuilder/src/main/resources/codegen-resources/service-2.json index 43019cf40016..a0b39beb017d 100644 --- a/services/amplifyuibuilder/src/main/resources/codegen-resources/service-2.json +++ b/services/amplifyuibuilder/src/main/resources/codegen-resources/service-2.json @@ -45,7 +45,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

      Creates a new form for an Amplify app.

      ", + "documentation":"

      Creates a new form for an Amplify app.

      ", "idempotent":true }, "CreateTheme":{ @@ -173,6 +173,23 @@ ], "documentation":"

      Exports theme configurations to code that is ready to integrate into an Amplify app.

      " }, + "GetCodegenJob":{ + "name":"GetCodegenJob", + "http":{ + "method":"GET", + "requestUri":"/app/{appId}/environment/{environmentName}/codegen-jobs/{id}", + "responseCode":200 + }, + "input":{"shape":"GetCodegenJobRequest"}, + "output":{"shape":"GetCodegenJobResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Returns an existing code generation job.

      " + }, "GetComponent":{ "name":"GetComponent", "http":{ @@ -236,6 +253,22 @@ ], "documentation":"

      Returns an existing theme for an Amplify app.

      " }, + "ListCodegenJobs":{ + "name":"ListCodegenJobs", + "http":{ + "method":"GET", + "requestUri":"/app/{appId}/environment/{environmentName}/codegen-jobs", + "responseCode":200 + }, + "input":{"shape":"ListCodegenJobsRequest"}, + "output":{"shape":"ListCodegenJobsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      Retrieves a list of code generation jobs for a specified Amplify app and backend environment.

      " + }, "ListComponents":{ "name":"ListComponents", "http":{ @@ -309,6 +342,22 @@ ], "documentation":"

      Refreshes a previously issued access token that might have expired.

      " }, + "StartCodegenJob":{ + "name":"StartCodegenJob", + "http":{ + "method":"POST", + "requestUri":"/app/{appId}/environment/{environmentName}/codegen-jobs", + "responseCode":200 + }, + "input":{"shape":"StartCodegenJobRequest"}, + "output":{"shape":"StartCodegenJobResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      Starts a code generation job for a specified Amplify app and backend environment.

      " + }, "UpdateComponent":{ "name":"UpdateComponent", "http":{ @@ -404,10 +453,366 @@ }, "documentation":"

      Represents the event action configuration for an element of a Component or ComponentChild. Use for the workflow feature in Amplify Studio that allows you to bind events and actions to components. ActionParameters defines the action that is performed when an event occurs on the component.

      " }, + "AppId":{ + "type":"string", + "max":20, + "min":1, + "pattern":"d[a-z0-9]+" + }, + "AssociatedFieldsList":{ + "type":"list", + "member":{"shape":"String"} + }, "Boolean":{ "type":"boolean", "box":true }, + "CodegenFeatureFlags":{ + "type":"structure", + "members":{ + "isRelationshipSupported":{ + "shape":"Boolean", + "documentation":"

      Specifies whether a code generation job supports data relationships.

      " + }, + "isNonModelSupported":{ + "shape":"Boolean", + "documentation":"

      Specifies whether a code generation job supports non models.

      " + } + }, + "documentation":"

      Describes the feature flags that you can specify for a code generation job.

      " + }, + "CodegenGenericDataEnum":{ + "type":"structure", + "required":["values"], + "members":{ + "values":{ + "shape":"CodegenGenericDataEnumValuesList", + "documentation":"

      The list of enum values in the generic data schema.

      " + } + }, + "documentation":"

      Describes the enums in a generic data schema.

      " + }, + "CodegenGenericDataEnumValuesList":{ + "type":"list", + "member":{"shape":"String"} + }, + "CodegenGenericDataEnums":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"CodegenGenericDataEnum"} + }, + "CodegenGenericDataField":{ + "type":"structure", + "required":[ + "dataType", + "dataTypeValue", + "required", + "readOnly", + "isArray" + ], + "members":{ + "dataType":{ + "shape":"CodegenGenericDataFieldDataType", + "documentation":"

      The data type for the generic data field.

      " + }, + "dataTypeValue":{ + "shape":"String", + "documentation":"

      The value of the data type for the generic data field.

      " + }, + "required":{ + "shape":"Boolean", + "documentation":"

      Specifies whether the generic data field is required.

      " + }, + "readOnly":{ + "shape":"Boolean", + "documentation":"

      Specifies whether the generic data field is read-only.

      " + }, + "isArray":{ + "shape":"Boolean", + "documentation":"

      Specifies whether the generic data field is an array.

      " + }, + "relationship":{ + "shape":"CodegenGenericDataRelationshipType", + "documentation":"

      The relationship of the generic data schema.

      " + } + }, + "documentation":"

      Describes a field in a generic data schema.

      " + }, + "CodegenGenericDataFieldDataType":{ + "type":"string", + "enum":[ + "ID", + "String", + "Int", + "Float", + "AWSDate", + "AWSTime", + "AWSDateTime", + "AWSTimestamp", + "AWSEmail", + "AWSURL", + "AWSIPAddress", + "Boolean", + "AWSJSON", + "AWSPhone", + "Enum", + "Model", + "NonModel" + ] + }, + "CodegenGenericDataFields":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"CodegenGenericDataField"} + }, + "CodegenGenericDataModel":{ + "type":"structure", + "required":[ + "fields", + "primaryKeys" + ], + "members":{ + "fields":{ + "shape":"CodegenGenericDataFields", + "documentation":"

      The fields in the generic data model.

      " + }, + "isJoinTable":{ + "shape":"Boolean", + "documentation":"

      Specifies whether the generic data model is a join table.

      " + }, + "primaryKeys":{ + "shape":"CodegenPrimaryKeysList", + "documentation":"

      The primary keys of the generic data model.

      " + } + }, + "documentation":"

      Describes a model in a generic data schema.

      " + }, + "CodegenGenericDataModels":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"CodegenGenericDataModel"} + }, + "CodegenGenericDataNonModel":{ + "type":"structure", + "required":["fields"], + "members":{ + "fields":{ + "shape":"CodegenGenericDataNonModelFields", + "documentation":"

      The fields in a generic data schema non model.

      " + } + }, + "documentation":"

      Describes a non-model in a generic data schema.

      " + }, + "CodegenGenericDataNonModelFields":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"CodegenGenericDataField"} + }, + "CodegenGenericDataNonModels":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"CodegenGenericDataNonModel"} + }, + "CodegenGenericDataRelationshipType":{ + "type":"structure", + "required":[ + "type", + "relatedModelName" + ], + "members":{ + "type":{ + "shape":"GenericDataRelationshipType", + "documentation":"

      The data relationship type.

      " + }, + "relatedModelName":{ + "shape":"String", + "documentation":"

      The name of the related model in the data relationship.

      " + }, + "relatedModelFields":{ + "shape":"RelatedModelFieldsList", + "documentation":"

      The related model fields in the data relationship.

      " + }, + "canUnlinkAssociatedModel":{ + "shape":"Boolean", + "documentation":"

      Specifies whether the relationship can unlink the associated model.

      " + }, + "relatedJoinFieldName":{ + "shape":"String", + "documentation":"

      The name of the related join field in the data relationship.

      " + }, + "relatedJoinTableName":{ + "shape":"String", + "documentation":"

      The name of the related join table in the data relationship.

      " + }, + "belongsToFieldOnRelatedModel":{ + "shape":"String", + "documentation":"

      The value of the belongsTo field on the related data model.

      " + }, + "associatedFields":{ + "shape":"AssociatedFieldsList", + "documentation":"

      The associated fields of the data relationship.

      " + }, + "isHasManyIndex":{ + "shape":"Boolean", + "documentation":"

      Specifies whether the @index directive is supported for a hasMany data relationship.

      " + } + }, + "documentation":"

      Describes the relationship between generic data models.

      " + }, + "CodegenJob":{ + "type":"structure", + "required":[ + "id", + "appId", + "environmentName" + ], + "members":{ + "id":{ + "shape":"Uuid", + "documentation":"

      The unique ID for the code generation job.

      " + }, + "appId":{ + "shape":"AppId", + "documentation":"

      The ID of the Amplify app associated with the code generation job.

      " + }, + "environmentName":{ + "shape":"String", + "documentation":"

      The name of the backend environment associated with the code generation job.

      " + }, + "renderConfig":{"shape":"CodegenJobRenderConfig"}, + "genericDataSchema":{"shape":"CodegenJobGenericDataSchema"}, + "autoGenerateForms":{ + "shape":"Boolean", + "documentation":"

      Specifies whether to autogenerate forms in the code generation job.

      " + }, + "features":{"shape":"CodegenFeatureFlags"}, + "status":{ + "shape":"CodegenJobStatus", + "documentation":"

      The status of the code generation job.

      " + }, + "statusMessage":{ + "shape":"String", + "documentation":"

      The customized status message for the code generation job.

      " + }, + "asset":{ + "shape":"CodegenJobAsset", + "documentation":"

      The CodegenJobAsset to use for the code generation job.

      " + }, + "tags":{ + "shape":"Tags", + "documentation":"

      One or more key-value pairs to use when tagging the code generation job.

      " + }, + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

      The time that the code generation job was created.

      " + }, + "modifiedAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

      The time that the code generation job was modified.

      " + } + }, + "documentation":"

      Describes the configuration for a code generation job that is associated with an Amplify app.

      " + }, + "CodegenJobAsset":{ + "type":"structure", + "members":{ + "downloadUrl":{ + "shape":"String", + "documentation":"

      The URL to use to access the asset.

      " + } + }, + "documentation":"

      Describes an asset for a code generation job.

      " + }, + "CodegenJobGenericDataSchema":{ + "type":"structure", + "required":[ + "dataSourceType", + "models", + "enums", + "nonModels" + ], + "members":{ + "dataSourceType":{ + "shape":"CodegenJobGenericDataSourceType", + "documentation":"

      The type of the data source for the schema. Currently, the only valid value is an Amplify DataStore.

      " + }, + "models":{ + "shape":"CodegenGenericDataModels", + "documentation":"

      The name of a CodegenGenericDataModel.

      " + }, + "enums":{ + "shape":"CodegenGenericDataEnums", + "documentation":"

      The name of a CodegenGenericDataEnum.

      " + }, + "nonModels":{ + "shape":"CodegenGenericDataNonModels", + "documentation":"

      The name of a CodegenGenericDataNonModel.

      " + } + }, + "documentation":"

      Describes the data schema for a code generation job.

      " + }, + "CodegenJobGenericDataSourceType":{ + "type":"string", + "enum":["DataStore"] + }, + "CodegenJobRenderConfig":{ + "type":"structure", + "members":{ + "react":{ + "shape":"ReactStartCodegenJobData", + "documentation":"

      The name of the ReactStartCodegenJobData object.

      " + } + }, + "documentation":"

      Describes the configuration information for rendering the UI component associated with the code generation job.

      ", + "union":true + }, + "CodegenJobStatus":{ + "type":"string", + "enum":[ + "in_progress", + "failed", + "succeeded" + ] + }, + "CodegenJobSummary":{ + "type":"structure", + "required":[ + "appId", + "environmentName", + "id" + ], + "members":{ + "appId":{ + "shape":"AppId", + "documentation":"

      The unique ID of the Amplify app associated with the code generation job.

      " + }, + "environmentName":{ + "shape":"String", + "documentation":"

      The name of the backend environment associated with the code generation job.

      " + }, + "id":{ + "shape":"Uuid", + "documentation":"

      The unique ID for the code generation job summary.

      " + }, + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

      The time that the code generation job summary was created.

      " + }, + "modifiedAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

      The time that the code generation job summary was modified.

      " + } + }, + "documentation":"

      A summary of the basic information about the code generation job.

      " + }, + "CodegenJobSummaryList":{ + "type":"list", + "member":{"shape":"CodegenJobSummary"} + }, + "CodegenPrimaryKeysList":{ + "type":"list", + "member":{"shape":"String"} + }, "Component":{ "type":"structure", "required":[ @@ -1913,6 +2318,52 @@ "type":"list", "member":{"shape":"FormSummary"} }, + "GenericDataRelationshipType":{ + "type":"string", + "enum":[ + "HAS_MANY", + "HAS_ONE", + "BELONGS_TO" + ] + }, + "GetCodegenJobRequest":{ + "type":"structure", + "required":[ + "appId", + "environmentName", + "id" + ], + "members":{ + "appId":{ + "shape":"AppId", + "documentation":"

      The unique ID of the Amplify app associated with the code generation job.

      ", + "location":"uri", + "locationName":"appId" + }, + "environmentName":{ + "shape":"String", + "documentation":"

      The name of the backend environment that is a part of the Amplify app associated with the code generation job.

      ", + "location":"uri", + "locationName":"environmentName" + }, + "id":{ + "shape":"Uuid", + "documentation":"

      The unique ID of the code generation job.

      ", + "location":"uri", + "locationName":"id" + } + } + }, + "GetCodegenJobResponse":{ + "type":"structure", + "members":{ + "job":{ + "shape":"CodegenJob", + "documentation":"

      The configuration settings for the code generation job.

      " + } + }, + "payload":"job" + }, "GetComponentRequest":{ "type":"structure", "required":[ @@ -2088,6 +2539,28 @@ }, "exception":true }, + "JSModule":{ + "type":"string", + "enum":[ + "es2020", + "esnext" + ] + }, + "JSScript":{ + "type":"string", + "enum":[ + "jsx", + "tsx", + "js" + ] + }, + "JSTarget":{ + "type":"string", + "enum":[ + "es2015", + "es2020" + ] + }, "LabelDecorator":{ "type":"string", "enum":[ @@ -2096,6 +2569,59 @@ "none" ] }, + "ListCodegenJobsLimit":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListCodegenJobsRequest":{ + "type":"structure", + "required":[ + "appId", + "environmentName" + ], + "members":{ + "appId":{ + "shape":"AppId", + "documentation":"

      The unique ID for the Amplify app.

      ", + "location":"uri", + "locationName":"appId" + }, + "environmentName":{ + "shape":"String", + "documentation":"

      The name of the backend environment that is a part of the Amplify app.

      ", + "location":"uri", + "locationName":"environmentName" + }, + "nextToken":{ + "shape":"String", + "documentation":"

      The token to request the next page of results.

      ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"ListCodegenJobsLimit", + "documentation":"

      The maximum number of jobs to retrieve.

      ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListCodegenJobsResponse":{ + "type":"structure", + "required":["entities"], + "members":{ + "entities":{ + "shape":"CodegenJobSummaryList", + "documentation":"

      The list of code generation jobs for the Amplify app.

      " + }, + "nextToken":{ + "shape":"String", + "documentation":"

      The pagination token that's included if more results are available.

      " + } + } + }, "ListComponentsLimit":{ "type":"integer", "max":100, @@ -2362,6 +2888,32 @@ }, "payload":"body" }, + "ReactStartCodegenJobData":{ + "type":"structure", + "members":{ + "module":{ + "shape":"JSModule", + "documentation":"

      The JavaScript module type.

      " + }, + "target":{ + "shape":"JSTarget", + "documentation":"

      The ECMAScript specification to use.

      " + }, + "script":{ + "shape":"JSScript", + "documentation":"

      The file type to use for a JavaScript project.

      " + }, + "renderTypeDeclarations":{ + "shape":"Boolean", + "documentation":"

      Specifies whether the code generation job should render type declaration files.

      " + }, + "inlineSourceMap":{ + "shape":"Boolean", + "documentation":"

      Specifies whether the code generation job should render inline source maps.

      " + } + }, + "documentation":"

      Describes the code generation job configuration for a React project.

      " + }, "RefreshTokenRequest":{ "type":"structure", "required":[ @@ -2414,6 +2966,10 @@ } } }, + "RelatedModelFieldsList":{ + "type":"list", + "member":{"shape":"String"} + }, "ResourceConflictException":{ "type":"structure", "members":{ @@ -2519,6 +3075,77 @@ "type":"list", "member":{"shape":"SortProperty"} }, + "StartCodegenJobData":{ + "type":"structure", + "required":["renderConfig"], + "members":{ + "renderConfig":{ + "shape":"CodegenJobRenderConfig", + "documentation":"

      The code generation configuration for the codegen job.

      " + }, + "genericDataSchema":{ + "shape":"CodegenJobGenericDataSchema", + "documentation":"

      The data schema to use for a code generation job.

      " + }, + "autoGenerateForms":{ + "shape":"Boolean", + "documentation":"

      Specifies whether to autogenerate forms in the code generation job.

      " + }, + "features":{ + "shape":"CodegenFeatureFlags", + "documentation":"

      The feature flags for a code generation job.

      " + }, + "tags":{ + "shape":"Tags", + "documentation":"

      One or more key-value pairs to use when tagging the code generation job data.

      " + } + }, + "documentation":"

      The code generation job resource configuration.

      " + }, + "StartCodegenJobRequest":{ + "type":"structure", + "required":[ + "appId", + "environmentName", + "codegenJobToCreate" + ], + "members":{ + "appId":{ + "shape":"AppId", + "documentation":"

      The unique ID for the Amplify app.

      ", + "location":"uri", + "locationName":"appId" + }, + "environmentName":{ + "shape":"String", + "documentation":"

      The name of the backend environment that is a part of the Amplify app.

      ", + "location":"uri", + "locationName":"environmentName" + }, + "clientToken":{ + "shape":"String", + "documentation":"

      The idempotency token used to ensure that the code generation job request completes only once.

      ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + }, + "codegenJobToCreate":{ + "shape":"StartCodegenJobData", + "documentation":"

      The code generation job resource configuration.

      " + } + }, + "payload":"codegenJobToCreate" + }, + "StartCodegenJobResponse":{ + "type":"structure", + "members":{ + "entity":{ + "shape":"CodegenJob", + "documentation":"

      The code generation job for a UI component that is associated with an Amplify app.

      " + } + }, + "payload":"entity" + }, "StorageAccessLevel":{ "type":"string", "enum":[ @@ -2675,6 +3302,18 @@ "type":"list", "member":{"shape":"ThemeValues"} }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

      The request was denied due to request throttling.

      ", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, "TokenProviders":{ "type":"string", "enum":["figma"] diff --git a/services/apigateway/pom.xml b/services/apigateway/pom.xml index 3aa5daa32f08..5cca53ad0c5c 100644 --- a/services/apigateway/pom.xml +++ b/services/apigateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT apigateway AWS Java SDK :: Services :: Amazon API Gateway diff --git a/services/apigatewaymanagementapi/pom.xml b/services/apigatewaymanagementapi/pom.xml index c21b2b3eecd9..ef9f0aa490f0 100644 --- a/services/apigatewaymanagementapi/pom.xml +++ b/services/apigatewaymanagementapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT apigatewaymanagementapi AWS Java SDK :: Services :: ApiGatewayManagementApi diff --git a/services/apigatewayv2/pom.xml b/services/apigatewayv2/pom.xml index 551fec283129..9a6f07a750cc 100644 --- a/services/apigatewayv2/pom.xml +++ b/services/apigatewayv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT apigatewayv2 AWS Java SDK :: Services :: ApiGatewayV2 diff --git a/services/appconfig/pom.xml b/services/appconfig/pom.xml index 04ce95e19c64..dad6314cd41b 100644 --- a/services/appconfig/pom.xml +++ b/services/appconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT appconfig AWS Java SDK :: Services :: AppConfig diff --git a/services/appconfigdata/pom.xml b/services/appconfigdata/pom.xml index 888e34211f87..5041e128f1b0 100644 --- a/services/appconfigdata/pom.xml +++ b/services/appconfigdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT appconfigdata AWS Java SDK :: Services :: App Config Data diff --git a/services/appflow/pom.xml b/services/appflow/pom.xml index 3b743cdeaf54..095ff1c138e1 100644 --- a/services/appflow/pom.xml +++ 
b/services/appflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT appflow AWS Java SDK :: Services :: Appflow diff --git a/services/appflow/src/main/resources/codegen-resources/service-2.json b/services/appflow/src/main/resources/codegen-resources/service-2.json index 6cfbd10169ab..63ccce2dd6a8 100644 --- a/services/appflow/src/main/resources/codegen-resources/service-2.json +++ b/services/appflow/src/main/resources/codegen-resources/service-2.json @@ -265,6 +265,22 @@ ], "documentation":"

      Registers a new custom connector with your Amazon Web Services account. Before you can register the connector, you must deploy the associated AWS lambda function in your account.

      " }, + "ResetConnectorMetadataCache":{ + "name":"ResetConnectorMetadataCache", + "http":{ + "method":"POST", + "requestUri":"/reset-connector-metadata-cache" + }, + "input":{"shape":"ResetConnectorMetadataCacheRequest"}, + "output":{"shape":"ResetConnectorMetadataCacheResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Resets metadata about your connector entities that Amazon AppFlow stored in its cache. Use this action when you want Amazon AppFlow to return the latest information about the data that you have in a source application.

      Amazon AppFlow returns metadata about your entities when you use the ListConnectorEntities or DescribeConnectorEntities actions. Following these actions, Amazon AppFlow caches the metadata to reduce the number of API requests that it must send to the source application. Amazon AppFlow automatically resets the cache once every hour, but you can use this action when you want to get the latest metadata right away.

      " + }, "StartFlow":{ "name":"StartFlow", "http":{ @@ -870,6 +886,14 @@ "registeredBy":{ "shape":"RegisteredBy", "documentation":"

      Information about who registered the connector.

      " + }, + "supportedDataTransferTypes":{ + "shape":"SupportedDataTransferTypeList", + "documentation":"

      The data transfer types that the connector supports.

      RECORD

      Structured records.

      FILE

      Files or binary data.

      " + }, + "supportedDataTransferApis":{ + "shape":"SupportedDataTransferApis", + "documentation":"

      The APIs of the connector application that Amazon AppFlow can use to transfer your data.

      " } }, "documentation":"

      The configuration settings related to a given connector.

      " @@ -930,6 +954,10 @@ "connectorModes":{ "shape":"ConnectorModeList", "documentation":"

      The connection mode that the connector supports.

      " + }, + "supportedDataTransferTypes":{ + "shape":"SupportedDataTransferTypeList", + "documentation":"

      The data transfer types that the connector supports.

      RECORD

      Structured records.

      FILE

      Files or binary data.

      " } }, "documentation":"

      Information about the registered connector.

      " @@ -1839,6 +1867,10 @@ "customProperties":{ "shape":"CustomProperties", "documentation":"

      Custom properties that are required to use the custom connector as a source.

      " + }, + "dataTransferApi":{ + "shape":"DataTransferApi", + "documentation":"

      The API of the connector application that Amazon AppFlow uses to transfer your data.

      " } }, "documentation":"

      The properties that are applied when the custom connector is being used as a source.

      " @@ -1894,6 +1926,33 @@ "Complete" ] }, + "DataTransferApi":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"DataTransferApiTypeName", + "documentation":"

      The name of the connector application API.

      " + }, + "Type":{ + "shape":"DataTransferApiType", + "documentation":"

      You can specify one of the following types:

      AUTOMATIC

      The default. Optimizes a flow for datasets that fluctuate in size from small to large. For each flow run, Amazon AppFlow chooses to use the SYNC or ASYNC API type based on the amount of data that the run transfers.

      SYNC

      A synchronous API. This type of API optimizes a flow for small to medium-sized datasets.

      ASYNC

      An asynchronous API. This type of API optimizes a flow for large datasets.

      " + } + }, + "documentation":"

      The API of the connector application that Amazon AppFlow uses to transfer your data.

      " + }, + "DataTransferApiType":{ + "type":"string", + "enum":[ + "SYNC", + "ASYNC", + "AUTOMATIC" + ] + }, + "DataTransferApiTypeName":{ + "type":"string", + "max":64, + "pattern":"[\\w/-]+" + }, "DatabaseName":{ "type":"string", "max":512, @@ -4024,6 +4083,36 @@ }, "documentation":"

      Describes the status of an attempt from Amazon AppFlow to register a resource.

      When you run a flow that you've configured to use a metadata catalog, Amazon AppFlow registers a metadata table and data partitions with that catalog. This operation provides the status of that registration attempt. The operation also indicates how many related resources Amazon AppFlow created or updated.

      " }, + "ResetConnectorMetadataCacheRequest":{ + "type":"structure", + "members":{ + "connectorProfileName":{ + "shape":"ConnectorProfileName", + "documentation":"

      The name of the connector profile that you want to reset cached metadata for.

      You can omit this parameter if you're resetting the cache for any of the following connectors: Amazon Connect, Amazon EventBridge, Amazon Lookout for Metrics, Amazon S3, or Upsolver. If you're resetting the cache for any other connector, you must include this parameter in your request.

      " + }, + "connectorType":{ + "shape":"ConnectorType", + "documentation":"

      The type of connector to reset cached metadata for.

      You must include this parameter in your request if you're resetting the cache for any of the following connectors: Amazon Connect, Amazon EventBridge, Amazon Lookout for Metrics, Amazon S3, or Upsolver. If you're resetting the cache for any other connector, you can omit this parameter from your request.

      " + }, + "connectorEntityName":{ + "shape":"EntityName", + "documentation":"

      Use this parameter if you want to reset cached metadata about the details for an individual entity.

      If you don't include this parameter in your request, Amazon AppFlow only resets cached metadata about entity names, not entity details.

      " + }, + "entitiesPath":{ + "shape":"EntitiesPath", + "documentation":"

      Use this parameter only if you’re resetting the cached metadata about a nested entity. Only some connectors support nested entities. A nested entity is one that has another entity as a parent. To use this parameter, specify the name of the parent entity.

      To look up the parent-child relationship of entities, you can send a ListConnectorEntities request that omits the entitiesPath parameter. Amazon AppFlow will return a list of top-level entities. For each one, it indicates whether the entity has nested entities. Then, in a subsequent ListConnectorEntities request, you can specify a parent entity name for the entitiesPath parameter. Amazon AppFlow will return a list of the child entities for that parent.

      " + }, + "apiVersion":{ + "shape":"ApiVersion", + "documentation":"

      The API version that you specified in the connector profile that you’re resetting cached metadata for. You must use this parameter only if the connector supports multiple API versions or if the connector type is CustomConnector.

      To look up how many versions a connector supports, use the DescribeConnectors action. In the response, find the value that Amazon AppFlow returns for the connectorVersion parameter.

      To look up the connector type, use the DescribeConnectorProfiles action. In the response, find the value that Amazon AppFlow returns for the connectorType parameter.

      To look up the API version that you specified in a connector profile, use the DescribeConnectorProfiles action.

      " + } + } + }, + "ResetConnectorMetadataCacheResponse":{ + "type":"structure", + "members":{ + } + }, "ResourceNotFoundException":{ "type":"structure", "members":{ @@ -4995,6 +5084,21 @@ "type":"list", "member":{"shape":"SupportedApiVersion"} }, + "SupportedDataTransferApis":{ + "type":"list", + "member":{"shape":"DataTransferApi"} + }, + "SupportedDataTransferType":{ + "type":"string", + "enum":[ + "RECORD", + "FILE" + ] + }, + "SupportedDataTransferTypeList":{ + "type":"list", + "member":{"shape":"SupportedDataTransferType"} + }, "SupportedFieldTypeDetails":{ "type":"structure", "required":["v1"], diff --git a/services/appintegrations/pom.xml b/services/appintegrations/pom.xml index 7341c359435c..0ce273415e5e 100644 --- a/services/appintegrations/pom.xml +++ b/services/appintegrations/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT appintegrations AWS Java SDK :: Services :: App Integrations diff --git a/services/applicationautoscaling/pom.xml b/services/applicationautoscaling/pom.xml index 68822d9a7f5f..3a6ad9d90240 100644 --- a/services/applicationautoscaling/pom.xml +++ b/services/applicationautoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT applicationautoscaling AWS Java SDK :: Services :: AWS Application Auto Scaling diff --git a/services/applicationautoscaling/src/main/resources/codegen-resources/service-2.json b/services/applicationautoscaling/src/main/resources/codegen-resources/service-2.json index 9e882cc49714..99b4f5d78520 100644 --- a/services/applicationautoscaling/src/main/resources/codegen-resources/service-2.json +++ b/services/applicationautoscaling/src/main/resources/codegen-resources/service-2.json @@ -692,7 +692,8 @@ "ElastiCacheReplicaEngineCPUUtilization", "ElastiCacheDatabaseMemoryUsageCountedForEvictPercentage", "NeptuneReaderAverageCPUUtilization", - "SageMakerVariantProvisionedConcurrencyUtilization" + 
"SageMakerVariantProvisionedConcurrencyUtilization", + "ElastiCacheDatabaseCapacityUsageCountedForEvictPercentage" ] }, "MetricUnit":{"type":"string"}, @@ -887,7 +888,7 @@ }, "MinCapacity":{ "shape":"ResourceCapacity", - "documentation":"

      The minimum value that you plan to scale in to. When a scaling policy is in effect, Application Auto Scaling can scale in (contract) as needed to the minimum capacity limit in response to changing demand. This property is required when registering a new scalable target.

      For the following resources, the minimum value allowed is 0.

      • AppStream 2.0 fleets

      • Aurora DB clusters

      • ECS services

      • EMR clusters

      • Lambda provisioned concurrency

      • SageMaker Serverless endpoint provisioned concurrency

      • SageMaker endpoint variants

      • Spot Fleets

      • custom resources

      It's strongly recommended that you specify a value greater than 0. A value greater than 0 means that data points are continuously reported to CloudWatch that scaling policies can use to scale on a metric like average CPU utilization.

      For all other resources, the minimum allowed value depends on the type of resource that you are using. If you provide a value that is lower than what a resource can accept, an error occurs. In which case, the error message will provide the minimum value that the resource can accept.

      " + "documentation":"

      The minimum value that you plan to scale in to. When a scaling policy is in effect, Application Auto Scaling can scale in (contract) as needed to the minimum capacity limit in response to changing demand. This property is required when registering a new scalable target.

      For the following resources, the minimum value allowed is 0.

      • AppStream 2.0 fleets

      • Aurora DB clusters

      • ECS services

      • EMR clusters

      • Lambda provisioned concurrency

      • SageMaker endpoint variants

      • SageMaker Serverless endpoint provisioned concurrency

      • Spot Fleets

      • custom resources

      It's strongly recommended that you specify a value greater than 0. A value greater than 0 means that data points are continuously reported to CloudWatch that scaling policies can use to scale on a metric like average CPU utilization.

      For all other resources, the minimum allowed value depends on the type of resource that you are using. If you provide a value that is lower than what a resource can accept, an error occurs. In which case, the error message will provide the minimum value that the resource can accept.

      " }, "MaxCapacity":{ "shape":"ResourceCapacity", @@ -1286,7 +1287,7 @@ }, "ScalingAdjustment":{ "shape":"ScalingAdjustment", - "documentation":"

      The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity. For exact capacity, you must specify a positive value.

      " + "documentation":"

      The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity. For exact capacity, you must specify a non-negative value.

      " } }, "documentation":"

      Represents a step adjustment for a StepScalingPolicyConfiguration. Describes an adjustment based on the difference between the value of the aggregated CloudWatch metric and the breach threshold that you've defined for the alarm.

      For the following examples, suppose that you have an alarm with a breach threshold of 50:

      • To initiate the adjustment when the metric is greater than or equal to 50 and less than 60, specify a lower bound of 0 and an upper bound of 10.

      • To initiate the adjustment when the metric is greater than 40 and less than or equal to 50, specify a lower bound of -10 and an upper bound of 0.

      There are a few rules for the step adjustments for your step policy:

      • The ranges of your step adjustments can't overlap or have a gap.

      • At most one step adjustment can have a null lower bound. If one step adjustment has a negative lower bound, then there must be a step adjustment with a null lower bound.

      • At most one step adjustment can have a null upper bound. If one step adjustment has a positive upper bound, then there must be a step adjustment with a null upper bound.

      • The upper and lower bound can't be null in the same step adjustment.

      " @@ -1312,14 +1313,14 @@ }, "Cooldown":{ "shape":"Cooldown", - "documentation":"

      The amount of time, in seconds, to wait for a previous scaling activity to take effect.

      With scale-out policies, the intention is to continuously (but not excessively) scale out. After Application Auto Scaling successfully scales out using a step scaling policy, it starts to calculate the cooldown time. The scaling policy won't increase the desired capacity again unless either a larger scale out is triggered or the cooldown period ends. While the cooldown period is in effect, capacity added by the initiating scale-out activity is calculated as part of the desired capacity for the next scale-out activity. For example, when an alarm triggers a step scaling policy to increase the capacity by 2, the scaling activity completes successfully, and a cooldown period starts. If the alarm triggers again during the cooldown period but at a more aggressive step adjustment of 3, the previous increase of 2 is considered part of the current capacity. Therefore, only 1 is added to the capacity.

      With scale-in policies, the intention is to scale in conservatively to protect your application’s availability, so scale-in activities are blocked until the cooldown period has expired. However, if another alarm triggers a scale-out activity during the cooldown period after a scale-in activity, Application Auto Scaling scales out the target immediately. In this case, the cooldown period for the scale-in activity stops and doesn't complete.

      Application Auto Scaling provides a default value of 600 for Amazon ElastiCache replication groups and a default value of 300 for the following scalable targets:

      • AppStream 2.0 fleets

      • Aurora DB clusters

      • ECS services

      • EMR clusters

      • Neptune clusters

      • SageMaker Serverless endpoint provisioned concurrency

      • SageMaker endpoint variants

      • Spot Fleets

      • Custom resources

      For all other scalable targets, the default value is 0:

      • Amazon Comprehend document classification and entity recognizer endpoints

      • DynamoDB tables and global secondary indexes

      • Amazon Keyspaces tables

      • Lambda provisioned concurrency

      • Amazon MSK broker storage

      " + "documentation":"

      The amount of time, in seconds, to wait for a previous scaling activity to take effect. If not specified, the default value is 300. For more information, see Cooldown period in the Application Auto Scaling User Guide.

      " }, "MetricAggregationType":{ "shape":"MetricAggregationType", "documentation":"

      The aggregation type for the CloudWatch metrics. Valid values are Minimum, Maximum, and Average. If the aggregation type is null, the value is treated as Average.

      " } }, - "documentation":"

      Represents a step scaling policy configuration to use with Application Auto Scaling.

      " + "documentation":"

      Represents a step scaling policy configuration to use with Application Auto Scaling.

      For more information, see Step scaling policies in the Application Auto Scaling User Guide.

      " }, "SuspendedState":{ "type":"structure", @@ -1523,18 +1524,18 @@ }, "ScaleOutCooldown":{ "shape":"Cooldown", - "documentation":"

      The amount of time, in seconds, to wait for a previous scale-out activity to take effect.

      With the scale-out cooldown period, the intention is to continuously (but not excessively) scale out. After Application Auto Scaling successfully scales out using a target tracking scaling policy, it starts to calculate the cooldown time. The scaling policy won't increase the desired capacity again unless either a larger scale out is triggered or the cooldown period ends. While the cooldown period is in effect, the capacity added by the initiating scale-out activity is calculated as part of the desired capacity for the next scale-out activity.

      Application Auto Scaling provides a default value of 600 for Amazon ElastiCache replication groups and a default value of 300 for the following scalable targets:

      • AppStream 2.0 fleets

      • Aurora DB clusters

      • ECS services

      • EMR clusters

      • Neptune clusters

      • SageMaker Serverless endpoint provisioned concurrency

      • SageMaker endpoint variants

      • Spot Fleets

      • Custom resources

      For all other scalable targets, the default value is 0:

      • Amazon Comprehend document classification and entity recognizer endpoints

      • DynamoDB tables and global secondary indexes

      • Amazon Keyspaces tables

      • Lambda provisioned concurrency

      • Amazon MSK broker storage

      " + "documentation":"

      The amount of time, in seconds, to wait for a previous scale-out activity to take effect. For more information and for default values, see Define cooldown periods in the Application Auto Scaling User Guide.

      " }, "ScaleInCooldown":{ "shape":"Cooldown", - "documentation":"

      The amount of time, in seconds, after a scale-in activity completes before another scale-in activity can start.

      With the scale-in cooldown period, the intention is to scale in conservatively to protect your application’s availability, so scale-in activities are blocked until the cooldown period has expired. However, if another alarm triggers a scale-out activity during the scale-in cooldown period, Application Auto Scaling scales out the target immediately. In this case, the scale-in cooldown period stops and doesn't complete.

      Application Auto Scaling provides a default value of 600 for Amazon ElastiCache replication groups and a default value of 300 for the following scalable targets:

      • AppStream 2.0 fleets

      • Aurora DB clusters

      • ECS services

      • EMR clusters

      • Neptune clusters

      • SageMaker Serverless endpoint provisioned concurrency

      • SageMaker endpoint variants

      • Spot Fleets

      • Custom resources

      For all other scalable targets, the default value is 0:

      • Amazon Comprehend document classification and entity recognizer endpoints

      • DynamoDB tables and global secondary indexes

      • Amazon Keyspaces tables

      • Lambda provisioned concurrency

      • Amazon MSK broker storage

      " + "documentation":"

      The amount of time, in seconds, after a scale-in activity completes before another scale-in activity can start. For more information and for default values, see Define cooldown periods in the Application Auto Scaling User Guide.

      " }, "DisableScaleIn":{ "shape":"DisableScaleIn", "documentation":"

      Indicates whether scale in by the target tracking scaling policy is disabled. If the value is true, scale in is disabled and the target tracking scaling policy won't remove capacity from the scalable target. Otherwise, scale in is enabled and the target tracking scaling policy can remove capacity from the scalable target. The default value is false.

      " } }, - "documentation":"

      Represents a target tracking scaling policy configuration to use with Application Auto Scaling.

      " + "documentation":"

      Represents a target tracking scaling policy configuration to use with Application Auto Scaling.

      For more information, see Target tracking scaling policies in the Application Auto Scaling User Guide.

      " }, "TimestampType":{"type":"timestamp"}, "TooManyTagsException":{ @@ -1584,5 +1585,5 @@ "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" } }, - "documentation":"

      With Application Auto Scaling, you can configure automatic scaling for the following resources:

      • Amazon AppStream 2.0 fleets

      • Amazon Aurora Replicas

      • Amazon Comprehend document classification and entity recognizer endpoints

      • Amazon DynamoDB tables and global secondary indexes throughput capacity

      • Amazon ECS services

      • Amazon ElastiCache for Redis clusters (replication groups)

      • Amazon EMR clusters

      • Amazon Keyspaces (for Apache Cassandra) tables

      • Lambda function provisioned concurrency

      • Amazon Managed Streaming for Apache Kafka broker storage

      • Amazon Neptune clusters

      • Amazon SageMaker Serverless endpoint provisioned concurrency

      • Amazon SageMaker endpoint variants

      • Spot Fleets (Amazon EC2)

      • Custom resources provided by your own applications or services

      To learn more about Application Auto Scaling, see the Application Auto Scaling User Guide.

      API Summary

      The Application Auto Scaling service API includes three key sets of actions:

      • Register and manage scalable targets - Register Amazon Web Services or custom resources as scalable targets (a resource that Application Auto Scaling can scale), set minimum and maximum capacity limits, and retrieve information on existing scalable targets.

      • Configure and manage automatic scaling - Define scaling policies to dynamically scale your resources in response to CloudWatch alarms, schedule one-time or recurring scaling actions, and retrieve your recent scaling activity history.

      • Suspend and resume scaling - Temporarily suspend and later resume automatic scaling by calling the RegisterScalableTarget API action for any Application Auto Scaling scalable target. You can suspend and resume (individually or in combination) scale-out activities that are triggered by a scaling policy, scale-in activities that are triggered by a scaling policy, and scheduled scaling.

      " + "documentation":"

      With Application Auto Scaling, you can configure automatic scaling for the following resources:

      • Amazon AppStream 2.0 fleets

      • Amazon Aurora Replicas

      • Amazon Comprehend document classification and entity recognizer endpoints

      • Amazon DynamoDB tables and global secondary indexes throughput capacity

      • Amazon ECS services

      • Amazon ElastiCache for Redis clusters (replication groups)

      • Amazon EMR clusters

      • Amazon Keyspaces (for Apache Cassandra) tables

      • Lambda function provisioned concurrency

      • Amazon Managed Streaming for Apache Kafka broker storage

      • Amazon Neptune clusters

      • Amazon SageMaker endpoint variants

      • Amazon SageMaker Serverless endpoint provisioned concurrency

      • Spot Fleets (Amazon EC2)

      • Custom resources provided by your own applications or services

      To learn more about Application Auto Scaling, see the Application Auto Scaling User Guide.

      API Summary

      The Application Auto Scaling service API includes three key sets of actions:

      • Register and manage scalable targets - Register Amazon Web Services or custom resources as scalable targets (a resource that Application Auto Scaling can scale), set minimum and maximum capacity limits, and retrieve information on existing scalable targets.

      • Configure and manage automatic scaling - Define scaling policies to dynamically scale your resources in response to CloudWatch alarms, schedule one-time or recurring scaling actions, and retrieve your recent scaling activity history.

      • Suspend and resume scaling - Temporarily suspend and later resume automatic scaling by calling the RegisterScalableTarget API action for any Application Auto Scaling scalable target. You can suspend and resume (individually or in combination) scale-out activities that are triggered by a scaling policy, scale-in activities that are triggered by a scaling policy, and scheduled scaling.

      " } diff --git a/services/applicationcostprofiler/pom.xml b/services/applicationcostprofiler/pom.xml index ed366a2c6701..0b18732fe8a4 100644 --- a/services/applicationcostprofiler/pom.xml +++ b/services/applicationcostprofiler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT applicationcostprofiler AWS Java SDK :: Services :: Application Cost Profiler diff --git a/services/applicationdiscovery/pom.xml b/services/applicationdiscovery/pom.xml index 94ede9c2f568..4ada98c7d520 100644 --- a/services/applicationdiscovery/pom.xml +++ b/services/applicationdiscovery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT applicationdiscovery AWS Java SDK :: Services :: AWS Application Discovery Service diff --git a/services/applicationdiscovery/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/applicationdiscovery/src/main/resources/codegen-resources/endpoint-rule-set.json index 9251fb9e4984..b745570fa9cb 100644 --- a/services/applicationdiscovery/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/applicationdiscovery/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" 
+ "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] }, - "supportsFIPS" + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], 
+ "endpoint": { + "url": "https://discovery-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://discovery-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://discovery-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + 
true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://discovery.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -222,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://discovery-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://discovery.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -231,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://discovery.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://discovery.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git 
a/services/applicationdiscovery/src/main/resources/codegen-resources/endpoint-tests.json b/services/applicationdiscovery/src/main/resources/codegen-resources/endpoint-tests.json index 7f10fcad8c95..ec4122fe2f9c 100644 --- a/services/applicationdiscovery/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/applicationdiscovery/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,42 +1,29 @@ { "testCases": [ { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://discovery-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery-fips.eu-central-1.amazonaws.com" + "url": "https://discovery.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "eu-central-1" + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery.eu-central-1.api.aws" + "url": "https://discovery.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { @@ -47,48 +34,48 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-central-1", "UseFIPS": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack 
disabled", "expect": { "endpoint": { - "url": "https://discovery-fips.us-west-2.api.aws" + "url": "https://discovery.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "us-west-2" + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery-fips.us-west-2.amazonaws.com" + "url": "https://discovery.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-west-2" + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery.us-west-2.api.aws" + "url": "https://discovery.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -99,282 +86,274 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": false, - "Region": "us-west-2" + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://discovery-fips.eu-west-2.api.aws" + "url": "https://discovery-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": true, - "Region": "eu-west-2" + "UseDualStack": true } }, { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - 
"url": "https://discovery-fips.eu-west-2.amazonaws.com" + "url": "https://discovery-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://discovery.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "eu-west-2" + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://discovery.eu-west-2.amazonaws.com" + "url": "https://discovery.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "eu-west-2" + "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://discovery-fips.eu-west-1.api.aws" + "url": "https://discovery-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "eu-west-1" + "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery-fips.eu-west-1.amazonaws.com" + "url": "https://discovery-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "eu-west-1" + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS 
disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://discovery.eu-west-1.api.aws" + "url": "https://discovery.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "eu-west-1" + "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery.eu-west-1.amazonaws.com" + "url": "https://discovery.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://discovery-fips.ap-northeast-1.api.aws" + "url": "https://discovery-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "ap-northeast-1" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery-fips.ap-northeast-1.amazonaws.com" + "url": "https://discovery-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://discovery.ap-northeast-1.api.aws" + "url": 
"https://discovery.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "ap-northeast-1" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery.ap-northeast-1.amazonaws.com" + "url": "https://discovery.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://discovery-fips.ap-southeast-2.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "ap-southeast-2" + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery-fips.ap-southeast-2.amazonaws.com" + "url": "https://discovery-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://discovery.ap-southeast-2.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - 
"UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "ap-southeast-2" + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery.ap-southeast-2.amazonaws.com" + "url": "https://discovery.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://discovery-fips.us-east-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery-fips.us-east-1.amazonaws.com" + "url": "https://discovery-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack 
disabled", "expect": { "endpoint": { - "url": "https://discovery.us-east-1.api.aws" + "url": "https://discovery.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://discovery.us-east-1.amazonaws.com" + "url": "https://example.com" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false, + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -384,9 +363,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -396,11 +375,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/applicationdiscovery/src/main/resources/codegen-resources/service-2.json b/services/applicationdiscovery/src/main/resources/codegen-resources/service-2.json index 
322a66a3ec95..3d97897979a5 100644 --- a/services/applicationdiscovery/src/main/resources/codegen-resources/service-2.json +++ b/services/applicationdiscovery/src/main/resources/codegen-resources/service-2.json @@ -131,7 +131,7 @@ {"shape":"ServerInternalErrorException"}, {"shape":"HomeRegionNotSetException"} ], - "documentation":"

      Lists agents or connectors as specified by ID or other filters. All agents/connectors associated with your user account can be listed if you call DescribeAgents as is without passing any parameters.

      " + "documentation":"

      Lists agents or collectors as specified by ID or other filters. All agents/collectors associated with your user can be listed if you call DescribeAgents as is without passing any parameters.

      " }, "DescribeConfigurations":{ "name":"DescribeConfigurations", @@ -167,7 +167,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"HomeRegionNotSetException"} ], - "documentation":"

      Lists exports as specified by ID. All continuous exports associated with your user account can be listed if you call DescribeContinuousExports as is without passing any parameters.

      " + "documentation":"

      Lists exports as specified by ID. All continuous exports associated with your user can be listed if you call DescribeContinuousExports as is without passing any parameters.

      " }, "DescribeExportConfigurations":{ "name":"DescribeExportConfigurations", @@ -238,7 +238,7 @@ {"shape":"ServerInternalErrorException"}, {"shape":"HomeRegionNotSetException"} ], - "documentation":"

      Retrieves a list of configuration items that have tags as specified by the key-value pairs, name and value, passed to the optional parameter filters.

      There are three valid tag filter names:

      • tagKey

      • tagValue

      • configurationId

      Also, all configuration items associated with your user account that have tags can be listed if you call DescribeTags as is without passing any parameters.

      " + "documentation":"

      Retrieves a list of configuration items that have tags as specified by the key-value pairs, name and value, passed to the optional parameter filters.

      There are three valid tag filter names:

      • tagKey

      • tagValue

      • configurationId

      Also, all configuration items associated with your user that have tags can be listed if you call DescribeTags as is without passing any parameters.

      " }, "DisassociateConfigurationItemsFromApplication":{ "name":"DisassociateConfigurationItemsFromApplication", @@ -362,7 +362,7 @@ {"shape":"ServerInternalErrorException"}, {"shape":"HomeRegionNotSetException"} ], - "documentation":"

      Instructs the specified agents or connectors to start collecting data.

      " + "documentation":"

      Instructs the specified agents to start collecting data.

      " }, "StartExportTask":{ "name":"StartExportTask", @@ -380,7 +380,7 @@ {"shape":"OperationNotPermittedException"}, {"shape":"HomeRegionNotSetException"} ], - "documentation":"

      Begins the export of discovered data to an S3 bucket.

      If you specify agentIds in a filter, the task exports up to 72 hours of detailed data collected by the identified Application Discovery Agent, including network, process, and performance details. A time range for exported agent data may be set by using startTime and endTime. Export of detailed agent data is limited to five concurrently running exports.

      If you do not include an agentIds filter, summary data is exported that includes both Amazon Web Services Agentless Discovery Connector data and summary data from Amazon Web Services Discovery Agents. Export of summary data is limited to two exports per day.

      " + "documentation":"

      Begins the export of a discovered data report to an Amazon S3 bucket managed by Amazon Web Services.

      Exports might provide an estimate of fees and savings based on certain information that you provide. Fee estimates do not include any taxes that might apply. Your actual fees and savings depend on a variety of factors, including your actual usage of Amazon Web Services services, which might vary from the estimates provided in this report.

      If you do not specify preferences or agentIds in the filter, a summary of all servers, applications, tags, and performance is generated. This data is an aggregation of all server data collected through on-premises tooling, file import, application grouping and applying tags.

      If you specify agentIds in a filter, the task exports up to 72 hours of detailed data collected by the identified Application Discovery Agent, including network, process, and performance details. A time range for exported agent data may be set by using startTime and endTime. Export of detailed agent data is limited to five concurrently running exports. Export of detailed agent data is limited to two exports per day.

      If you enable ec2RecommendationsPreferences in preferences , an Amazon EC2 instance matching the characteristics of each server in Application Discovery Service is generated. Changing the attributes of the ec2RecommendationsPreferences changes the criteria of the recommendation.

      " }, "StartImportTask":{ "name":"StartImportTask", @@ -398,7 +398,7 @@ {"shape":"ServerInternalErrorException"}, {"shape":"HomeRegionNotSetException"} ], - "documentation":"

      Starts an import task, which allows you to import details of your on-premises environment directly into Amazon Web Services Migration Hub without having to use the Application Discovery Service (ADS) tools such as the Discovery Connector or Discovery Agent. This gives you the option to perform migration assessment and planning directly from your imported data, including the ability to group your devices as applications and track their migration status.

      To start an import request, do this:

      1. Download the specially formatted comma separated value (CSV) import template, which you can find here: https://s3.us-west-2.amazonaws.com/templates-7cffcf56-bd96-4b1c-b45b-a5b42f282e46/import_template.csv.

      2. Fill out the template with your server and application data.

      3. Upload your import file to an Amazon S3 bucket, and make a note of it's Object URL. Your import file must be in the CSV format.

      4. Use the console or the StartImportTask command with the Amazon Web Services CLI or one of the Amazon Web Services SDKs to import the records from your file.

      For more information, including step-by-step procedures, see Migration Hub Import in the Amazon Web Services Application Discovery Service User Guide.

      There are limits to the number of import tasks you can create (and delete) in an Amazon Web Services account. For more information, see Amazon Web Services Application Discovery Service Limits in the Amazon Web Services Application Discovery Service User Guide.

      " + "documentation":"

      Starts an import task, which allows you to import details of your on-premises environment directly into Amazon Web Services Migration Hub without having to use the Amazon Web Services Application Discovery Service (Application Discovery Service) tools such as the Amazon Web Services Application Discovery Service Agentless Collector or Application Discovery Agent. This gives you the option to perform migration assessment and planning directly from your imported data, including the ability to group your devices as applications and track their migration status.

      To start an import request, do this:

      1. Download the specially formatted comma separated value (CSV) import template, which you can find here: https://s3.us-west-2.amazonaws.com/templates-7cffcf56-bd96-4b1c-b45b-a5b42f282e46/import_template.csv.

      2. Fill out the template with your server and application data.

      3. Upload your import file to an Amazon S3 bucket, and make a note of its Object URL. Your import file must be in the CSV format.

      4. Use the console or the StartImportTask command with the Amazon Web Services CLI or one of the Amazon Web Services SDKs to import the records from your file.

      For more information, including step-by-step procedures, see Migration Hub Import in the Amazon Web Services Application Discovery Service User Guide.

      There are limits to the number of import tasks you can create (and delete) in an Amazon Web Services account. For more information, see Amazon Web Services Application Discovery Service Limits in the Amazon Web Services Application Discovery Service User Guide.

      " }, "StopContinuousExport":{ "name":"StopContinuousExport", @@ -435,7 +435,7 @@ {"shape":"ServerInternalErrorException"}, {"shape":"HomeRegionNotSetException"} ], - "documentation":"

      Instructs the specified agents or connectors to stop collecting data.

      " + "documentation":"

      Instructs the specified agents to stop collecting data.

      " }, "UpdateApplication":{ "name":"UpdateApplication", @@ -461,18 +461,18 @@ "members":{ "agentId":{ "shape":"String", - "documentation":"

      The agent/connector ID.

      " + "documentation":"

      The agent ID.

      " }, "operationSucceeded":{ "shape":"Boolean", - "documentation":"

      Information about the status of the StartDataCollection and StopDataCollection operations. The system has recorded the data collection operation. The agent/connector receives this command the next time it polls for a new command.

      " + "documentation":"

      Information about the status of the StartDataCollection and StopDataCollection operations. The system has recorded the data collection operation. The agent receives this command the next time it polls for a new command.

      " }, "description":{ "shape":"String", "documentation":"

      A description of the operation performed.

      " } }, - "documentation":"

      Information about agents or connectors that were instructed to start collecting data. Information includes the agent/connector ID, a description of the operation, and whether the agent/connector configuration was updated.

      " + "documentation":"

      Information about agents that were instructed to start collecting data. Information includes the agent ID, a description of the operation, and whether the agent configuration was updated.

      " }, "AgentConfigurationStatusList":{ "type":"list", @@ -493,15 +493,15 @@ "members":{ "agentId":{ "shape":"AgentId", - "documentation":"

      The agent or connector ID.

      " + "documentation":"

      The agent or collector ID.

      " }, "hostName":{ "shape":"String", - "documentation":"

      The name of the host where the agent or connector resides. The host can be a server or virtual machine.

      " + "documentation":"

      The name of the host where the agent or collector resides. The host can be a server or virtual machine.

      " }, "agentNetworkInfoList":{ "shape":"AgentNetworkInfoList", - "documentation":"

      Network details about the host where the agent or connector resides.

      " + "documentation":"

      Network details about the host where the agent or collector resides.

      " }, "connectorId":{ "shape":"String", @@ -509,19 +509,19 @@ }, "version":{ "shape":"String", - "documentation":"

      The agent or connector version.

      " + "documentation":"

      The agent or collector version.

      " }, "health":{ "shape":"AgentStatus", - "documentation":"

      The health of the agent or connector.

      " + "documentation":"

      The health of the agent.

      " }, "lastHealthPingTime":{ "shape":"String", - "documentation":"

      Time since agent or connector health was reported.

      " + "documentation":"

      Time since agent health was reported.

      " }, "collectionStatus":{ "shape":"String", - "documentation":"

      Status of the collection process for an agent or connector.

      " + "documentation":"

      Status of the collection process for an agent.

      " }, "agentType":{ "shape":"String", @@ -532,21 +532,21 @@ "documentation":"

      Agent's first registration timestamp in UTC.

      " } }, - "documentation":"

      Information about agents or connectors associated with the user’s Amazon Web Services account. Information includes agent or connector IDs, IP addresses, media access control (MAC) addresses, agent or connector health, hostname where the agent or connector resides, and agent version for each agent.

      " + "documentation":"

      Information about agents associated with the user’s Amazon Web Services account. Information includes agent IDs, IP addresses, media access control (MAC) addresses, agent or collector status, hostname where the agent resides, and agent version for each agent.

      " }, "AgentNetworkInfo":{ "type":"structure", "members":{ "ipAddress":{ "shape":"String", - "documentation":"

      The IP address for the host where the agent/connector resides.

      " + "documentation":"

      The IP address for the host where the agent/collector resides.

      " }, "macAddress":{ "shape":"String", - "documentation":"

      The MAC address for the host where the agent/connector resides.

      " + "documentation":"

      The MAC address for the host where the agent/collector resides.

      " } }, - "documentation":"

      Network details about the host where the agent/connector resides.

      " + "documentation":"

      Network details about the host where the agent/collector resides.

      " }, "AgentNetworkInfoList":{ "type":"list", @@ -613,7 +613,7 @@ "members":{ "message":{"shape":"Message"} }, - "documentation":"

      The Amazon Web Services user account does not have permission to perform the action. Check the IAM policy associated with this account.

      ", + "documentation":"

      The user does not have permission to perform the action. Check the IAM policy associated with this user.

      ", "exception":true }, "BatchDeleteImportDataError":{ @@ -765,7 +765,7 @@ }, "statusDetail":{ "shape":"StringMax255", - "documentation":"

      Contains information about any errors that have occurred. This data type can have the following values:

      • ACCESS_DENIED - You don’t have permission to start Data Exploration in Amazon Athena. Contact your Amazon Web Services administrator for help. For more information, see Setting Up Amazon Web Services Application Discovery Service in the Application Discovery Service User Guide.

      • DELIVERY_STREAM_LIMIT_FAILURE - You reached the limit for Amazon Kinesis Data Firehose delivery streams. Reduce the number of streams or request a limit increase and try again. For more information, see Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide.

      • FIREHOSE_ROLE_MISSING - The Data Exploration feature is in an error state because your IAM User is missing the AWSApplicationDiscoveryServiceFirehose role. Turn on Data Exploration in Amazon Athena and try again. For more information, see Step 3: Provide Application Discovery Service Access to Non-Administrator Users by Attaching Policies in the Application Discovery Service User Guide.

      • FIREHOSE_STREAM_DOES_NOT_EXIST - The Data Exploration feature is in an error state because your IAM User is missing one or more of the Kinesis data delivery streams.

      • INTERNAL_FAILURE - The Data Exploration feature is in an error state because of an internal failure. Try again later. If this problem persists, contact Amazon Web Services Support.

      • LAKE_FORMATION_ACCESS_DENIED - You don't have sufficient lake formation permissions to start continuous export. For more information, see Upgrading Amazon Web Services Glue Data Permissions to the Amazon Web Services Lake Formation Model in the Amazon Web Services Lake Formation Developer Guide.

        You can use one of the following two ways to resolve this issue.

        1. If you don’t want to use the Lake Formation permission model, you can change the default Data Catalog settings to use only Amazon Web Services Identity and Access Management (IAM) access control for new databases. For more information, see Change Data Catalog Settings in the Lake Formation Developer Guide.

        2. You can give the service-linked IAM roles AWSServiceRoleForApplicationDiscoveryServiceContinuousExport and AWSApplicationDiscoveryServiceFirehose the required Lake Formation permissions. For more information, see Granting Database Permissions in the Lake Formation Developer Guide.

          1. AWSServiceRoleForApplicationDiscoveryServiceContinuousExport - Grant database creator permissions, which gives the role database creation ability and implicit permissions for any created tables. For more information, see Implicit Lake Formation Permissions in the Lake Formation Developer Guide.

          2. AWSApplicationDiscoveryServiceFirehose - Grant describe permissions for all tables in the database.

      • S3_BUCKET_LIMIT_FAILURE - You reached the limit for Amazon S3 buckets. Reduce the number of S3 buckets or request a limit increase and try again. For more information, see Bucket Restrictions and Limitations in the Amazon Simple Storage Service Developer Guide.

      • S3_NOT_SIGNED_UP - Your account is not signed up for the Amazon S3 service. You must sign up before you can use Amazon S3. You can sign up at the following URL: https://aws.amazon.com/s3.

      " + "documentation":"

      Contains information about any errors that have occurred. This data type can have the following values:

      • ACCESS_DENIED - You don’t have permission to start Data Exploration in Amazon Athena. Contact your Amazon Web Services administrator for help. For more information, see Setting Up Amazon Web Services Application Discovery Service in the Application Discovery Service User Guide.

      • DELIVERY_STREAM_LIMIT_FAILURE - You reached the limit for Amazon Kinesis Data Firehose delivery streams. Reduce the number of streams or request a limit increase and try again. For more information, see Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide.

      • FIREHOSE_ROLE_MISSING - The Data Exploration feature is in an error state because your user is missing the AWSApplicationDiscoveryServiceFirehose role. Turn on Data Exploration in Amazon Athena and try again. For more information, see Creating the AWSApplicationDiscoveryServiceFirehose Role in the Application Discovery Service User Guide.

      • FIREHOSE_STREAM_DOES_NOT_EXIST - The Data Exploration feature is in an error state because your user is missing one or more of the Kinesis data delivery streams.

      • INTERNAL_FAILURE - The Data Exploration feature is in an error state because of an internal failure. Try again later. If this problem persists, contact Amazon Web Services Support.

      • LAKE_FORMATION_ACCESS_DENIED - You don't have sufficient lake formation permissions to start continuous export. For more information, see Upgrading Amazon Web Services Glue Data Permissions to the Amazon Web Services Lake Formation Model in the Amazon Web Services Lake Formation Developer Guide.

        You can use one of the following two ways to resolve this issue.

        1. If you don’t want to use the Lake Formation permission model, you can change the default Data Catalog settings to use only Amazon Web Services Identity and Access Management (IAM) access control for new databases. For more information, see Change Data Catalog Settings in the Lake Formation Developer Guide.

        2. You can give the service-linked IAM roles AWSServiceRoleForApplicationDiscoveryServiceContinuousExport and AWSApplicationDiscoveryServiceFirehose the required Lake Formation permissions. For more information, see Granting Database Permissions in the Lake Formation Developer Guide.

          1. AWSServiceRoleForApplicationDiscoveryServiceContinuousExport - Grant database creator permissions, which gives the role database creation ability and implicit permissions for any created tables. For more information, see Implicit Lake Formation Permissions in the Lake Formation Developer Guide.

          2. AWSApplicationDiscoveryServiceFirehose - Grant describe permissions for all tables in the database.

      • S3_BUCKET_LIMIT_FAILURE - You reached the limit for Amazon S3 buckets. Reduce the number of S3 buckets or request a limit increase and try again. For more information, see Bucket Restrictions and Limitations in the Amazon Simple Storage Service Developer Guide.

      • S3_NOT_SIGNED_UP - Your account is not signed up for the Amazon S3 service. You must sign up before you can use Amazon S3. You can sign up at the following URL: https://aws.amazon.com/s3.

      " }, "s3Bucket":{ "shape":"S3Bucket", @@ -910,14 +910,36 @@ "unknownAgentlessCollectors" ], "members":{ - "activeAgentlessCollectors":{"shape":"Integer"}, - "healthyAgentlessCollectors":{"shape":"Integer"}, - "denyListedAgentlessCollectors":{"shape":"Integer"}, - "shutdownAgentlessCollectors":{"shape":"Integer"}, - "unhealthyAgentlessCollectors":{"shape":"Integer"}, - "totalAgentlessCollectors":{"shape":"Integer"}, - "unknownAgentlessCollectors":{"shape":"Integer"} - } + "activeAgentlessCollectors":{ + "shape":"Integer", + "documentation":"

      The number of active Agentless Collector collectors.

      " + }, + "healthyAgentlessCollectors":{ + "shape":"Integer", + "documentation":"

      The number of healthy Agentless Collector collectors.

      " + }, + "denyListedAgentlessCollectors":{ + "shape":"Integer", + "documentation":"

      The number of deny-listed Agentless Collector collectors.

      " + }, + "shutdownAgentlessCollectors":{ + "shape":"Integer", + "documentation":"

      The number of Agentless Collector collectors with SHUTDOWN status.

      " + }, + "unhealthyAgentlessCollectors":{ + "shape":"Integer", + "documentation":"

      The number of unhealthy Agentless Collector collectors.

      " + }, + "totalAgentlessCollectors":{ + "shape":"Integer", + "documentation":"

      The total number of Agentless Collector collectors.

      " + }, + "unknownAgentlessCollectors":{ + "shape":"Integer", + "documentation":"

      The number of unknown Agentless Collector collectors.

      " + } + }, + "documentation":"

      The inventory data for installed Agentless Collector collectors.

      " }, "CustomerConnectorInfo":{ "type":"structure", @@ -1053,7 +1075,7 @@ "members":{ "agentIds":{ "shape":"AgentIds", - "documentation":"

      The agent or the Connector IDs for which you want information. If you specify no IDs, the system returns information about all agents/Connectors associated with your Amazon Web Services user account.

      " + "documentation":"

      The agent or the collector IDs for which you want information. If you specify no IDs, the system returns information about all agents/collectors associated with your user.

      " }, "filters":{ "shape":"Filters", @@ -1061,7 +1083,7 @@ }, "maxResults":{ "shape":"Integer", - "documentation":"

      The total number of agents/Connectors to return in a single page of output. The maximum value is 100.

      " + "documentation":"

      The total number of agents/collectors to return in a single page of output. The maximum value is 100.

      " }, "nextToken":{ "shape":"NextToken", @@ -1074,7 +1096,7 @@ "members":{ "agentsInfo":{ "shape":"AgentsInfo", - "documentation":"

      Lists agents or the Connector by ID or lists all agents/Connectors associated with your user account if you did not specify an agent/Connector ID. The output includes agent/Connector IDs, IP addresses, media access control (MAC) addresses, agent/Connector health, host name where the agent/Connector resides, and the version number of each agent/Connector.

      " + "documentation":"

      Lists agents or the collector by ID or lists all agents/collectors associated with your user, if you did not specify an agent/collector ID. The output includes agent/collector IDs, IP addresses, media access control (MAC) addresses, agent/collector health, host name where the agent/collector resides, and the version number of each agent/collector.

      " }, "nextToken":{ "shape":"NextToken", @@ -1302,6 +1324,50 @@ "members":{ } }, + "EC2InstanceType":{ + "type":"string", + "max":25, + "min":1, + "pattern":"[a-zA-Z0-9\\d\\.\\-]+" + }, + "Ec2RecommendationsExportPreferences":{ + "type":"structure", + "members":{ + "enabled":{ + "shape":"ExportEnabled", + "documentation":"

      If set to true, the export preference is set to Ec2RecommendationsExportPreferences.

      " + }, + "cpuPerformanceMetricBasis":{ + "shape":"UsageMetricBasis", + "documentation":"

      The recommended EC2 instance type that matches the CPU usage metric of server performance data.

      " + }, + "ramPerformanceMetricBasis":{ + "shape":"UsageMetricBasis", + "documentation":"

      The recommended EC2 instance type that matches the Memory usage metric of server performance data.

      " + }, + "tenancy":{ + "shape":"Tenancy", + "documentation":"

      The target tenancy to use for your recommended EC2 instances.

      " + }, + "excludedInstanceTypes":{ + "shape":"ExcludedInstanceTypes", + "documentation":"

      An array of instance types to exclude from recommendations.

      " + }, + "preferredRegion":{ + "shape":"UserPreferredRegion", + "documentation":"

      The target Amazon Web Services Region for the recommendations. You can use any of the Region codes available for the chosen service, as listed in Amazon Web Services service endpoints in the Amazon Web Services General Reference.

      " + }, + "reservedInstanceOptions":{ + "shape":"ReservedInstanceOptions", + "documentation":"

      The contract type for a reserved instance. If blank, we assume an On-Demand instance is preferred.

      " + } + }, + "documentation":"

      Indicates that the exported data must include EC2 instance type matches for on-premises servers that are discovered through Amazon Web Services Application Discovery Service.

      " + }, + "ExcludedInstanceTypes":{ + "type":"list", + "member":{"shape":"EC2InstanceType"} + }, "ExportConfigurationsResponse":{ "type":"structure", "members":{ @@ -1313,15 +1379,13 @@ }, "ExportDataFormat":{ "type":"string", - "enum":[ - "CSV", - "GRAPHML" - ] + "enum":["CSV"] }, "ExportDataFormats":{ "type":"list", "member":{"shape":"ExportDataFormat"} }, + "ExportEnabled":{"type":"boolean"}, "ExportFilter":{ "type":"structure", "required":[ @@ -1397,6 +1461,17 @@ }, "documentation":"

      Information regarding the export status of discovered data. The value is an array of objects.

      " }, + "ExportPreferences":{ + "type":"structure", + "members":{ + "ec2RecommendationsPreferences":{ + "shape":"Ec2RecommendationsExportPreferences", + "documentation":"

      If enabled, exported data includes EC2 instance type matches for on-premises servers discovered through Amazon Web Services Application Discovery Service.

      " + } + }, + "documentation":"

      Indicates the type of data that is being exported. Only one ExportPreferences can be enabled for a StartExportTask action.

      ", + "union":true + }, "ExportRequestTime":{"type":"timestamp"}, "ExportStatus":{ "type":"string", @@ -1488,7 +1563,10 @@ "shape":"CustomerMeCollectorInfo", "documentation":"

      Details about Migration Evaluator collectors, including collector status and health.

      " }, - "agentlessCollectorSummary":{"shape":"CustomerAgentlessCollectorInfo"} + "agentlessCollectorSummary":{ + "shape":"CustomerAgentlessCollectorInfo", + "documentation":"

      Details about Agentless Collector collectors, including status.

      " + } } }, "HomeRegionNotSetException":{ @@ -1496,7 +1574,7 @@ "members":{ "message":{"shape":"Message"} }, - "documentation":"

      The home region is not set. Set the home region to continue.

      ", + "documentation":"

      The home Region is not set. Set the home Region to continue.

      ", "exception":true }, "ImportStatus":{ @@ -1765,6 +1843,13 @@ "member":{"shape":"NeighborConnectionDetail"} }, "NextToken":{"type":"string"}, + "OfferingClass":{ + "type":"string", + "enum":[ + "STANDARD", + "CONVERTIBLE" + ] + }, "OperationNotPermittedException":{ "type":"structure", "members":{ @@ -1797,6 +1882,37 @@ "type":"list", "member":{"shape":"OrderByElement"} }, + "PurchasingOption":{ + "type":"string", + "enum":[ + "ALL_UPFRONT", + "PARTIAL_UPFRONT", + "NO_UPFRONT" + ] + }, + "ReservedInstanceOptions":{ + "type":"structure", + "required":[ + "purchasingOption", + "offeringClass", + "termLength" + ], + "members":{ + "purchasingOption":{ + "shape":"PurchasingOption", + "documentation":"

      The payment plan to use for your Reserved Instance.

      " + }, + "offeringClass":{ + "shape":"OfferingClass", + "documentation":"

      The flexibility to change the instance types needed for your Reserved Instance.

      " + }, + "termLength":{ + "shape":"TermLength", + "documentation":"

      The preferred duration of the Reserved Instance term.

      " + } + }, + "documentation":"

      Used to provide Reserved Instance preferences for the recommendation.

      " + }, "ResourceInUseException":{ "type":"structure", "members":{ @@ -1865,7 +1981,7 @@ "members":{ "agentIds":{ "shape":"AgentIds", - "documentation":"

      The IDs of the agents or connectors from which to start collecting data. If you send a request to an agent/connector ID that you do not have permission to contact, according to your Amazon Web Services account, the service does not throw an exception. Instead, it returns the error in the Description field. If you send a request to multiple agents/connectors and you do not have permission to contact some of those agents/connectors, the system does not throw an exception. Instead, the system shows Failed in the Description field.

      " + "documentation":"

      The IDs of the agents from which to start collecting data. If you send a request to an agent ID that you do not have permission to contact, according to your Amazon Web Services account, the service does not throw an exception. Instead, it returns the error in the Description field. If you send a request to multiple agents and you do not have permission to contact some of those agents, the system does not throw an exception. Instead, the system shows Failed in the Description field.

      " } } }, @@ -1874,7 +1990,7 @@ "members":{ "agentsConfigurationStatus":{ "shape":"AgentConfigurationStatusList", - "documentation":"

      Information about agents or the connector that were instructed to start collecting data. Information includes the agent/connector ID, a description of the operation performed, and whether the agent/connector configuration was updated.

      " + "documentation":"

      Information about agents that were instructed to start collecting data. Information includes the agent ID, a description of the operation performed, and whether the agent configuration was updated.

      " } } }, @@ -1887,7 +2003,7 @@ }, "filters":{ "shape":"ExportFilters", - "documentation":"

      If a filter is present, it selects the single agentId of the Application Discovery Agent for which data is exported. The agentId can be found in the results of the DescribeAgents API or CLI. If no filter is present, startTime and endTime are ignored and exported data includes both Agentless Discovery Connector data and summary data from Application Discovery agents.

      " + "documentation":"

      If a filter is present, it selects the single agentId of the Application Discovery Agent for which data is exported. The agentId can be found in the results of the DescribeAgents API or CLI. If no filter is present, startTime and endTime are ignored and exported data includes both Amazon Web Services Application Discovery Service Agentless Collector collectors data and summary data from Application Discovery Agent agents.

      " }, "startTime":{ "shape":"TimeStamp", @@ -1896,6 +2012,10 @@ "endTime":{ "shape":"TimeStamp", "documentation":"

      The end timestamp for exported data from the single Application Discovery Agent selected in the filters. If no value is specified, exported data includes the most recent data collected by the agent.

      " + }, + "preferences":{ + "shape":"ExportPreferences", + "documentation":"

      Indicates the type of data that needs to be exported. Only one ExportPreferences can be enabled at any time.

      " } } }, @@ -1968,7 +2088,7 @@ "members":{ "agentIds":{ "shape":"AgentIds", - "documentation":"

      The IDs of the agents or connectors from which to stop collecting data.

      " + "documentation":"

      The IDs of the agents from which to stop collecting data.

      " } } }, @@ -1977,7 +2097,7 @@ "members":{ "agentsConfigurationStatus":{ "shape":"AgentConfigurationStatusList", - "documentation":"

      Information about the agents or connector that were instructed to stop collecting data. Information includes the agent/connector ID, a description of the operation performed, and whether the agent/connector configuration was updated.

      " + "documentation":"

      Information about the agents that were instructed to stop collecting data. Information includes the agent ID, a description of the operation performed, and whether the agent configuration was updated.

      " } } }, @@ -2038,6 +2158,20 @@ "member":{"shape":"Tag"} }, "TagValue":{"type":"string"}, + "Tenancy":{ + "type":"string", + "enum":[ + "DEDICATED", + "SHARED" + ] + }, + "TermLength":{ + "type":"string", + "enum":[ + "ONE_YEAR", + "THREE_YEAR" + ] + }, "TimeStamp":{"type":"timestamp"}, "ToDeleteIdentifierList":{ "type":"list", @@ -2068,6 +2202,35 @@ "members":{ } }, + "UsageMetricBasis":{ + "type":"structure", + "members":{ + "name":{ + "shape":"UsageMetricBasisName", + "documentation":"

      A utilization metric that is used by the recommendations.

      " + }, + "percentageAdjust":{ + "shape":"UsageMetricPercentageAdjust", + "documentation":"

      Specifies the percentage of the specified utilization metric that is used by the recommendations.

      " + } + }, + "documentation":"

      Specifies the performance metrics to use for the server that is used for recommendations.

      " + }, + "UsageMetricBasisName":{ + "type":"string", + "pattern":"^(p(\\d{1,2}|100)|AVG|SPEC|MAX)$" + }, + "UsageMetricPercentageAdjust":{ + "type":"double", + "max":100.0, + "min":0.0 + }, + "UserPreferredRegion":{ + "type":"string", + "max":30, + "min":1, + "pattern":"[a-z]{2}-[a-z\\-]+-[0-9]+" + }, "orderString":{ "type":"string", "enum":[ @@ -2076,5 +2239,5 @@ ] } }, - "documentation":"Amazon Web Services Application Discovery Service

      Amazon Web Services Application Discovery Service helps you plan application migration projects. It automatically identifies servers, virtual machines (VMs), and network dependencies in your on-premises data centers. For more information, see the Amazon Web Services Application Discovery Service FAQ. Application Discovery Service offers three ways of performing discovery and collecting data about your on-premises servers:

      • Agentless discovery is recommended for environments that use VMware vCenter Server. This mode doesn't require you to install an agent on each host. It does not work in non-VMware environments.

        • Agentless discovery gathers server information regardless of the operating systems, which minimizes the time required for initial on-premises infrastructure assessment.

        • Agentless discovery doesn't collect information about network dependencies, only agent-based discovery collects that information.

      • Agent-based discovery collects a richer set of data than agentless discovery by using the Amazon Web Services Application Discovery Agent, which you install on one or more hosts in your data center.

        • The agent captures infrastructure and application information, including an inventory of running processes, system performance information, resource utilization, and network dependencies.

        • The information collected by agents is secured at rest and in transit to the Application Discovery Service database in the cloud.

      • Amazon Web Services Partner Network (APN) solutions integrate with Application Discovery Service, enabling you to import details of your on-premises environment directly into Migration Hub without using the discovery connector or discovery agent.

        • Third-party application discovery tools can query Amazon Web Services Application Discovery Service, and they can write to the Application Discovery Service database using the public API.

        • In this way, you can import data into Migration Hub and view it, so that you can associate applications with servers and track migrations.

      Recommendations

      We recommend that you use agent-based discovery for non-VMware environments, and whenever you want to collect information about network dependencies. You can run agent-based and agentless discovery simultaneously. Use agentless discovery to complete the initial infrastructure assessment quickly, and then install agents on select hosts to collect additional information.

      Working With This Guide

      This API reference provides descriptions, syntax, and usage examples for each of the actions and data types for Application Discovery Service. The topic for each action shows the API request parameters and the response. Alternatively, you can use one of the Amazon Web Services SDKs to access an API that is tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs.

      • Remember that you must set your Migration Hub home region before you call any of these APIs.

      • You must make API calls for write actions (create, notify, associate, disassociate, import, or put) while in your home region, or a HomeRegionNotSetException error is returned.

      • API calls for read actions (list, describe, stop, and delete) are permitted outside of your home region.

      • Although it is unlikely, the Migration Hub home region could change. If you call APIs outside the home region, an InvalidInputException is returned.

      • You must call GetHomeRegion to obtain the latest Migration Hub home region.

      This guide is intended for use with the Amazon Web Services Application Discovery Service User Guide.

      All data is handled according to the Amazon Web Services Privacy Policy. You can operate Application Discovery Service offline to inspect collected data before it is shared with the service.

      " + "documentation":"Amazon Web Services Application Discovery Service

      Amazon Web Services Application Discovery Service (Application Discovery Service) helps you plan application migration projects. It automatically identifies servers, virtual machines (VMs), and network dependencies in your on-premises data centers. For more information, see the Amazon Web Services Application Discovery Service FAQ.

      Application Discovery Service offers three ways of performing discovery and collecting data about your on-premises servers:

      • Agentless discovery using Amazon Web Services Application Discovery Service Agentless Collector (Agentless Collector), which doesn't require you to install an agent on each host.

        • Agentless Collector gathers server information regardless of the operating systems, which minimizes the time required for initial on-premises infrastructure assessment.

        • Agentless Collector doesn't collect information about network dependencies, only agent-based discovery collects that information.

      • Agent-based discovery using the Amazon Web Services Application Discovery Agent (Application Discovery Agent) collects a richer set of data than agentless discovery, which you install on one or more hosts in your data center.

        • The agent captures infrastructure and application information, including an inventory of running processes, system performance information, resource utilization, and network dependencies.

        • The information collected by agents is secured at rest and in transit to the Application Discovery Service database in the Amazon Web Services cloud. For more information, see Amazon Web Services Application Discovery Agent.

      • Amazon Web Services Partner Network (APN) solutions integrate with Application Discovery Service, enabling you to import details of your on-premises environment directly into Amazon Web Services Migration Hub (Migration Hub) without using Agentless Collector or Application Discovery Agent.

        • Third-party application discovery tools can query Amazon Web Services Application Discovery Service, and they can write to the Application Discovery Service database using the public API.

        • In this way, you can import data into Migration Hub and view it, so that you can associate applications with servers and track migrations.

      Working With This Guide

      This API reference provides descriptions, syntax, and usage examples for each of the actions and data types for Application Discovery Service. The topic for each action shows the API request parameters and the response. Alternatively, you can use one of the Amazon Web Services SDKs to access an API that is tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs.

      • Remember that you must set your Migration Hub home Region before you call any of these APIs.

      • You must make API calls for write actions (create, notify, associate, disassociate, import, or put) while in your home Region, or a HomeRegionNotSetException error is returned.

      • API calls for read actions (list, describe, stop, and delete) are permitted outside of your home Region.

      • Although it is unlikely, the Migration Hub home Region could change. If you call APIs outside the home Region, an InvalidInputException is returned.

      • You must call GetHomeRegion to obtain the latest Migration Hub home Region.

      This guide is intended for use with the Amazon Web Services Application Discovery Service User Guide.

      All data is handled according to the Amazon Web Services Privacy Policy. You can operate Application Discovery Service offline to inspect collected data before it is shared with the service.

      " } diff --git a/services/applicationinsights/pom.xml b/services/applicationinsights/pom.xml index 8eff91ba1f1f..d759452969f1 100644 --- a/services/applicationinsights/pom.xml +++ b/services/applicationinsights/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT applicationinsights AWS Java SDK :: Services :: Application Insights diff --git a/services/appmesh/pom.xml b/services/appmesh/pom.xml index 67c91c274ca8..539d955472f2 100644 --- a/services/appmesh/pom.xml +++ b/services/appmesh/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT appmesh AWS Java SDK :: Services :: App Mesh diff --git a/services/apprunner/pom.xml b/services/apprunner/pom.xml index 0ae911dfc035..1afb12b53ec8 100644 --- a/services/apprunner/pom.xml +++ b/services/apprunner/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT apprunner AWS Java SDK :: Services :: App Runner diff --git a/services/appstream/pom.xml b/services/appstream/pom.xml index b4dd8ccb4192..13194ff1b589 100644 --- a/services/appstream/pom.xml +++ b/services/appstream/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT appstream AWS Java SDK :: Services :: Amazon AppStream diff --git a/services/appsync/pom.xml b/services/appsync/pom.xml index cf039a07d4c3..51c50388517e 100644 --- a/services/appsync/pom.xml +++ b/services/appsync/pom.xml @@ -21,7 +21,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT appsync diff --git a/services/appsync/src/main/resources/codegen-resources/service-2.json b/services/appsync/src/main/resources/codegen-resources/service-2.json index 7b8e479483e3..2ffe0aae6850 100644 --- a/services/appsync/src/main/resources/codegen-resources/service-2.json +++ b/services/appsync/src/main/resources/codegen-resources/service-2.json @@ -29,6 +29,42 @@ ], "documentation":"

      Maps an endpoint to your custom domain.

      " }, + "AssociateMergedGraphqlApi":{ + "name":"AssociateMergedGraphqlApi", + "http":{ + "method":"POST", + "requestUri":"/v1/sourceApis/{sourceApiIdentifier}/mergedApiAssociations" + }, + "input":{"shape":"AssociateMergedGraphqlApiRequest"}, + "output":{"shape":"AssociateMergedGraphqlApiResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"BadRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

      Creates an association between a Merged API and source API using the source API's identifier.

      " + }, + "AssociateSourceGraphqlApi":{ + "name":"AssociateSourceGraphqlApi", + "http":{ + "method":"POST", + "requestUri":"/v1/mergedApis/{mergedApiIdentifier}/sourceApiAssociations" + }, + "input":{"shape":"AssociateSourceGraphqlApiRequest"}, + "output":{"shape":"AssociateSourceGraphqlApiResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"BadRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

      Creates an association between a Merged API and source API using the Merged API's identifier.

      " + }, "CreateApiCache":{ "name":"CreateApiCache", "http":{ @@ -318,6 +354,40 @@ ], "documentation":"

      Removes an ApiAssociation object from a custom domain.

      " }, + "DisassociateMergedGraphqlApi":{ + "name":"DisassociateMergedGraphqlApi", + "http":{ + "method":"DELETE", + "requestUri":"/v1/sourceApis/{sourceApiIdentifier}/mergedApiAssociations/{associationId}" + }, + "input":{"shape":"DisassociateMergedGraphqlApiRequest"}, + "output":{"shape":"DisassociateMergedGraphqlApiResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"BadRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"NotFoundException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

      Deletes an association between a Merged API and source API using the source API's identifier and the association ID.

      " + }, + "DisassociateSourceGraphqlApi":{ + "name":"DisassociateSourceGraphqlApi", + "http":{ + "method":"DELETE", + "requestUri":"/v1/mergedApis/{mergedApiIdentifier}/sourceApiAssociations/{associationId}" + }, + "input":{"shape":"DisassociateSourceGraphqlApiRequest"}, + "output":{"shape":"DisassociateSourceGraphqlApiResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"BadRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"NotFoundException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

      Deletes an association between a Merged API and source API using the Merged API's identifier and the association ID.

      " + }, "EvaluateCode":{ "name":"EvaluateCode", "http":{ @@ -510,6 +580,22 @@ ], "documentation":"

      Retrieves the current status of a schema creation operation.

      " }, + "GetSourceApiAssociation":{ + "name":"GetSourceApiAssociation", + "http":{ + "method":"GET", + "requestUri":"/v1/mergedApis/{mergedApiIdentifier}/sourceApiAssociations/{associationId}" + }, + "input":{"shape":"GetSourceApiAssociationRequest"}, + "output":{"shape":"GetSourceApiAssociationResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"BadRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

      Retrieves a SourceApiAssociation object.

      " + }, "GetType":{ "name":"GetType", "http":{ @@ -637,6 +723,22 @@ ], "documentation":"

      List the resolvers that are associated with a specific function.

      " }, + "ListSourceApiAssociations":{ + "name":"ListSourceApiAssociations", + "http":{ + "method":"GET", + "requestUri":"/v1/apis/{apiId}/sourceApiAssociations" + }, + "input":{"shape":"ListSourceApiAssociationsRequest"}, + "output":{"shape":"ListSourceApiAssociationsResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"BadRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

      Lists the SourceApiAssociationSummary data.

      " + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -672,6 +774,23 @@ ], "documentation":"

      Lists the types for a given API.

      " }, + "ListTypesByAssociation":{ + "name":"ListTypesByAssociation", + "http":{ + "method":"GET", + "requestUri":"/v1/mergedApis/{mergedApiIdentifier}/sourceApiAssociations/{associationId}/types" + }, + "input":{"shape":"ListTypesByAssociationRequest"}, + "output":{"shape":"ListTypesByAssociationResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

      Lists Type objects by the source API association ID.

      " + }, "StartSchemaCreation":{ "name":"StartSchemaCreation", "http":{ @@ -689,6 +808,23 @@ ], "documentation":"

      Adds a new schema to your GraphQL API.

      This operation is asynchronous. Use to determine when it has completed.

      " }, + "StartSchemaMerge":{ + "name":"StartSchemaMerge", + "http":{ + "method":"POST", + "requestUri":"/v1/mergedApis/{mergedApiIdentifier}/sourceApiAssociations/{associationId}/merge" + }, + "input":{"shape":"StartSchemaMergeRequest"}, + "output":{"shape":"StartSchemaMergeResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"BadRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"NotFoundException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

      Initiates a merge operation. Returns a status that shows the result of the merge operation.

      " + }, "TagResource":{ "name":"TagResource", "http":{ @@ -845,6 +981,23 @@ ], "documentation":"

      Updates a Resolver object.

      " }, + "UpdateSourceApiAssociation":{ + "name":"UpdateSourceApiAssociation", + "http":{ + "method":"POST", + "requestUri":"/v1/mergedApis/{mergedApiIdentifier}/sourceApiAssociations/{associationId}" + }, + "input":{"shape":"UpdateSourceApiAssociationRequest"}, + "output":{"shape":"UpdateSourceApiAssociationResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"BadRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"NotFoundException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

      Updates some of the configuration choices of a particular source API association.

      " + }, "UpdateType":{ "name":"UpdateType", "http":{ @@ -1087,6 +1240,78 @@ } } }, + "AssociateMergedGraphqlApiRequest":{ + "type":"structure", + "required":[ + "sourceApiIdentifier", + "mergedApiIdentifier" + ], + "members":{ + "sourceApiIdentifier":{ + "shape":"String", + "documentation":"

      The identifier of the AppSync Source API. This is generated by the AppSync service. In most cases, source APIs (especially in your account) only require the API ID value or ARN of the source API. However, source APIs from other accounts (cross-account use cases) strictly require the full resource ARN of the source API.

      ", + "location":"uri", + "locationName":"sourceApiIdentifier" + }, + "mergedApiIdentifier":{ + "shape":"String", + "documentation":"

      The identifier of the AppSync Merged API. This is generated by the AppSync service. In most cases, Merged APIs (especially in your account) only require the API ID value or ARN of the merged API. However, Merged APIs in other accounts (cross-account use cases) strictly require the full resource ARN of the merged API.

      " + }, + "description":{ + "shape":"String", + "documentation":"

      The description field.

      " + }, + "sourceApiAssociationConfig":{ + "shape":"SourceApiAssociationConfig", + "documentation":"

      The SourceApiAssociationConfig object data.

      " + } + } + }, + "AssociateMergedGraphqlApiResponse":{ + "type":"structure", + "members":{ + "sourceApiAssociation":{ + "shape":"SourceApiAssociation", + "documentation":"

      The SourceApiAssociation object data.

      " + } + } + }, + "AssociateSourceGraphqlApiRequest":{ + "type":"structure", + "required":[ + "mergedApiIdentifier", + "sourceApiIdentifier" + ], + "members":{ + "mergedApiIdentifier":{ + "shape":"String", + "documentation":"

      The identifier of the AppSync Merged API. This is generated by the AppSync service. In most cases, Merged APIs (especially in your account) only require the API ID value or ARN of the merged API. However, Merged APIs in other accounts (cross-account use cases) strictly require the full resource ARN of the merged API.

      ", + "location":"uri", + "locationName":"mergedApiIdentifier" + }, + "sourceApiIdentifier":{ + "shape":"String", + "documentation":"

      The identifier of the AppSync Source API. This is generated by the AppSync service. In most cases, source APIs (especially in your account) only require the API ID value or ARN of the source API. However, source APIs from other accounts (cross-account use cases) strictly require the full resource ARN of the source API.

      " + }, + "description":{ + "shape":"String", + "documentation":"

      The description field.

      " + }, + "sourceApiAssociationConfig":{ + "shape":"SourceApiAssociationConfig", + "documentation":"

      The SourceApiAssociationConfig object data.

      " + } + } + }, + "AssociateSourceGraphqlApiResponse":{ + "type":"structure", + "members":{ + "sourceApiAssociation":{ + "shape":"SourceApiAssociation", + "documentation":"

      The SourceApiAssociation object data.

      " + } + } + }, "AssociationStatus":{ "type":"string", "enum":[ @@ -1574,6 +1799,18 @@ "visibility":{ "shape":"GraphQLApiVisibility", "documentation":"

      Sets the value of the GraphQL API to public (GLOBAL) or private (PRIVATE). If no value is provided, the visibility will be set to GLOBAL by default. This value cannot be changed once the API has been created.

      " + }, + "apiType":{ + "shape":"GraphQLApiType", + "documentation":"

      The value that indicates whether the GraphQL API is a standard API (GRAPHQL) or merged API (MERGED).

      " + }, + "mergedApiExecutionRoleArn":{ + "shape":"String", + "documentation":"

      The Identity and Access Management service role ARN for a merged API. The AppSync service assumes this role on behalf of the Merged API to validate access to source APIs at runtime and to prompt the AUTO_MERGE to update the merged API endpoint with the source API changes automatically.

      " + }, + "ownerContact":{ + "shape":"String", + "documentation":"

      The owner contact information for an API resource.

      This field accepts any string input with a length of 0 - 256 characters.

      " } } }, @@ -1762,6 +1999,7 @@ "type":"list", "member":{"shape":"DataSource"} }, + "Date":{"type":"timestamp"}, "DefaultAction":{ "type":"string", "enum":[ @@ -2000,6 +2238,66 @@ "members":{ } }, + "DisassociateMergedGraphqlApiRequest":{ + "type":"structure", + "required":[ + "sourceApiIdentifier", + "associationId" + ], + "members":{ + "sourceApiIdentifier":{ + "shape":"String", + "documentation":"

      The identifier of the AppSync Source API. This is generated by the AppSync service. In most cases, source APIs (especially in your account) only require the API ID value or ARN of the source API. However, source APIs from other accounts (cross-account use cases) strictly require the full resource ARN of the source API.

      ", + "location":"uri", + "locationName":"sourceApiIdentifier" + }, + "associationId":{ + "shape":"String", + "documentation":"

      The ID generated by the AppSync service for the source API association.

      ", + "location":"uri", + "locationName":"associationId" + } + } + }, + "DisassociateMergedGraphqlApiResponse":{ + "type":"structure", + "members":{ + "sourceApiAssociationStatus":{ + "shape":"SourceApiAssociationStatus", + "documentation":"

      The state of the source API association.

      " + } + } + }, + "DisassociateSourceGraphqlApiRequest":{ + "type":"structure", + "required":[ + "mergedApiIdentifier", + "associationId" + ], + "members":{ + "mergedApiIdentifier":{ + "shape":"String", + "documentation":"

      The identifier of the AppSync Merged API. This is generated by the AppSync service. In most cases, Merged APIs (especially in your account) only require the API ID value or ARN of the merged API. However, Merged APIs in other accounts (cross-account use cases) strictly require the full resource ARN of the merged API.

      ", + "location":"uri", + "locationName":"mergedApiIdentifier" + }, + "associationId":{ + "shape":"String", + "documentation":"

      The ID generated by the AppSync service for the source API association.

      ", + "location":"uri", + "locationName":"associationId" + } + } + }, + "DisassociateSourceGraphqlApiResponse":{ + "type":"structure", + "members":{ + "sourceApiAssociationStatus":{ + "shape":"SourceApiAssociationStatus", + "documentation":"

      The state of the source API association.

      " + } + } + }, "DomainName":{ "type":"string", "max":253, @@ -2531,6 +2829,36 @@ } } }, + "GetSourceApiAssociationRequest":{ + "type":"structure", + "required":[ + "mergedApiIdentifier", + "associationId" + ], + "members":{ + "mergedApiIdentifier":{ + "shape":"String", + "documentation":"

      The identifier of the AppSync Merged API. This is generated by the AppSync service. In most cases, Merged APIs (especially in your account) only require the API ID value or ARN of the merged API. However, Merged APIs in other accounts (cross-account use cases) strictly require the full resource ARN of the merged API.

      ", + "location":"uri", + "locationName":"mergedApiIdentifier" + }, + "associationId":{ + "shape":"String", + "documentation":"

      The ID generated by the AppSync service for the source API association.

      ", + "location":"uri", + "locationName":"associationId" + } + } + }, + "GetSourceApiAssociationResponse":{ + "type":"structure", + "members":{ + "sourceApiAssociation":{ + "shape":"SourceApiAssociation", + "documentation":"

      The SourceApiAssociation object data.

      " + } + } + }, "GetTypeRequest":{ "type":"structure", "required":[ @@ -2568,6 +2896,13 @@ } } }, + "GraphQLApiType":{ + "type":"string", + "enum":[ + "GRAPHQL", + "MERGED" + ] + }, "GraphQLApiVisibility":{ "type":"string", "enum":[ @@ -2646,6 +2981,22 @@ "visibility":{ "shape":"GraphQLApiVisibility", "documentation":"

      Sets the value of the GraphQL API to public (GLOBAL) or private (PRIVATE). If no value is provided, the visibility will be set to GLOBAL by default. This value cannot be changed once the API has been created.

      " + }, + "apiType":{ + "shape":"GraphQLApiType", + "documentation":"

      The value that indicates whether the GraphQL API is a standard API (GRAPHQL) or merged API (MERGED).

      " + }, + "mergedApiExecutionRoleArn":{ + "shape":"String", + "documentation":"

      The Identity and Access Management service role ARN for a merged API. The AppSync service assumes this role on behalf of the Merged API to validate access to source APIs at runtime and to prompt the AUTO_MERGE to update the merged API endpoint with the source API changes automatically.

      " + }, + "owner":{ + "shape":"String", + "documentation":"

      The account owner of the GraphQL API.

      " + }, + "ownerContact":{ + "shape":"String", + "documentation":"

      The owner contact information for an API resource.

      This field accepts any string input with a length of 0 - 256 characters.

      " } }, "documentation":"

      Describes a GraphQL API.

      " @@ -2806,7 +3157,7 @@ "members":{ "nextToken":{ "shape":"PaginationToken", - "documentation":"

      The API token.

      ", + "documentation":"

      An identifier that was returned from the previous call to this operation, which you can use to return the next set of items in the list.

      ", "location":"querystring", "locationName":"nextToken" }, @@ -2827,7 +3178,7 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

      The API token.

      " + "documentation":"

      An identifier that was returned from the previous call to this operation, which you can use to return the next set of items in the list.

      " } } }, @@ -2882,6 +3233,18 @@ "documentation":"

      The maximum number of results that you want the request to return.

      ", "location":"querystring", "locationName":"maxResults" + }, + "apiType":{ + "shape":"GraphQLApiType", + "documentation":"

      The value that indicates whether the GraphQL API is a standard API (GRAPHQL) or merged API (MERGED).

      ", + "location":"querystring", + "locationName":"apiType" + }, + "owner":{ + "shape":"Ownership", + "documentation":"

      The account owner of the GraphQL API.

      ", + "location":"querystring", + "locationName":"owner" } } }, @@ -2990,6 +3353,43 @@ } } }, + "ListSourceApiAssociationsRequest":{ + "type":"structure", + "required":["apiId"], + "members":{ + "apiId":{ + "shape":"String", + "documentation":"

      The API ID.

      ", + "location":"uri", + "locationName":"apiId" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

      An identifier that was returned from the previous call to this operation, which you can use to return the next set of items in the list.

      ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

      The maximum number of results that you want the request to return.

      ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListSourceApiAssociationsResponse":{ + "type":"structure", + "members":{ + "sourceApiAssociationSummaries":{ + "shape":"SourceApiAssociationSummaryList", + "documentation":"

      The SourceApiAssociationSummary object data.

      " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

      An identifier that was returned from the previous call to this operation, which you can use to return the next set of items in the list.

      " + } + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["resourceArn"], @@ -3011,6 +3411,59 @@ } } }, + "ListTypesByAssociationRequest":{ + "type":"structure", + "required":[ + "mergedApiIdentifier", + "associationId", + "format" + ], + "members":{ + "mergedApiIdentifier":{ + "shape":"String", + "documentation":"

      The identifier of the AppSync Merged API. This is generated by the AppSync service. In most cases, Merged APIs (especially in your account) only require the API ID value or ARN of the merged API. However, Merged APIs in other accounts (cross-account use cases) strictly require the full resource ARN of the merged API.

      ", + "location":"uri", + "locationName":"mergedApiIdentifier" + }, + "associationId":{ + "shape":"String", + "documentation":"

      The ID generated by the AppSync service for the source API association.

      ", + "location":"uri", + "locationName":"associationId" + }, + "format":{ + "shape":"TypeDefinitionFormat", + "documentation":"

      The format type.

      ", + "location":"querystring", + "locationName":"format" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

      An identifier that was returned from the previous call to this operation, which you can use to return the next set of items in the list.

      ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

      The maximum number of results that you want the request to return.

      ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListTypesByAssociationResponse":{ + "type":"structure", + "members":{ + "types":{ + "shape":"TypeList", + "documentation":"

      The Type objects.

      " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

      An identifier that was returned from the previous call to this operation, which you can use to return the next set of items in the list.

      " + } + } + }, "ListTypesRequest":{ "type":"structure", "required":[ @@ -3105,6 +3558,13 @@ "max":25, "min":0 }, + "MergeType":{ + "type":"string", + "enum":[ + "MANUAL_MERGE", + "AUTO_MERGE" + ] + }, "NotFoundException":{ "type":"structure", "members":{ @@ -3162,6 +3622,13 @@ "JSON" ] }, + "Ownership":{ + "type":"string", + "enum":[ + "CURRENT_ACCOUNT", + "OTHER_ACCOUNTS" + ] + }, "PaginationToken":{ "type":"string", "max":65536, @@ -3315,6 +3782,117 @@ "NOT_APPLICABLE" ] }, + "SourceApiAssociation":{ + "type":"structure", + "members":{ + "associationId":{ + "shape":"String", + "documentation":"

      The ID generated by the AppSync service for the source API association.

      " + }, + "associationArn":{ + "shape":"String", + "documentation":"

      The Amazon Resource Name (ARN) of the source API association.

      " + }, + "sourceApiId":{ + "shape":"String", + "documentation":"

      The ID of the AppSync source API.

      " + }, + "sourceApiArn":{ + "shape":"String", + "documentation":"

      The Amazon Resource Name (ARN) of the AppSync source API.

      " + }, + "mergedApiArn":{ + "shape":"String", + "documentation":"

      The Amazon Resource Name (ARN) of the AppSync Merged API.

      " + }, + "mergedApiId":{ + "shape":"String", + "documentation":"

      The ID of the AppSync Merged API.

      " + }, + "description":{ + "shape":"String", + "documentation":"

      The description field.

      " + }, + "sourceApiAssociationConfig":{ + "shape":"SourceApiAssociationConfig", + "documentation":"

      The SourceApiAssociationConfig object data.

      " + }, + "sourceApiAssociationStatus":{ + "shape":"SourceApiAssociationStatus", + "documentation":"

      The state of the source API association.

      " + }, + "sourceApiAssociationStatusDetail":{ + "shape":"String", + "documentation":"

      The detailed message related to the current state of the source API association.

      " + }, + "lastSuccessfulMergeDate":{ + "shape":"Date", + "documentation":"

      The datetime value of the last successful merge of the source API association. The result will be in UTC format and your local time zone.

      " + } + }, + "documentation":"

      Describes the configuration of a source API. A source API is a GraphQL API that is linked to a merged API. There can be multiple source APIs attached to each merged API. When linked to a merged API, the source API's schema, data sources, and resolvers will be combined with other linked source API data to form a new, singular API.

      Source APIs can originate from your account or from other accounts via Amazon Web Services Resource Access Manager. For more information about sharing resources from other accounts, see What is Amazon Web Services Resource Access Manager? in the Amazon Web Services Resource Access Manager guide.

      " + }, + "SourceApiAssociationConfig":{ + "type":"structure", + "members":{ + "mergeType":{ + "shape":"MergeType", + "documentation":"

      The property that indicates which merging option is enabled in the source API association.

      Valid merge types are MANUAL_MERGE (default) and AUTO_MERGE. Manual merges are the default behavior and require the user to trigger any changes from the source APIs to the merged API manually. Auto merges subscribe the merged API to the changes performed on the source APIs so that any change in the source APIs are also made to the merged API. Auto merges use MergedApiExecutionRoleArn to perform merge operations.

      " + } + }, + "documentation":"

      Describes properties used to specify configurations related to a source API.

      " + }, + "SourceApiAssociationStatus":{ + "type":"string", + "enum":[ + "MERGE_SCHEDULED", + "MERGE_FAILED", + "MERGE_SUCCESS", + "MERGE_IN_PROGRESS", + "AUTO_MERGE_SCHEDULE_FAILED", + "DELETION_SCHEDULED", + "DELETION_IN_PROGRESS", + "DELETION_FAILED" + ] + }, + "SourceApiAssociationSummary":{ + "type":"structure", + "members":{ + "associationId":{ + "shape":"String", + "documentation":"

      The ID generated by the AppSync service for the source API association.

      " + }, + "associationArn":{ + "shape":"String", + "documentation":"

      The Amazon Resource Name (ARN) of the source API association.

      " + }, + "sourceApiId":{ + "shape":"String", + "documentation":"

      The ID of the AppSync source API.

      " + }, + "sourceApiArn":{ + "shape":"String", + "documentation":"

      The Amazon Resource Name (ARN) of the AppSync Source API.

      " + }, + "mergedApiId":{ + "shape":"String", + "documentation":"

      The ID of the AppSync Merged API.

      " + }, + "mergedApiArn":{ + "shape":"String", + "documentation":"

      The Amazon Resource Name (ARN) of the AppSync Merged API.

      " + }, + "description":{ + "shape":"String", + "documentation":"

      The description field.

      " + } + }, + "documentation":"

      Describes the ARNs and IDs of associations, Merged APIs, and source APIs.

      " + }, + "SourceApiAssociationSummaryList":{ + "type":"list", + "member":{"shape":"SourceApiAssociationSummary"} + }, "StartSchemaCreationRequest":{ "type":"structure", "required":[ @@ -3343,6 +3921,36 @@ } } }, + "StartSchemaMergeRequest":{ + "type":"structure", + "required":[ + "associationId", + "mergedApiIdentifier" + ], + "members":{ + "associationId":{ + "shape":"String", + "documentation":"

      The ID generated by the AppSync service for the source API association.

      ", + "location":"uri", + "locationName":"associationId" + }, + "mergedApiIdentifier":{ + "shape":"String", + "documentation":"

      The identifier of the AppSync Merged API. This is generated by the AppSync service. In most cases, Merged APIs (especially in your account) only require the API ID value or ARN of the merged API. However, Merged APIs in other accounts (cross-account use cases) strictly require the full resource ARN of the merged API.

      ", + "location":"uri", + "locationName":"mergedApiIdentifier" + } + } + }, + "StartSchemaMergeResponse":{ + "type":"structure", + "members":{ + "sourceApiAssociationStatus":{ + "shape":"SourceApiAssociationStatus", + "documentation":"

      The state of the source API association.

      " + } + } + }, "String":{"type":"string"}, "SyncConfig":{ "type":"structure", @@ -3780,6 +4388,14 @@ "lambdaAuthorizerConfig":{ "shape":"LambdaAuthorizerConfig", "documentation":"

      Configuration for Lambda function authorization.

      " + }, + "mergedApiExecutionRoleArn":{ + "shape":"String", + "documentation":"

      The Identity and Access Management service role ARN for a merged API. The AppSync service assumes this role on behalf of the Merged API to validate access to source APIs at runtime and to prompt the AUTO_MERGE to update the merged API endpoint with the source API changes automatically.

      " + }, + "ownerContact":{ + "shape":"String", + "documentation":"

      The owner contact information for an API resource.

      This field accepts any string input with a length of 0 - 256 characters.

      " } } }, @@ -3866,6 +4482,44 @@ } } }, + "UpdateSourceApiAssociationRequest":{ + "type":"structure", + "required":[ + "associationId", + "mergedApiIdentifier" + ], + "members":{ + "associationId":{ + "shape":"String", + "documentation":"

      The ID generated by the AppSync service for the source API association.

      ", + "location":"uri", + "locationName":"associationId" + }, + "mergedApiIdentifier":{ + "shape":"String", + "documentation":"

      The identifier of the AppSync Merged API. This is generated by the AppSync service. In most cases, Merged APIs (especially in your account) only require the API ID value or ARN of the merged API. However, Merged APIs in other accounts (cross-account use cases) strictly require the full resource ARN of the merged API.

      ", + "location":"uri", + "locationName":"mergedApiIdentifier" + }, + "description":{ + "shape":"String", + "documentation":"

      The description field.

      " + }, + "sourceApiAssociationConfig":{ + "shape":"SourceApiAssociationConfig", + "documentation":"

      The SourceApiAssociationConfig object data.

      " + } + } + }, + "UpdateSourceApiAssociationResponse":{ + "type":"structure", + "members":{ + "sourceApiAssociation":{ + "shape":"SourceApiAssociation", + "documentation":"

      The SourceApiAssociation object data.

      " + } + } + }, "UpdateTypeRequest":{ "type":"structure", "required":[ diff --git a/services/arczonalshift/pom.xml b/services/arczonalshift/pom.xml index 0b2cca451962..26549fb53450 100644 --- a/services/arczonalshift/pom.xml +++ b/services/arczonalshift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT arczonalshift AWS Java SDK :: Services :: ARC Zonal Shift diff --git a/services/athena/pom.xml b/services/athena/pom.xml index 627ac442ef7b..3b1fc5cca087 100644 --- a/services/athena/pom.xml +++ b/services/athena/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT athena AWS Java SDK :: Services :: Amazon Athena diff --git a/services/athena/src/main/resources/codegen-resources/service-2.json b/services/athena/src/main/resources/codegen-resources/service-2.json index 4fd58b6caeea..fd85814b4648 100644 --- a/services/athena/src/main/resources/codegen-resources/service-2.json +++ b/services/athena/src/main/resources/codegen-resources/service-2.json @@ -66,7 +66,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServerException"} ], - "documentation":"

      Cancels the capacity reservation with the specified name.

      ", + "documentation":"

      Cancels the capacity reservation with the specified name. Cancelled reservations remain in your account and will be deleted 45 days after cancellation. During the 45 days, you cannot re-purpose or reuse a reservation that has been cancelled, but you can refer to its tags and view it for historical reference.

      ", "idempotent":true }, "CreateCapacityReservation":{ @@ -171,6 +171,21 @@ ], "documentation":"

      Creates a workgroup with the specified name. A workgroup can be an Apache Spark enabled workgroup or an Athena SQL workgroup.

      " }, + "DeleteCapacityReservation":{ + "name":"DeleteCapacityReservation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCapacityReservationInput"}, + "output":{"shape":"DeleteCapacityReservationOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Deletes a cancelled capacity reservation. A reservation must be cancelled before it can be deleted. A deleted reservation is immediately removed from your account and can no longer be referenced, including by its ARN. A deleted reservation cannot be called by GetCapacityReservation, and deleted reservations do not appear in the output of ListCapacityReservations.

      ", + "idempotent":true + }, "DeleteDataCatalog":{ "name":"DeleteDataCatalog", "http":{ @@ -1756,7 +1771,7 @@ "documentation":"

      The KMS key that is used to encrypt the user's data stores in Athena.

      " } }, - "documentation":"

      Specifies the KMS key that is used to encrypt the user's data stores in Athena.

      " + "documentation":"

      Specifies the KMS key that is used to encrypt the user's data stores in Athena. This setting does not apply to Athena SQL workgroups.

      " }, "DataCatalog":{ "type":"structure", @@ -1855,6 +1870,21 @@ "max":1, "min":1 }, + "DeleteCapacityReservationInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"CapacityReservationName", + "documentation":"

      The name of the capacity reservation to delete.

      " + } + } + }, + "DeleteCapacityReservationOutput":{ + "type":"structure", + "members":{ + } + }, "DeleteDataCatalogInput":{ "type":"structure", "required":["Name"], @@ -3528,7 +3558,7 @@ }, "ExecutionParameters":{ "shape":"ExecutionParameters", - "documentation":"

      A list of values for the parameters in a query. The values are applied sequentially to the parameters in the query in the order in which the parameters occur.

      " + "documentation":"

      A list of values for the parameters in a query. The values are applied sequentially to the parameters in the query in the order in which the parameters occur. The list of parameters is not returned in the response.

      " }, "SubstatementType":{ "shape":"String", @@ -4757,7 +4787,7 @@ }, "CustomerContentEncryptionConfiguration":{ "shape":"CustomerContentEncryptionConfiguration", - "documentation":"

      Specifies the KMS key that is used to encrypt the user's data stores in Athena.

      " + "documentation":"

      Specifies the KMS key that is used to encrypt the user's data stores in Athena. This setting does not apply to Athena SQL workgroups.

      " }, "EnableMinimumEncryptionConfiguration":{ "shape":"BoxedBoolean", @@ -4799,7 +4829,7 @@ }, "RemoveCustomerContentEncryptionConfiguration":{ "shape":"BoxedBoolean", - "documentation":"

      Removes content encryption configuration for a workgroup.

      " + "documentation":"

      Removes content encryption configuration from an Apache Spark-enabled Athena workgroup.

      " }, "AdditionalConfiguration":{ "shape":"NameString", diff --git a/services/auditmanager/pom.xml b/services/auditmanager/pom.xml index 8d5e0f55834a..692f66f9bf65 100644 --- a/services/auditmanager/pom.xml +++ b/services/auditmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT auditmanager AWS Java SDK :: Services :: Audit Manager diff --git a/services/auditmanager/src/main/resources/codegen-resources/endpoint-tests.json b/services/auditmanager/src/main/resources/codegen-resources/endpoint-tests.json index 7b3557950f12..6b6545622735 100644 --- a/services/auditmanager/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/auditmanager/src/main/resources/codegen-resources/endpoint-tests.json @@ -9,8 +9,8 @@ }, "params": { "Region": "ap-northeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -22,8 +22,8 @@ }, "params": { "Region": "ap-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -35,8 +35,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -48,8 +48,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,8 +61,8 @@ }, "params": { "Region": "ca-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -74,8 +74,8 @@ }, "params": { "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -87,8 +87,8 @@ }, "params": { "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -100,8 +100,8 @@ }, "params": { "Region": "eu-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } 
}, { @@ -113,8 +113,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -126,8 +126,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -139,8 +139,8 @@ }, "params": { "Region": "us-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -152,8 +152,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -165,8 +165,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -178,8 +178,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -191,8 +191,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -204,8 +204,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -217,8 +217,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -230,8 +230,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -243,8 +243,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -256,8 +256,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -269,8 +269,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -282,8 
+282,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -295,8 +295,19 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -308,8 +319,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -321,8 +343,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -334,8 +367,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -347,8 +391,8 @@ }, "params": { "Region": 
"us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -360,8 +404,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -373,8 +417,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -385,8 +429,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -397,10 +441,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/auditmanager/src/main/resources/codegen-resources/service-2.json b/services/auditmanager/src/main/resources/codegen-resources/service-2.json index 7bef4e5f657c..8420122febbb 100644 --- a/services/auditmanager/src/main/resources/codegen-resources/service-2.json +++ b/services/auditmanager/src/main/resources/codegen-resources/service-2.json @@ -104,9 +104,10 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      Uploads one or more pieces of evidence to a control in an Audit Manager assessment. You can upload manual evidence from any Amazon Simple Storage Service (Amazon S3) bucket by specifying the S3 URI of the evidence.

      You must upload manual evidence to your S3 bucket before you can upload it to your assessment. For instructions, see CreateBucket and PutObject in the Amazon Simple Storage Service API Reference.

      The following restrictions apply to this action:

      • Maximum size of an individual evidence file: 100 MB

      • Number of daily manual evidence uploads per control: 100

      • Supported file formats: See Supported file types for manual evidence in the Audit Manager User Guide

      For more information about Audit Manager service restrictions, see Quotas and restrictions for Audit Manager.

      " + "documentation":"

      Adds one or more pieces of evidence to a control in an Audit Manager assessment.

      You can import manual evidence from any S3 bucket by specifying the S3 URI of the object. You can also upload a file from your browser, or enter plain text in response to a risk assessment question.

      The following restrictions apply to this action:

      • manualEvidence can be only one of the following: evidenceFileName, s3ResourcePath, or textResponse

      • Maximum size of an individual evidence file: 100 MB

      • Number of daily manual evidence uploads per control: 100

      • Supported file formats: See Supported file types for manual evidence in the Audit Manager User Guide

      For more information about Audit Manager service restrictions, see Quotas and restrictions for Audit Manager.

      " }, "CreateAssessment":{ "name":"CreateAssessment", @@ -253,7 +254,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

      Deletes a custom control in Audit Manager.

      " + "documentation":"

      Deletes a custom control in Audit Manager.

      When you invoke this operation, the custom control is deleted from any frameworks or assessments that it’s currently part of. As a result, Audit Manager will stop collecting evidence for that custom control in all of your assessments. This includes assessments that you previously created before you deleted the custom control.

      " }, "DeregisterAccount":{ "name":"DeregisterAccount", @@ -314,7 +315,7 @@ "errors":[ {"shape":"InternalServerException"} ], - "documentation":"

      Returns the registration status of an account in Audit Manager.

      " + "documentation":"

      Gets the registration status of an account in Audit Manager.

      " }, "GetAssessment":{ "name":"GetAssessment", @@ -330,7 +331,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

      Returns an assessment from Audit Manager.

      " + "documentation":"

      Gets information about a specified assessment.

      " }, "GetAssessmentFramework":{ "name":"GetAssessmentFramework", @@ -346,7 +347,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

      Returns a framework from Audit Manager.

      " + "documentation":"

      Gets information about a specified framework.

      " }, "GetAssessmentReportUrl":{ "name":"GetAssessmentReportUrl", @@ -362,7 +363,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

      Returns the URL of an assessment report in Audit Manager.

      " + "documentation":"

      Gets the URL of an assessment report in Audit Manager.

      " }, "GetChangeLogs":{ "name":"GetChangeLogs", @@ -378,7 +379,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

      Returns a list of changelogs from Audit Manager.

      " + "documentation":"

      Gets a list of changelogs from Audit Manager.

      " }, "GetControl":{ "name":"GetControl", @@ -394,7 +395,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

      Returns a control from Audit Manager.

      " + "documentation":"

      Gets information about a specified control.

      " }, "GetDelegations":{ "name":"GetDelegations", @@ -409,7 +410,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

      Returns a list of delegations from an audit owner to a delegate.

      " + "documentation":"

      Gets a list of delegations from an audit owner to a delegate.

      " }, "GetEvidence":{ "name":"GetEvidence", @@ -425,7 +426,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

      Returns evidence from Audit Manager.

      " + "documentation":"

      Gets information about a specified evidence item.

      " }, "GetEvidenceByEvidenceFolder":{ "name":"GetEvidenceByEvidenceFolder", @@ -441,7 +442,23 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

      Returns all evidence from a specified evidence folder in Audit Manager.

      " + "documentation":"

      Gets all evidence from a specified evidence folder in Audit Manager.

      " + }, + "GetEvidenceFileUploadUrl":{ + "name":"GetEvidenceFileUploadUrl", + "http":{ + "method":"GET", + "requestUri":"/evidenceFileUploadUrl" + }, + "input":{"shape":"GetEvidenceFileUploadUrlRequest"}, + "output":{"shape":"GetEvidenceFileUploadUrlResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      Creates a presigned Amazon S3 URL that can be used to upload a file as manual evidence. For instructions on how to use this operation, see Upload a file from your browser in the Audit Manager User Guide.

      The following restrictions apply to this operation:

      • Maximum size of an individual evidence file: 100 MB

      • Number of daily manual evidence uploads per control: 100

      • Supported file formats: See Supported file types for manual evidence in the Audit Manager User Guide

      For more information about Audit Manager service restrictions, see Quotas and restrictions for Audit Manager.

      " }, "GetEvidenceFolder":{ "name":"GetEvidenceFolder", @@ -457,7 +474,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

      Returns an evidence folder from the specified assessment in Audit Manager.

      " + "documentation":"

      Gets an evidence folder from a specified assessment in Audit Manager.

      " }, "GetEvidenceFoldersByAssessment":{ "name":"GetEvidenceFoldersByAssessment", @@ -473,7 +490,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

      Returns the evidence folders from a specified assessment in Audit Manager.

      " + "documentation":"

      Gets the evidence folders from a specified assessment in Audit Manager.

      " }, "GetEvidenceFoldersByAssessmentControl":{ "name":"GetEvidenceFoldersByAssessmentControl", @@ -489,7 +506,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

      Returns a list of evidence folders that are associated with a specified control in an Audit Manager assessment.

      " + "documentation":"

      Gets a list of evidence folders that are associated with a specified control in an Audit Manager assessment.

      " }, "GetInsights":{ "name":"GetInsights", @@ -535,7 +552,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

      Returns the name of the delegated Amazon Web Services administrator account for the organization.

      " + "documentation":"

      Gets the name of the delegated Amazon Web Services administrator account for a specified organization.

      " }, "GetServicesInScope":{ "name":"GetServicesInScope", @@ -550,7 +567,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

      Returns a list of all of the Amazon Web Services that you can choose to include in your assessment. When you create an assessment, specify which of these services you want to include to narrow the assessment's scope.

      " + "documentation":"

      Gets a list of all of the Amazon Web Services that you can choose to include in your assessment. When you create an assessment, specify which of these services you want to include to narrow the assessment's scope.

      " }, "GetSettings":{ "name":"GetSettings", @@ -564,7 +581,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

      Returns the settings for the specified Amazon Web Services account.

      " + "documentation":"

      Gets the settings for a specified Amazon Web Services account.

      " }, "ListAssessmentControlInsightsByControlDomain":{ "name":"ListAssessmentControlInsightsByControlDomain", @@ -1651,7 +1668,7 @@ }, "destination":{ "shape":"S3Url", - "documentation":"

      The destination of the assessment report.

      " + "documentation":"

      The destination bucket where Audit Manager stores assessment reports.

      " } }, "documentation":"

      The location where Audit Manager saves assessment reports for the given assessment.

      " @@ -1994,7 +2011,7 @@ }, "type":{ "shape":"ControlType", - "documentation":"

      The type of control, such as a custom control or a standard control.

      " + "documentation":"

      Specifies whether the control is a standard control or a custom control.

      " }, "name":{ "shape":"ControlName", @@ -2195,7 +2212,7 @@ "sourceKeyword":{"shape":"SourceKeyword"}, "sourceFrequency":{ "shape":"SourceFrequency", - "documentation":"

      The frequency of evidence collection for the control mapping source.

      " + "documentation":"

      Specifies how often evidence is collected from the control mapping source.

      " }, "troubleshootingText":{ "shape":"TroubleshootingText", @@ -2504,7 +2521,7 @@ "sourceKeyword":{"shape":"SourceKeyword"}, "sourceFrequency":{ "shape":"SourceFrequency", - "documentation":"

      The frequency of evidence collection for the control mapping source.

      " + "documentation":"

      Specifies how often evidence is collected from the control mapping source.

      " }, "troubleshootingText":{ "shape":"TroubleshootingText", @@ -2598,6 +2615,20 @@ "min":1, "pattern":"^[a-zA-Z0-9\\s-_()\\[\\]]+$" }, + "DefaultExportDestination":{ + "type":"structure", + "members":{ + "destinationType":{ + "shape":"ExportDestinationType", + "documentation":"

      The destination type, such as Amazon S3.

      " + }, + "destination":{ + "shape":"S3Url", + "documentation":"

      The destination bucket where Audit Manager stores exported files.

      " + } + }, + "documentation":"

      The default S3 bucket where Audit Manager saves the files that you export from evidence finder.

      " + }, "Delegation":{ "type":"structure", "members":{ @@ -3055,6 +3086,10 @@ "type":"list", "member":{"shape":"NonEmptyString"} }, + "ExportDestinationType":{ + "type":"string", + "enum":["S3"] + }, "Filename":{ "type":"string", "max":255, @@ -3078,11 +3113,11 @@ }, "type":{ "shape":"FrameworkType", - "documentation":"

      The framework type, such as a custom framework or a standard framework.

      " + "documentation":"

      Specifies whether the framework is a standard framework or a custom framework.

      " }, "complianceType":{ "shape":"ComplianceType", - "documentation":"

      The compliance type that the new custom framework supports, such as CIS or HIPAA.

      " + "documentation":"

      The compliance type that the framework supports, such as CIS or HIPAA.

      " }, "description":{ "shape":"FrameworkDescription", @@ -3094,7 +3129,7 @@ }, "controlSources":{ "shape":"ControlSources", - "documentation":"

      The sources that Audit Manager collects evidence from for the control.

      " + "documentation":"

      The control data sources where Audit Manager collects evidence from.

      " }, "controlSets":{ "shape":"ControlSets", @@ -3321,7 +3356,7 @@ "members":{ "control":{ "shape":"Control", - "documentation":"

      The name of the control that the GetControl API returned.

      " + "documentation":"

      The details of the control that the GetControl API returned.

      " } } }, @@ -3408,6 +3443,31 @@ } } }, + "GetEvidenceFileUploadUrlRequest":{ + "type":"structure", + "required":["fileName"], + "members":{ + "fileName":{ + "shape":"ManualEvidenceLocalFileName", + "documentation":"

      The file that you want to upload. For a list of supported file formats, see Supported file types for manual evidence in the Audit Manager User Guide.

      ", + "location":"querystring", + "locationName":"fileName" + } + } + }, + "GetEvidenceFileUploadUrlResponse":{ + "type":"structure", + "members":{ + "evidenceFileName":{ + "shape":"NonEmptyString", + "documentation":"

      The name of the uploaded manual evidence file that the presigned URL was generated for.

      " + }, + "uploadUrl":{ + "shape":"NonEmptyString", + "documentation":"

      The presigned URL that was generated.

      " + } + } + }, "GetEvidenceFolderRequest":{ "type":"structure", "required":[ @@ -3757,7 +3817,11 @@ }, "KeywordInputType":{ "type":"string", - "enum":["SELECT_FROM_LIST"] + "enum":[ + "SELECT_FROM_LIST", + "UPLOAD_FILE", + "INPUT_TEXT" + ] }, "KeywordValue":{ "type":"string", @@ -3893,7 +3957,7 @@ "members":{ "frameworkMetadataList":{ "shape":"FrameworkMetadataList", - "documentation":"

      The list of metadata objects for the framework.

      " + "documentation":"

      A list of metadata that the ListAssessmentFrameworks API returns for each framework.

      " }, "nextToken":{ "shape":"Token", @@ -3963,7 +4027,7 @@ "members":{ "assessmentMetadata":{ "shape":"ListAssessmentMetadata", - "documentation":"

      The metadata that's associated with the assessment.

      " + "documentation":"

      The metadata that the ListAssessments API returns for each assessment.

      " }, "nextToken":{ "shape":"Token", @@ -4104,7 +4168,7 @@ "members":{ "controlMetadataList":{ "shape":"ControlMetadataList", - "documentation":"

      The list of control metadata objects that the ListControls API returned.

      " + "documentation":"

      A list of metadata that the ListControls API returns for each control.

      " }, "nextToken":{ "shape":"Token", @@ -4205,10 +4269,18 @@ "members":{ "s3ResourcePath":{ "shape":"S3Url", - "documentation":"

      The Amazon S3 URL that points to a manual evidence object.

      " + "documentation":"

      The S3 URL of the object that's imported as manual evidence.

      " + }, + "textResponse":{ + "shape":"ManualEvidenceTextResponse", + "documentation":"

      The plain text response that's entered and saved as manual evidence.

      " + }, + "evidenceFileName":{ + "shape":"ManualEvidenceLocalFileName", + "documentation":"

      The name of the file that's uploaded as manual evidence. This name is populated using the evidenceFileName value from the GetEvidenceFileUploadUrl API response.

      " } }, - "documentation":"

      Evidence that's uploaded to Audit Manager manually.

      " + "documentation":"

      Evidence that's manually added to a control in Audit Manager. manualEvidence can be one of the following: evidenceFileName, s3ResourcePath, or textResponse.

      " }, "ManualEvidenceList":{ "type":"list", @@ -4216,6 +4288,18 @@ "max":50, "min":1 }, + "ManualEvidenceLocalFileName":{ + "type":"string", + "max":300, + "min":1, + "pattern":"[^\\/]*" + }, + "ManualEvidenceTextResponse":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"^[\\w\\W\\s\\S]*$" + }, "MaxResults":{ "type":"integer", "documentation":"Max results in the page.", @@ -4480,7 +4564,8 @@ "DEFAULT_ASSESSMENT_REPORTS_DESTINATION", "DEFAULT_PROCESS_OWNERS", "EVIDENCE_FINDER_ENABLEMENT", - "DEREGISTRATION_POLICY" + "DEREGISTRATION_POLICY", + "DEFAULT_EXPORT_DESTINATION" ] }, "Settings":{ @@ -4496,7 +4581,7 @@ }, "defaultAssessmentReportsDestination":{ "shape":"AssessmentReportsDestination", - "documentation":"

      The default storage destination for assessment reports.

      " + "documentation":"

      The default S3 destination bucket for storing assessment reports.

      " }, "defaultProcessOwners":{ "shape":"Roles", @@ -4513,6 +4598,10 @@ "deregistrationPolicy":{ "shape":"DeregistrationPolicy", "documentation":"

      The deregistration policy for your Audit Manager data. You can use this attribute to determine how your data is handled when you deregister Audit Manager.

      " + }, + "defaultExportDestination":{ + "shape":"DefaultExportDestination", + "documentation":"

      The default S3 destination bucket for storing evidence finder exports.

      " } }, "documentation":"

      The settings object that holds all supported Audit Manager settings.

      " @@ -4574,14 +4663,14 @@ "members":{ "keywordInputType":{ "shape":"KeywordInputType", - "documentation":"

      The input method for the keyword.

      " + "documentation":"

      The input method for the keyword.

      • SELECT_FROM_LIST is used when mapping a data source for automated evidence.

        • When keywordInputType is SELECT_FROM_LIST, a keyword must be selected to collect automated evidence. For example, this keyword can be a CloudTrail event name, a rule name for Config, a Security Hub control, or the name of an Amazon Web Services API call.

      • UPLOAD_FILE and INPUT_TEXT are only used when mapping a data source for manual evidence.

        • When keywordInputType is UPLOAD_FILE, a file must be uploaded as manual evidence.

        • When keywordInputType is INPUT_TEXT, text must be entered as manual evidence.

      " }, "keywordValue":{ "shape":"KeywordValue", - "documentation":"

      The value of the keyword that's used when mapping a control data source. For example, this can be a CloudTrail event name, a rule name for Config, a Security Hub control, or the name of an Amazon Web Services API call.

      If you’re mapping a data source to a rule in Config, the keywordValue that you specify depends on the type of rule:

      • For managed rules, you can use the rule identifier as the keywordValue. You can find the rule identifier from the list of Config managed rules.

      • For custom rules, you form the keywordValue by adding the Custom_ prefix to the rule name. This prefix distinguishes the rule from a managed rule.

        • Custom rule name: my-custom-config-rule

          keywordValue: Custom_my-custom-config-rule

      • For service-linked rules, you form the keywordValue by adding the Custom_ prefix to the rule name. In addition, you remove the suffix ID that appears at the end of the rule name.

        • Service-linked rule name: CustomRuleForAccount-conformance-pack-szsm1uv0w

          keywordValue: Custom_CustomRuleForAccount-conformance-pack

        • Service-linked rule name: OrgConfigRule-s3-bucket-versioning-enabled-dbgzf8ba

          keywordValue: Custom_OrgConfigRule-s3-bucket-versioning-enabled

      " + "documentation":"

      The value of the keyword that's used when mapping a control data source. For example, this can be a CloudTrail event name, a rule name for Config, a Security Hub control, or the name of an Amazon Web Services API call.

      If you’re mapping a data source to a rule in Config, the keywordValue that you specify depends on the type of rule:

      • For managed rules, you can use the rule identifier as the keywordValue. You can find the rule identifier from the list of Config managed rules. For some rules, the rule identifier is different from the rule name. For example, the rule name restricted-ssh has the following rule identifier: INCOMING_SSH_DISABLED. Make sure to use the rule identifier, not the rule name.

        Keyword example for managed rules:

      • For custom rules, you form the keywordValue by adding the Custom_ prefix to the rule name. This prefix distinguishes the custom rule from a managed rule.

        Keyword example for custom rules:

        • Custom rule name: my-custom-config-rule

          keywordValue: Custom_my-custom-config-rule

      • For service-linked rules, you form the keywordValue by adding the Custom_ prefix to the rule name. In addition, you remove the suffix ID that appears at the end of the rule name.

        Keyword examples for service-linked rules:

        • Service-linked rule name: CustomRuleForAccount-conformance-pack-szsm1uv0w

          keywordValue: Custom_CustomRuleForAccount-conformance-pack

        • Service-linked rule name: OrgConfigRule-s3-bucket-versioning-enabled-dbgzf8ba

          keywordValue: Custom_OrgConfigRule-s3-bucket-versioning-enabled

      The keywordValue is case sensitive. If you enter a value incorrectly, Audit Manager might not recognize the data source mapping. As a result, you might not successfully collect evidence from that data source as intended.

      Keep in mind the following requirements, depending on the data source type that you're using.

      1. For Config:

        • For managed rules, make sure that the keywordValue is the rule identifier in ALL_CAPS_WITH_UNDERSCORES. For example, CLOUDWATCH_LOG_GROUP_ENCRYPTED. For accuracy, we recommend that you reference the list of supported Config managed rules.

        • For custom rules, make sure that the keywordValue has the Custom_ prefix followed by the custom rule name. The format of the custom rule name itself may vary. For accuracy, we recommend that you visit the Config console to verify your custom rule name.

      2. For Security Hub: The format varies for Security Hub control names. For accuracy, we recommend that you reference the list of supported Security Hub controls.

      3. For Amazon Web Services API calls: Make sure that the keywordValue is written as serviceprefix_ActionName. For example, iam_ListGroups. For accuracy, we recommend that you reference the list of supported API calls.

      4. For CloudTrail: Make sure that the keywordValue is written as serviceprefix_ActionName. For example, cloudtrail_StartLogging. For accuracy, we recommend that you review the Amazon Web Service prefix and action names in the Service Authorization Reference.

      " } }, - "documentation":"

      The keyword to search for in CloudTrail logs, Config rules, Security Hub checks, and Amazon Web Services API names.

      To learn more about the supported keywords that you can use when mapping a control data source, see the following pages in the Audit Manager User Guide:

      " + "documentation":"

      A keyword that relates to the control data source.

      For manual evidence, this keyword indicates if the manual evidence is a file or text.

      For automated evidence, this keyword identifies a specific CloudTrail event, Config rule, Security Hub control, or Amazon Web Services API name.

      To learn more about the supported keywords that you can use when mapping a control data source, see the following pages in the Audit Manager User Guide:

      " }, "SourceName":{ "type":"string", @@ -5092,7 +5181,7 @@ }, "defaultAssessmentReportsDestination":{ "shape":"AssessmentReportsDestination", - "documentation":"

      The default storage destination for assessment reports.

      " + "documentation":"

      The default S3 destination bucket for storing assessment reports.

      " }, "defaultProcessOwners":{ "shape":"Roles", @@ -5109,6 +5198,10 @@ "deregistrationPolicy":{ "shape":"DeregistrationPolicy", "documentation":"

      The deregistration policy for your Audit Manager data. You can use this attribute to determine how your data is handled when you deregister Audit Manager.

      " + }, + "defaultExportDestination":{ + "shape":"DefaultExportDestination", + "documentation":"

      The default S3 destination bucket for storing evidence finder exports.

      " } } }, diff --git a/services/autoscaling/pom.xml b/services/autoscaling/pom.xml index ed5bf2190926..a61472a8b2c6 100644 --- a/services/autoscaling/pom.xml +++ b/services/autoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT autoscaling AWS Java SDK :: Services :: Auto Scaling diff --git a/services/autoscalingplans/pom.xml b/services/autoscalingplans/pom.xml index 2f4002951ba8..64b0337be712 100644 --- a/services/autoscalingplans/pom.xml +++ b/services/autoscalingplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT autoscalingplans AWS Java SDK :: Services :: Auto Scaling Plans diff --git a/services/backup/pom.xml b/services/backup/pom.xml index a4122bbc1155..5f7f5047d6cd 100644 --- a/services/backup/pom.xml +++ b/services/backup/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT backup AWS Java SDK :: Services :: Backup diff --git a/services/backup/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/backup/src/main/resources/codegen-resources/endpoint-rule-set.json index bc0f7812b416..49fbf7acba9a 100644 --- a/services/backup/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/backup/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": 
"error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,154 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": 
"PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://backup-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://backup-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://backup-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://backup-fips.{Region}.{PartitionResult#dnsSuffix}", - 
"properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://backup.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -286,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://backup.{Region}.{PartitionResult#dualStackDnsSuffix}", + "url": "https://backup.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -295,28 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://backup.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + 
"error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/backup/src/main/resources/codegen-resources/endpoint-tests.json b/services/backup/src/main/resources/codegen-resources/endpoint-tests.json index f03ef8448e2c..4985c744eb05 100644 --- a/services/backup/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/backup/src/main/resources/codegen-resources/endpoint-tests.json @@ -9,8 +9,8 @@ }, "params": { "Region": "af-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -22,8 +22,8 @@ }, "params": { "Region": "ap-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -35,8 +35,8 @@ }, "params": { "Region": "ap-northeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -48,8 +48,8 @@ }, "params": { "Region": "ap-northeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,8 +61,8 @@ }, "params": { "Region": "ap-northeast-3", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -74,8 +74,8 @@ }, "params": { "Region": "ap-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -87,8 +87,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -100,8 +100,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -113,8 +113,8 @@ }, "params": { "Region": "ap-southeast-3", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -126,8 +126,8 @@ }, "params": { "Region": "ca-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -139,8 +139,8 
@@ }, "params": { "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -152,8 +152,8 @@ }, "params": { "Region": "eu-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -165,8 +165,8 @@ }, "params": { "Region": "eu-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -178,8 +178,8 @@ }, "params": { "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -191,8 +191,8 @@ }, "params": { "Region": "eu-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -204,8 +204,8 @@ }, "params": { "Region": "eu-west-3", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -217,8 +217,8 @@ }, "params": { "Region": "me-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -230,8 +230,8 @@ }, "params": { "Region": "sa-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -243,8 +243,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -256,8 +256,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -269,8 +269,8 @@ }, "params": { "Region": "us-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -282,8 +282,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -295,8 +295,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -308,8 +308,8 @@ }, 
"params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -321,8 +321,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -334,8 +334,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -347,8 +347,8 @@ }, "params": { "Region": "cn-northwest-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -360,8 +360,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -373,8 +373,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -386,8 +386,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -399,8 +399,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -412,8 +412,8 @@ }, "params": { "Region": "us-gov-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -425,8 +425,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -438,8 +438,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -451,8 +451,19 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does 
not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -464,8 +475,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -477,8 +499,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -490,8 +523,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -503,12 +547,12 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -516,8 +560,21 @@ }, "params": { "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, + "Endpoint": 
"https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -528,8 +585,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -540,10 +597,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/backup/src/main/resources/codegen-resources/service-2.json b/services/backup/src/main/resources/codegen-resources/service-2.json index 577f2194092f..567173a9d367 100644 --- a/services/backup/src/main/resources/codegen-resources/service-2.json +++ b/services/backup/src/main/resources/codegen-resources/service-2.json @@ -1046,7 +1046,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"} ], "documentation":"

      Recovers the saved resource identified by an Amazon Resource Name (ARN).

      ", "idempotent":true @@ -1520,7 +1521,7 @@ }, "StartWindowMinutes":{ "shape":"WindowMinutes", - "documentation":"

      A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional. If this value is included, it must be at least 60 minutes to avoid errors.

      " + "documentation":"

      A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional. If this value is included, it must be at least 60 minutes to avoid errors.

      During the start window, the backup job status remains in CREATED status until it has successfully begun or until the start window time has run out. If within the start window time Backup receives an error that allows the job to be retried, Backup will automatically retry to begin the job at least every 10 minutes until the backup successfully begins (the job status changes to RUNNING) or until the job status changes to EXPIRED (which is expected to occur when the start window time is over).

      " }, "CompletionWindowMinutes":{ "shape":"WindowMinutes", @@ -1570,7 +1571,7 @@ }, "StartWindowMinutes":{ "shape":"WindowMinutes", - "documentation":"

      A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional. If this value is included, it must be at least 60 minutes to avoid errors.

      " + "documentation":"

      A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional. If this value is included, it must be at least 60 minutes to avoid errors.

      During the start window, the backup job status remains in CREATED status until it has successfully begun or until the start window time has run out. If within the start window time Backup receives an error that allows the job to be retried, Backup will automatically retry to begin the job at least every 10 minutes until the backup successfully begins (the job status changes to RUNNING) or until the job status changes to EXPIRED (which is expected to occur when the start window time is over).

      " }, "CompletionWindowMinutes":{ "shape":"WindowMinutes", @@ -2875,11 +2876,11 @@ }, "Status":{ "shape":"RecoveryPointStatus", - "documentation":"

      A status code specifying the state of the recovery point.

      PARTIAL status indicates Backup could not create the recovery point before the backup window closed. To increase your backup plan window using the API, see UpdateBackupPlan. You can also increase your backup plan window using the Console by choosing and editing your backup plan.

      EXPIRED status indicates that the recovery point has exceeded its retention period, but Backup lacks permission or is otherwise unable to delete it. To manually delete these recovery points, see Step 3: Delete the recovery points in the Clean up resources section of Getting started.

      STOPPED status occurs on a continuous backup where a user has taken some action that causes the continuous backup to be disabled. This can be caused by the removal of permissions, turning off versioning, turning off events being sent to EventBridge, or disabling the EventBridge rules that are put in place by Backup.

      To resolve STOPPED status, ensure that all requested permissions are in place and that versioning is enabled on the S3 bucket. Once these conditions are met, the next instance of a backup rule running will result in a new continuous recovery point being created. The recovery points with STOPPED status do not need to be deleted.

      " + "documentation":"

      A status code specifying the state of the recovery point.

      PARTIAL status indicates Backup could not create the recovery point before the backup window closed. To increase your backup plan window using the API, see UpdateBackupPlan. You can also increase your backup plan window using the Console by choosing and editing your backup plan.

      EXPIRED status indicates that the recovery point has exceeded its retention period, but Backup lacks permission or is otherwise unable to delete it. To manually delete these recovery points, see Step 3: Delete the recovery points in the Clean up resources section of Getting started.

      STOPPED status occurs on a continuous backup where a user has taken some action that causes the continuous backup to be disabled. This can be caused by the removal of permissions, turning off versioning, turning off events being sent to EventBridge, or disabling the EventBridge rules that are put in place by Backup.

      To resolve STOPPED status, ensure that all requested permissions are in place and that versioning is enabled on the S3 bucket. Once these conditions are met, the next instance of a backup rule running will result in a new continuous recovery point being created. The recovery points with STOPPED status do not need to be deleted.

      For SAP HANA on Amazon EC2 STOPPED status occurs due to user action, application misconfiguration, or backup failure. To ensure that future continuous backups succeed, refer to the recovery point status and check SAP HANA for details.

      " }, "StatusMessage":{ "shape":"string", - "documentation":"

      A status message explaining the reason for the recovery point deletion failure.

      " + "documentation":"

      A status message explaining the status of the recovery point.

      " }, "CreationDate":{ "shape":"timestamp", @@ -4732,6 +4733,18 @@ "RecoveryPointArn":{ "shape":"ARN", "documentation":"

      This is the Amazon Resource Name (ARN) of the parent (composite) recovery point.

      " + }, + "ResourceArn":{ + "shape":"ARN", + "documentation":"

      This is the Amazon Resource Name (ARN) that uniquely identifies a saved resource.

      " + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

      This is the Amazon Web Services resource type that is saved as a recovery point.

      " + }, + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

      This is the name of the backup vault (the logical container in which backups are stored).

      " } }, "documentation":"

      This is a recovery point which is a child (nested) recovery point of a parent (composite) recovery point. These recovery points can be disassociated from their parent (composite) recovery point, in which case they will no longer be a member.

      " @@ -5093,7 +5106,7 @@ }, "StartWindowMinutes":{ "shape":"WindowMinutes", - "documentation":"

      A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional, and the default is 8 hours. If this value is included, it must be at least 60 minutes to avoid errors.

      " + "documentation":"

      A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional, and the default is 8 hours. If this value is included, it must be at least 60 minutes to avoid errors.

      During the start window, the backup job status remains in CREATED status until it has successfully begun or until the start window time has run out. If within the start window time Backup receives an error that allows the job to be retried, Backup will automatically retry to begin the job at least every 10 minutes until the backup successfully begins (the job status changes to RUNNING) or until the job status changes to EXPIRED (which is expected to occur when the start window time is over).

      " }, "CompleteWindowMinutes":{ "shape":"WindowMinutes", @@ -5234,7 +5247,11 @@ }, "ResourceType":{ "shape":"ResourceType", - "documentation":"

      Starts a job to restore a recovery point for one of the following resources:

      • Aurora for Amazon Aurora

      • DocumentDB for Amazon DocumentDB (with MongoDB compatibility)

      • DynamoDB for Amazon DynamoDB

      • EBS for Amazon Elastic Block Store

      • EC2 for Amazon Elastic Compute Cloud

      • EFS for Amazon Elastic File System

      • FSx for Amazon FSx

      • Neptune for Amazon Neptune

      • RDS for Amazon Relational Database Service

      • Storage Gateway for Storage Gateway

      • S3 for Amazon S3

      • VirtualMachine for virtual machines

      " + "documentation":"

      Starts a job to restore a recovery point for one of the following resources:

      • Aurora for Amazon Aurora

      • DocumentDB for Amazon DocumentDB (with MongoDB compatibility)

      • CloudFormation for CloudFormation

      • DynamoDB for Amazon DynamoDB

      • EBS for Amazon Elastic Block Store

      • EC2 for Amazon Elastic Compute Cloud

      • EFS for Amazon Elastic File System

      • FSx for Amazon FSx

      • Neptune for Amazon Neptune

      • RDS for Amazon Relational Database Service

      • Redshift for Amazon Redshift

      • Storage Gateway for Storage Gateway

      • S3 for Amazon S3

      • Timestream for Amazon Timestream

      • VirtualMachine for virtual machines

      " + }, + "CopySourceTagsToRestoredResource":{ + "shape":"boolean", + "documentation":"

      This is an optional parameter. If this equals True, tags included in the backup will be copied to the restored resource.

      This can only be applied to backups created through Backup.

      " } } }, diff --git a/services/backupgateway/pom.xml b/services/backupgateway/pom.xml index 373ebb3f0f52..479469c197ca 100644 --- a/services/backupgateway/pom.xml +++ b/services/backupgateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT backupgateway AWS Java SDK :: Services :: Backup Gateway diff --git a/services/backupstorage/pom.xml b/services/backupstorage/pom.xml index a82e29b4690a..f557508650d7 100644 --- a/services/backupstorage/pom.xml +++ b/services/backupstorage/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT backupstorage AWS Java SDK :: Services :: Backup Storage diff --git a/services/batch/pom.xml b/services/batch/pom.xml index b292a6f34bc9..0dd2fe8fcc0b 100644 --- a/services/batch/pom.xml +++ b/services/batch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT batch AWS Java SDK :: Services :: AWS Batch diff --git a/services/billingconductor/pom.xml b/services/billingconductor/pom.xml index 023121439a3b..f6e33506e91d 100644 --- a/services/billingconductor/pom.xml +++ b/services/billingconductor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT billingconductor AWS Java SDK :: Services :: Billingconductor diff --git a/services/braket/pom.xml b/services/braket/pom.xml index 4d7fea191c76..6becdb56bb08 100644 --- a/services/braket/pom.xml +++ b/services/braket/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT braket AWS Java SDK :: Services :: Braket diff --git a/services/budgets/pom.xml b/services/budgets/pom.xml index b86e8113fc4d..853e21e799e4 100644 --- a/services/budgets/pom.xml +++ b/services/budgets/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT budgets AWS Java SDK :: Services :: AWS Budgets diff --git a/services/chime/pom.xml b/services/chime/pom.xml index 
5b6b92f08680..f99dbfd1186a 100644 --- a/services/chime/pom.xml +++ b/services/chime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT chime AWS Java SDK :: Services :: Chime diff --git a/services/chime/src/main/resources/codegen-resources/customization.config b/services/chime/src/main/resources/codegen-resources/customization.config index 6ca40123e540..254455474301 100644 --- a/services/chime/src/main/resources/codegen-resources/customization.config +++ b/services/chime/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,6 @@ { "verifiedSimpleMethods" : [ "listAccounts" - ] + ], + "generateEndpointClientTests": true } diff --git a/services/chimesdkidentity/pom.xml b/services/chimesdkidentity/pom.xml index 8fd775822801..c94bd60976da 100644 --- a/services/chimesdkidentity/pom.xml +++ b/services/chimesdkidentity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT chimesdkidentity AWS Java SDK :: Services :: Chime SDK Identity diff --git a/services/chimesdkidentity/src/main/resources/codegen-resources/endpoint-tests.json b/services/chimesdkidentity/src/main/resources/codegen-resources/endpoint-tests.json index dd83a35bf054..faecf435fc98 100644 --- a/services/chimesdkidentity/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/chimesdkidentity/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { + "Region": "eu-central-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { @@ -21,9 +21,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -47,9 +47,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - 
"UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -60,9 +60,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -73,9 +73,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -86,9 +86,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -99,9 +99,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -112,9 +112,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -125,9 +125,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -138,9 +138,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -151,9 +151,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -164,9 +164,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -175,9 +175,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { @@ -188,9 +188,9 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -199,9 +199,9 @@ "error": "DualStack is 
enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { @@ -212,9 +212,9 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -223,9 +223,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -236,9 +236,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -247,9 +247,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -260,9 +260,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -273,9 +273,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -298,9 +298,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -310,9 +310,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, diff --git a/services/chimesdkidentity/src/main/resources/codegen-resources/service-2.json 
b/services/chimesdkidentity/src/main/resources/codegen-resources/service-2.json index 45b06f76c6dd..895d8ff04899 100644 --- a/services/chimesdkidentity/src/main/resources/codegen-resources/service-2.json +++ b/services/chimesdkidentity/src/main/resources/codegen-resources/service-2.json @@ -781,11 +781,11 @@ "type":"structure", "members":{ "AppInstanceUserArn":{ - "shape":"SensitiveChimeArn", + "shape":"ChimeArn", "documentation":"

      The ARN of the AppInstanceUser.

      " }, "EndpointId":{ - "shape":"SensitiveString64", + "shape":"String64", "documentation":"

      The unique identifier of the AppInstanceUserEndpoint.

      " }, "Name":{ @@ -827,11 +827,11 @@ "type":"structure", "members":{ "AppInstanceUserArn":{ - "shape":"SensitiveChimeArn", + "shape":"ChimeArn", "documentation":"

      The ARN of the AppInstanceUser.

      " }, "EndpointId":{ - "shape":"SensitiveString64", + "shape":"String64", "documentation":"

      The unique identifier of the AppInstanceUserEndpoint.

      " }, "Name":{ @@ -1165,13 +1165,13 @@ ], "members":{ "AppInstanceUserArn":{ - "shape":"SensitiveChimeArn", + "shape":"ChimeArn", "documentation":"

      The ARN of the AppInstanceUser.

      ", "location":"uri", "locationName":"appInstanceUserArn" }, "EndpointId":{ - "shape":"SensitiveString64", + "shape":"String64", "documentation":"

      The unique identifier of the AppInstanceUserEndpoint.

      ", "location":"uri", "locationName":"endpointId" @@ -1258,13 +1258,13 @@ ], "members":{ "AppInstanceUserArn":{ - "shape":"SensitiveString1600", + "shape":"String1600", "documentation":"

      The ARN of the AppInstanceUser.

      ", "location":"uri", "locationName":"appInstanceUserArn" }, "EndpointId":{ - "shape":"SensitiveString64", + "shape":"String64", "documentation":"

      The unique identifier of the AppInstanceUserEndpoint.

      ", "location":"uri", "locationName":"endpointId" @@ -1441,6 +1441,24 @@ }, "documentation":"

      The details of a user or bot.

      " }, + "InvokedBy":{ + "type":"structure", + "required":[ + "StandardMessages", + "TargetedMessages" + ], + "members":{ + "StandardMessages":{ + "shape":"StandardMessages", + "documentation":"

      Sets standard messages as the bot trigger. For standard messages:

      • ALL: The bot processes all standard messages.

      • AUTO: The bot responds to ALL messages when the channel has one other non-hidden member, and responds to MENTIONS when the channel has more than one other non-hidden member.

      • MENTIONS: The bot processes all standard messages that have a message attribute with CHIME.mentions and a value of the bot ARN.

      • NONE: The bot processes no standard messages.

      " + }, + "TargetedMessages":{ + "shape":"TargetedMessages", + "documentation":"

      Sets targeted messages as the bot trigger. For targeted messages:

      • ALL: The bot processes all TargetedMessages sent to it. The bot then responds with a targeted message back to the sender.

      • NONE: The bot processes no targeted messages.

      " + } + }, + "documentation":"

      Specifies the type of message that triggers a bot.

      " + }, "LexBotAliasArn":{ "type":"string", "max":2048, @@ -1450,14 +1468,17 @@ "LexConfiguration":{ "type":"structure", "required":[ - "RespondsTo", "LexBotAliasArn", "LocaleId" ], "members":{ "RespondsTo":{ "shape":"RespondsTo", - "documentation":"

      Determines whether the Amazon Lex V2 bot responds to all standard messages. Control messages are not supported.

      " + "documentation":"

      Deprecated. Use InvokedBy instead.

      Determines whether the Amazon Lex V2 bot responds to all standard messages. Control messages are not supported.

      " + }, + "InvokedBy":{ + "shape":"InvokedBy", + "documentation":"

      Specifies the type of message that triggers a bot.

      " }, "LexBotAliasArn":{ "shape":"LexBotAliasArn", @@ -1842,11 +1863,11 @@ "type":"structure", "members":{ "AppInstanceUserArn":{ - "shape":"SensitiveChimeArn", + "shape":"ChimeArn", "documentation":"

      The ARN of the AppInstanceUser.

      " }, "EndpointId":{ - "shape":"SensitiveString64", + "shape":"String64", "documentation":"

      The unique identifier of the AppInstanceUserEndpoint.

      " } } @@ -1891,13 +1912,6 @@ "pattern":".*", "sensitive":true }, - "SensitiveString64":{ - "type":"string", - "max":64, - "min":0, - "pattern":".*", - "sensitive":true - }, "ServiceFailureException":{ "type":"structure", "members":{ @@ -1920,7 +1934,28 @@ "exception":true, "fault":true }, + "StandardMessages":{ + "type":"string", + "enum":[ + "AUTO", + "ALL", + "MENTIONS", + "NONE" + ] + }, "String":{"type":"string"}, + "String1600":{ + "type":"string", + "max":1600, + "min":0, + "pattern":".*" + }, + "String64":{ + "type":"string", + "max":64, + "min":0, + "pattern":".*" + }, "Tag":{ "type":"structure", "required":[ @@ -1980,6 +2015,13 @@ "min":1, "sensitive":true }, + "TargetedMessages":{ + "type":"string", + "enum":[ + "ALL", + "NONE" + ] + }, "ThrottledClientException":{ "type":"structure", "members":{ @@ -2039,6 +2081,10 @@ "Metadata":{ "shape":"Metadata", "documentation":"

      The metadata of the AppInstanceBot.

      " + }, + "Configuration":{ + "shape":"Configuration", + "documentation":"

      The configuration for the bot update.

      " } } }, @@ -2092,13 +2138,13 @@ ], "members":{ "AppInstanceUserArn":{ - "shape":"SensitiveChimeArn", + "shape":"ChimeArn", "documentation":"

      The ARN of the AppInstanceUser.

      ", "location":"uri", "locationName":"appInstanceUserArn" }, "EndpointId":{ - "shape":"SensitiveString64", + "shape":"String64", "documentation":"

      The unique identifier of the AppInstanceUserEndpoint.

      ", "location":"uri", "locationName":"endpointId" @@ -2117,11 +2163,11 @@ "type":"structure", "members":{ "AppInstanceUserArn":{ - "shape":"SensitiveChimeArn", + "shape":"ChimeArn", "documentation":"

      The ARN of the AppInstanceUser.

      " }, "EndpointId":{ - "shape":"SensitiveString64", + "shape":"String64", "documentation":"

      The unique identifier of the AppInstanceUserEndpoint.

      " } } diff --git a/services/chimesdkmediapipelines/pom.xml b/services/chimesdkmediapipelines/pom.xml index cb29c68a5212..421c73e33705 100644 --- a/services/chimesdkmediapipelines/pom.xml +++ b/services/chimesdkmediapipelines/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT chimesdkmediapipelines AWS Java SDK :: Services :: Chime SDK Media Pipelines diff --git a/services/chimesdkmeetings/pom.xml b/services/chimesdkmeetings/pom.xml index 424d55125e87..b1242b8ae575 100644 --- a/services/chimesdkmeetings/pom.xml +++ b/services/chimesdkmeetings/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT chimesdkmeetings AWS Java SDK :: Services :: Chime SDK Meetings diff --git a/services/chimesdkmessaging/pom.xml b/services/chimesdkmessaging/pom.xml index 1da74748a13c..b35582b5ee7e 100644 --- a/services/chimesdkmessaging/pom.xml +++ b/services/chimesdkmessaging/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT chimesdkmessaging AWS Java SDK :: Services :: Chime SDK Messaging diff --git a/services/chimesdkmessaging/src/main/resources/codegen-resources/service-2.json b/services/chimesdkmessaging/src/main/resources/codegen-resources/service-2.json index 46e5b47e210a..1896b0b39eb1 100644 --- a/services/chimesdkmessaging/src/main/resources/codegen-resources/service-2.json +++ b/services/chimesdkmessaging/src/main/resources/codegen-resources/service-2.json @@ -70,7 +70,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

      Calls back Chime SDK Messaging with a processing response message. This should be invoked from the processor Lambda. This is a developer API.

      You can return one of the following processing responses:

      • Update message content or metadata

      • Deny a message

      • Make no changes to the message

      " + "documentation":"

      Calls back Amazon Chime SDK messaging with a processing response message. This should be invoked from the processor Lambda. This is a developer API.

      You can return one of the following processing responses:

      • Update message content or metadata

      • Deny a message

      • Make no changes to the message

      " }, "CreateChannel":{ "name":"CreateChannel", @@ -133,7 +133,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

      Creates a channel flow, a container for processors. Processors are AWS Lambda functions that perform actions on chat messages, such as stripping out profanity. You can associate channel flows with channels, and the processors in the channel flow then take action on all messages sent to that channel. This is a developer API.

      Channel flows process the following items:

      1. New and updated messages

      2. Persistent and non-persistent messages

      3. The Standard message type

      Channel flows don't process Control or System messages. For more information about the message types provided by Chime SDK Messaging, refer to Message types in the Amazon Chime developer guide.

      " + "documentation":"

      Creates a channel flow, a container for processors. Processors are AWS Lambda functions that perform actions on chat messages, such as stripping out profanity. You can associate channel flows with channels, and the processors in the channel flow then take action on all messages sent to that channel. This is a developer API.

      Channel flows process the following items:

      1. New and updated messages

      2. Persistent and non-persistent messages

      3. The Standard message type

      Channel flows don't process Control or System messages. For more information about the message types provided by Chime SDK messaging, refer to Message types in the Amazon Chime developer guide.

      " }, "CreateChannelMembership":{ "name":"CreateChannelMembership", @@ -480,7 +480,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

      Gets the membership preferences of an AppInstanceUser or AppInstanceBot for the specified channel. A user or a bot must be a member of the channel and own the membership to be able to retrieve membership preferences. Users or bots in the AppInstanceAdmin and channel moderator roles can't retrieve preferences for other users or bots. Banned users or bots can't retrieve membership preferences for the channel from which they are banned.

      The x-amz-chime-bearer request header is mandatory. Use the ARN of the AppInstanceUser or AppInstanceBot that makes the API call as the value in the header.

      " + "documentation":"

      Gets the membership preferences of an AppInstanceUser or AppInstanceBot for the specified channel. A user or a bot must be a member of the channel and own the membership in order to retrieve membership preferences. Users or bots in the AppInstanceAdmin and channel moderator roles can't retrieve preferences for other users or bots. Banned users or bots can't retrieve membership preferences for the channel from which they are banned.

      The x-amz-chime-bearer request header is mandatory. Use the ARN of the AppInstanceUser or AppInstanceBot that makes the API call as the value in the header.

      " }, "GetChannelMessage":{ "name":"GetChannelMessage", @@ -519,7 +519,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

      Gets message status for a specified messageId. Use this API to determine the intermediate status of messages going through channel flow processing. The API provides an alternative to retrieving message status if the event was not received because a client wasn't connected to a websocket.

      Messages can have any one of these statuses.

      SENT

      Message processed successfully

      PENDING

      Ongoing processing

      FAILED

      Processing failed

      DENIED

      Messasge denied by the processor

      • This API does not return statuses for denied messages, because we don't store them once the processor denies them.

      • Only the message sender can invoke this API.

      • The x-amz-chime-bearer request header is mandatory. Use the ARN of the AppInstanceUser or AppInstanceBot that makes the API call as the value in the header.

      " + "documentation":"

      Gets message status for a specified messageId. Use this API to determine the intermediate status of messages going through channel flow processing. The API provides an alternative to retrieving message status if the event was not received because a client wasn't connected to a websocket.

      Messages can have any one of these statuses.

      SENT

      Message processed successfully

      PENDING

      Ongoing processing

      FAILED

      Processing failed

      DENIED

      Message denied by the processor

      • This API does not return statuses for denied messages, because we don't store them once the processor denies them.

      • Only the message sender can invoke this API.

      • The x-amz-chime-bearer request header is mandatory. Use the ARN of the AppInstanceUser or AppInstanceBot that makes the API call as the value in the header.

      " }, "GetMessagingSessionEndpoint":{ "name":"GetMessagingSessionEndpoint", @@ -633,7 +633,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

      Lists all channels that anr AppInstanceUser or AppInstanceBot is a part of. Only an AppInstanceAdmin can call the API with a user ARN that is not their own.

      The x-amz-chime-bearer request header is mandatory. Use the ARN of the AppInstanceUser or AppInstanceBot that makes the API call as the value in the header.

      " + "documentation":"

      Lists all channels that an AppInstanceUser or AppInstanceBot is a part of. Only an AppInstanceAdmin can call the API with a user ARN that is not their own.

      The x-amz-chime-bearer request header is mandatory. Use the ARN of the AppInstanceUser or AppInstanceBot that makes the API call as the value in the header.

      " }, "ListChannelMessages":{ "name":"ListChannelMessages", @@ -806,7 +806,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

      Sets the membership preferences of an AppInstanceUser or AppIntanceBot for the specified channel. The user or bot must be a member of the channel. Only the user or bot who owns the membership can set preferences. Users or bots in the AppInstanceAdmin and channel moderator roles can't set preferences for other users or users. Banned users or bots can't set membership preferences for the channel from which they are banned.

      The x-amz-chime-bearer request header is mandatory. Use the ARN of an AppInstanceUser or AppInstanceBot that makes the API call as the value in the header.

      " + "documentation":"

      Sets the membership preferences of an AppInstanceUser or AppInstanceBot for the specified channel. The user or bot must be a member of the channel. Only the user or bot who owns the membership can set preferences. Users or bots in the AppInstanceAdmin and channel moderator roles can't set preferences for other users. Banned users or bots can't set membership preferences for the channel from which they are banned.

      The x-amz-chime-bearer request header is mandatory. Use the ARN of an AppInstanceUser or AppInstanceBot that makes the API call as the value in the header.

      " }, "PutMessagingStreamingConfigurations":{ "name":"PutMessagingStreamingConfigurations", @@ -886,7 +886,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

      Sends a message to a particular channel that the member is a part of.

      The x-amz-chime-bearer request header is mandatory. Use the ARN of the AppInstanceUser or AppInstanceBot that makes the API call as the value in the header.

      Also, STANDARD messages can contain 4KB of data and the 1KB of metadata. CONTROL messages can contain 30 bytes of data and no metadata.

      " + "documentation":"

      Sends a message to a particular channel that the member is a part of.

      The x-amz-chime-bearer request header is mandatory. Use the ARN of the AppInstanceUser or AppInstanceBot that makes the API call as the value in the header.

      Also, STANDARD messages can be up to 4KB in size and contain metadata. Metadata is arbitrary, and you can use it in a variety of ways, such as containing a link to an attachment.

      CONTROL messages are limited to 30 bytes and do not contain metadata.

      " }, "TagResource":{ "name":"TagResource", @@ -1493,7 +1493,7 @@ }, "Content":{ "shape":"Content", - "documentation":"

      The message content.

      " + "documentation":"

      The content of the channel message. For Amazon Lex V2 bot responses, this field holds a list of messages originating from the bot. For more information, refer to Processing responses from an AppInstanceBot in the Amazon Chime SDK Messaging Developer Guide.

      " }, "Metadata":{ "shape":"Metadata", @@ -1533,7 +1533,7 @@ }, "MessageAttributes":{ "shape":"MessageAttributeMap", - "documentation":"

      The attributes for the message, used for message filtering along with a FilterRule defined in the PushNotificationPreferences.

      " + "documentation":"

      The attributes for the channel message. For Amazon Lex V2 bot responses, the attributes are mapped to specific fields from the bot. For more information, refer to Processing responses from an AppInstanceBot in the Amazon Chime SDK Messaging Developer Guide.

      " }, "SubChannelId":{ "shape":"SubChannelId", @@ -1541,7 +1541,11 @@ }, "ContentType":{ "shape":"ContentType", - "documentation":"

      The content type of the channel message.

      " + "documentation":"

      The content type of the channel message. For Amazon Lex V2 bot responses, the content type is application/amz-chime-lex-msgs for success responses and application/amz-chime-lex-error for failure responses. For more information, refer to Processing responses from an AppInstanceBot in the Amazon Chime SDK Messaging Developer Guide.

      " + }, + "Target":{ + "shape":"TargetList", + "documentation":"

      The target of a message, a sender, a user, or a bot. Only the target and the sender can view targeted messages. Only users who can see targeted messages can take actions on them. However, administrators can delete targeted messages that they can’t see.

      " } }, "documentation":"

      The details of a message in a channel.

      " @@ -1556,7 +1560,7 @@ }, "Content":{ "shape":"NonEmptyContent", - "documentation":"

      The message content.

      " + "documentation":"

      The message content. For Amazon Lex V2 bot responses, this field holds a list of messages originating from the bot. For more information, refer to Processing responses from an AppInstanceBot in the Amazon Chime SDK Messaging Developer Guide.

      " }, "Metadata":{ "shape":"Metadata", @@ -1568,7 +1572,7 @@ }, "MessageAttributes":{ "shape":"MessageAttributeMap", - "documentation":"

      The attributes for the message, used for message filtering along with a FilterRule defined in the PushNotificationPreferences.

      " + "documentation":"

      The attributes for the channel message. For Amazon Lex V2 bot responses, the attributes are mapped to specific fields from the bot. For more information, refer to Processing responses from an AppInstanceBot in the Amazon Chime SDK Messaging Developer Guide.

      " }, "SubChannelId":{ "shape":"SubChannelId", @@ -1576,7 +1580,7 @@ }, "ContentType":{ "shape":"ContentType", - "documentation":"

      The content type of the call-back message.

      " + "documentation":"

      The content type of the call-back message. For Amazon Lex V2 bot responses, the content type is application/amz-chime-lex-msgs for success responses and application/amz-chime-lex-error for failure responses. For more information, refer to Processing responses from an AppInstanceBot in the Amazon Chime SDK Messaging Developer Guide.

      " } }, "documentation":"

      Stores information about a callback.

      " @@ -1606,7 +1610,7 @@ }, "Detail":{ "shape":"StatusDetail", - "documentation":"

      Contains more details about the messasge status.

      " + "documentation":"

      Contains more details about the message status.

      " } }, "documentation":"

      Stores information about a message status.

      " @@ -1620,7 +1624,7 @@ }, "Content":{ "shape":"Content", - "documentation":"

      The content of the message.

      " + "documentation":"

      The content of the channel message. For Amazon Lex V2 bot responses, this field holds a list of messages originating from the bot. For more information, refer to Processing responses from an AppInstanceBot in the Amazon Chime SDK Messaging Developer Guide.

      " }, "Metadata":{ "shape":"Metadata", @@ -1656,11 +1660,15 @@ }, "MessageAttributes":{ "shape":"MessageAttributeMap", - "documentation":"

      The message attribues listed in a the summary of a channel message.

      " + "documentation":"

      The attributes for the channel message. For Amazon Lex V2 bot responses, the attributes are mapped to specific fields from the bot. For more information, refer to Processing responses from an AppInstanceBot in the Amazon Chime SDK Messaging Developer Guide.

      " }, "ContentType":{ "shape":"ContentType", - "documentation":"

      The content type of the channel messsage listed in the summary.

      " + "documentation":"

      The content type of the channel message listed in the summary. For Amazon Lex V2 bot responses, the content type is application/amz-chime-lex-msgs for success responses and application/amz-chime-lex-error for failure responses. For more information, refer to Processing responses from an AppInstanceBot in the Amazon Chime SDK Messaging Developer Guide.

      " + }, + "Target":{ + "shape":"TargetList", + "documentation":"

      The target of a message, a sender, a user, or a bot. Only the target and the sender can view targeted messages. Only users who can see targeted messages can take actions on them. However, administrators can delete targeted messages that they can’t see.

      " } }, "documentation":"

      Summary of the messages in a Channel.

      " @@ -1771,7 +1779,7 @@ }, "LastMessageTimestamp":{ "shape":"Timestamp", - "documentation":"

      The time at which the last persistent message in a channel was sent.

      " + "documentation":"

      The time at which the last persistent message visible to the caller in a channel was sent.

      " } }, "documentation":"

      Summary of the details of a Channel.

      " @@ -3838,11 +3846,11 @@ }, "Content":{ "shape":"NonEmptyContent", - "documentation":"

      The content of the message.

      " + "documentation":"

      The content of the channel message.

      " }, "Type":{ "shape":"ChannelMessageType", - "documentation":"

      The type of message, STANDARD or CONTROL.

      " + "documentation":"

      The type of message, STANDARD or CONTROL.

      STANDARD messages can be up to 4KB in size and contain metadata. Metadata is arbitrary, and you can use it in a variety of ways, such as containing a link to an attachment.

      CONTROL messages are limited to 30 bytes and do not contain metadata.

      " }, "Persistence":{ "shape":"ChannelMessagePersistenceType", @@ -3878,6 +3886,10 @@ "ContentType":{ "shape":"ContentType", "documentation":"

      The content type of the channel message.

      " + }, + "Target":{ + "shape":"TargetList", + "documentation":"

      The target of a message. Must be a member of the channel, such as another user, a bot, or the sender. Only the target and the sender can view targeted messages. Only users who can see targeted messages can take actions on them. However, administrators can delete targeted messages that they can’t see.

      " } } }, @@ -4045,6 +4057,22 @@ "min":1, "sensitive":true }, + "Target":{ + "type":"structure", + "members":{ + "MemberArn":{ + "shape":"ChimeArn", + "documentation":"

      The ARN of the target channel member.

      " + } + }, + "documentation":"

      The target of a message, a sender, a user, or a bot. Only the target and the sender can view targeted messages. Only users who can see targeted messages can take actions on them. However, administrators can delete targeted messages that they can’t see.

      " + }, + "TargetList":{ + "type":"list", + "member":{"shape":"Target"}, + "max":1, + "min":1 + }, "TargetMembershipsPerSubChannel":{ "type":"integer", "min":2 @@ -4143,7 +4171,7 @@ }, "Content":{ "shape":"NonEmptyContent", - "documentation":"

      The content of the message being updated.

      " + "documentation":"

      The content of the channel message.

      " }, "Metadata":{ "shape":"Metadata", @@ -4263,5 +4291,5 @@ "max":4096 } }, - "documentation":"

      The Amazon Chime SDK Messaging APIs in this section allow software developers to send and receive messages in custom messaging applications. These APIs depend on the frameworks provided by the Amazon Chime SDK Identity APIs. For more information about the messaging APIs, see Amazon Chime SDK messaging.

      " + "documentation":"

      The Amazon Chime SDK messaging APIs in this section allow software developers to send and receive messages in custom messaging applications. These APIs depend on the frameworks provided by the Amazon Chime SDK identity APIs. For more information about the messaging APIs, see Amazon Chime SDK messaging.

      " } diff --git a/services/chimesdkvoice/pom.xml b/services/chimesdkvoice/pom.xml index 01e088b42485..6312876a4e5e 100644 --- a/services/chimesdkvoice/pom.xml +++ b/services/chimesdkvoice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT chimesdkvoice AWS Java SDK :: Services :: Chime SDK Voice diff --git a/services/chimesdkvoice/src/main/resources/codegen-resources/service-2.json b/services/chimesdkvoice/src/main/resources/codegen-resources/service-2.json index 5cbb54463d98..b94a9295a125 100644 --- a/services/chimesdkvoice/src/main/resources/codegen-resources/service-2.json +++ b/services/chimesdkvoice/src/main/resources/codegen-resources/service-2.json @@ -2108,6 +2108,13 @@ }, "documentation":"

      The details of an Amazon Chime SDK Voice Connector call.

      " }, + "CallLegType":{ + "type":"string", + "enum":[ + "Caller", + "Callee" + ] + }, "CallingName":{ "type":"string", "pattern":"^$|^[a-zA-Z0-9 ]{2,15}$", @@ -5017,6 +5024,10 @@ "ClientRequestToken":{ "shape":"ClientRequestId", "documentation":"

      The unique identifier for the client request. Use a different token for different speaker search tasks.

      " + }, + "CallLeg":{ + "shape":"CallLegType", + "documentation":"

      Specifies which call leg to stream for speaker search.

      " } } }, diff --git a/services/cleanrooms/pom.xml b/services/cleanrooms/pom.xml index 9de5754b9adb..a06d97ce1a6f 100644 --- a/services/cleanrooms/pom.xml +++ b/services/cleanrooms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT cleanrooms AWS Java SDK :: Services :: Clean Rooms diff --git a/services/cloud9/pom.xml b/services/cloud9/pom.xml index 846bab8704eb..8c9d6adf24cb 100644 --- a/services/cloud9/pom.xml +++ b/services/cloud9/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 cloud9 diff --git a/services/cloudcontrol/pom.xml b/services/cloudcontrol/pom.xml index 0a7f9d21fc5f..9931622042df 100644 --- a/services/cloudcontrol/pom.xml +++ b/services/cloudcontrol/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT cloudcontrol AWS Java SDK :: Services :: Cloud Control diff --git a/services/clouddirectory/pom.xml b/services/clouddirectory/pom.xml index a4e0ebcac239..5bb2a9392d4c 100644 --- a/services/clouddirectory/pom.xml +++ b/services/clouddirectory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT clouddirectory AWS Java SDK :: Services :: Amazon CloudDirectory diff --git a/services/cloudformation/pom.xml b/services/cloudformation/pom.xml index 371031a10dc0..4aaadc156c06 100644 --- a/services/cloudformation/pom.xml +++ b/services/cloudformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT cloudformation AWS Java SDK :: Services :: AWS CloudFormation diff --git a/services/cloudformation/src/main/resources/codegen-resources/service-2.json b/services/cloudformation/src/main/resources/codegen-resources/service-2.json index 72298da019ca..9b07253c5b67 100644 --- a/services/cloudformation/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudformation/src/main/resources/codegen-resources/service-2.json @@ -11,6 
+11,23 @@ "xmlNamespace":"http://cloudformation.amazonaws.com/doc/2010-05-15/" }, "operations":{ + "ActivateOrganizationsAccess":{ + "name":"ActivateOrganizationsAccess", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ActivateOrganizationsAccessInput"}, + "output":{ + "shape":"ActivateOrganizationsAccessOutput", + "resultWrapper":"ActivateOrganizationsAccessResult" + }, + "errors":[ + {"shape":"InvalidOperationException"}, + {"shape":"OperationNotFoundException"} + ], + "documentation":"

      Activate trusted access with Organizations. With trusted access between StackSets and Organizations activated, the management account has permissions to create and manage StackSets for your organization.

      " + }, "ActivateType":{ "name":"ActivateType", "http":{ @@ -26,7 +43,7 @@ {"shape":"CFNRegistryException"}, {"shape":"TypeNotFoundException"} ], - "documentation":"

      Activates a public third-party extension, making it available for use in stack templates. For more information, see Using public extensions in the CloudFormation User Guide.

      Once you have activated a public third-party extension in your account and region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

      ", + "documentation":"

      Activates a public third-party extension, making it available for use in stack templates. For more information, see Using public extensions in the CloudFormation User Guide.

      Once you have activated a public third-party extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

      ", "idempotent":true }, "BatchDescribeTypeConfigurations":{ @@ -44,7 +61,7 @@ {"shape":"TypeConfigurationNotFoundException"}, {"shape":"CFNRegistryException"} ], - "documentation":"

      Returns configuration data for the specified CloudFormation extensions, from the CloudFormation registry for the account and region.

      For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

      " + "documentation":"

      Returns configuration data for the specified CloudFormation extensions, from the CloudFormation registry for the account and Region.

      For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

      " }, "CancelUpdateStack":{ "name":"CancelUpdateStack", @@ -109,7 +126,7 @@ {"shape":"TokenAlreadyExistsException"}, {"shape":"InsufficientCapabilitiesException"} ], - "documentation":"

      Creates a stack as specified in the template. After the call completes successfully, the stack creation starts. You can check the status of the stack through the DescribeStacksoperation.

      " + "documentation":"

      Creates a stack as specified in the template. After the call completes successfully, the stack creation starts. You can check the status of the stack through the DescribeStacks operation.

      " }, "CreateStackInstances":{ "name":"CreateStackInstances", @@ -150,6 +167,23 @@ ], "documentation":"

      Creates a stack set.

      " }, + "DeactivateOrganizationsAccess":{ + "name":"DeactivateOrganizationsAccess", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeactivateOrganizationsAccessInput"}, + "output":{ + "shape":"DeactivateOrganizationsAccessOutput", + "resultWrapper":"DeactivateOrganizationsAccessResult" + }, + "errors":[ + {"shape":"InvalidOperationException"}, + {"shape":"OperationNotFoundException"} + ], + "documentation":"

      Deactivates trusted access with Organizations. If trusted access is deactivated, the management account does not have permissions to create and manage service-managed StackSets for your organization.

      " + }, "DeactivateType":{ "name":"DeactivateType", "http":{ @@ -165,7 +199,7 @@ {"shape":"CFNRegistryException"}, {"shape":"TypeNotFoundException"} ], - "documentation":"

      Deactivates a public extension that was previously activated in this account and region.

      Once deactivated, an extension can't be used in any CloudFormation operation. This includes stack update operations where the stack template includes the extension, even if no updates are being made to the extension. In addition, deactivated extensions aren't automatically updated if a new version of the extension is released.

      ", + "documentation":"

      Deactivates a public extension that was previously activated in this account and Region.

      Once deactivated, an extension can't be used in any CloudFormation operation. This includes stack update operations where the stack template includes the extension, even if no updates are being made to the extension. In addition, deactivated extensions aren't automatically updated if a new version of the extension is released.

      ", "idempotent":true }, "DeleteChangeSet":{ @@ -296,6 +330,23 @@ ], "documentation":"

      Returns hook-related information for the change set and a list of changes that CloudFormation makes when you run the change set.

      " }, + "DescribeOrganizationsAccess":{ + "name":"DescribeOrganizationsAccess", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOrganizationsAccessInput"}, + "output":{ + "shape":"DescribeOrganizationsAccessOutput", + "resultWrapper":"DescribeOrganizationsAccessResult" + }, + "errors":[ + {"shape":"InvalidOperationException"}, + {"shape":"OperationNotFoundException"} + ], + "documentation":"

      Retrieves information about the account's OrganizationAccess status. This API can be called either by the management account or the delegated administrator by using the CallAs parameter. This API can also be called without the CallAs parameter by the management account.

      " + }, "DescribePublisher":{ "name":"DescribePublisher", "http":{ @@ -324,7 +375,7 @@ "shape":"DescribeStackDriftDetectionStatusOutput", "resultWrapper":"DescribeStackDriftDetectionStatusResult" }, - "documentation":"

      Returns information about a stack drift detection operation. A stack drift detection operation detects whether a stack's actual configuration differs, or has drifted, from it's expected configuration, as defined in the stack template and any values specified as template parameters. A stack is considered to have drifted if one or more of its resources have drifted. For more information about stack and resource drift, see Detecting Unregulated Configuration Changes to Stacks and Resources.

      Use DetectStackDrift to initiate a stack drift detection operation. DetectStackDrift returns a StackDriftDetectionId you can use to monitor the progress of the operation using DescribeStackDriftDetectionStatus. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources.

      " + "documentation":"

      Returns information about a stack drift detection operation. A stack drift detection operation detects whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. A stack is considered to have drifted if one or more of its resources have drifted. For more information about stack and resource drift, see Detecting Unregulated Configuration Changes to Stacks and Resources.

      Use DetectStackDrift to initiate a stack drift detection operation. DetectStackDrift returns a StackDriftDetectionId you can use to monitor the progress of the operation using DescribeStackDriftDetectionStatus. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources.

      " }, "DescribeStackEvents":{ "name":"DescribeStackEvents", @@ -354,7 +405,7 @@ {"shape":"StackSetNotFoundException"}, {"shape":"StackInstanceNotFoundException"} ], - "documentation":"

      Returns the stack instance that's associated with the specified stack set, Amazon Web Services account, and Region.

      For a list of stack instances that are associated with a specific stack set, use ListStackInstances.

      " + "documentation":"

      Returns the stack instance that's associated with the specified StackSet, Amazon Web Services account, and Amazon Web Services Region.

      For a list of stack instances that are associated with a specific StackSet, use ListStackInstances.

      " }, "DescribeStackResource":{ "name":"DescribeStackResource", @@ -409,7 +460,7 @@ "errors":[ {"shape":"StackSetNotFoundException"} ], - "documentation":"

      Returns the description of the specified stack set.

      " + "documentation":"

      Returns the description of the specified StackSet.

      " }, "DescribeStackSetOperation":{ "name":"DescribeStackSetOperation", @@ -426,7 +477,7 @@ {"shape":"StackSetNotFoundException"}, {"shape":"OperationNotFoundException"} ], - "documentation":"

      Returns the description of the specified stack set operation.

      " + "documentation":"

      Returns the description of the specified StackSet operation.

      " }, "DescribeStacks":{ "name":"DescribeStacks", @@ -473,7 +524,7 @@ "errors":[ {"shape":"CFNRegistryException"} ], - "documentation":"

      Returns information about an extension's registration, including its current status and type and version identifiers.

      When you initiate a registration request using RegisterType , you can then use DescribeTypeRegistration to monitor the progress of that registration request.

      Once the registration request has completed, use DescribeType to return detailed information about an extension.

      ", + "documentation":"

      Returns information about an extension's registration, including its current status and type and version identifiers.

      When you initiate a registration request using RegisterType, you can then use DescribeTypeRegistration to monitor the progress of that registration request.

      Once the registration request has completed, use DescribeType to return detailed information about an extension.

      ", "idempotent":true }, "DetectStackDrift":{ @@ -487,7 +538,7 @@ "shape":"DetectStackDriftOutput", "resultWrapper":"DetectStackDriftResult" }, - "documentation":"

      Detects whether a stack's actual configuration differs, or has drifted, from it's expected configuration, as defined in the stack template and any values specified as template parameters. For each resource in the stack that supports drift detection, CloudFormation compares the actual configuration of the resource with its expected template configuration. Only resource properties explicitly defined in the stack template are checked for drift. A stack is considered to have drifted if one or more of its resources differ from their expected template configurations. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

      Use DetectStackDrift to detect drift on all supported resources for a given stack, or DetectStackResourceDrift to detect drift on individual resources.

      For a list of stack resources that currently support drift detection, see Resources that Support Drift Detection.

      DetectStackDrift can take up to several minutes, depending on the number of resources contained within the stack. Use DescribeStackDriftDetectionStatus to monitor the progress of a detect stack drift operation. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources.

      When detecting drift on a stack, CloudFormation doesn't detect drift on any nested stacks belonging to that stack. Perform DetectStackDrift directly on the nested stack itself.

      " + "documentation":"

      Detects whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For each resource in the stack that supports drift detection, CloudFormation compares the actual configuration of the resource with its expected template configuration. Only resource properties explicitly defined in the stack template are checked for drift. A stack is considered to have drifted if one or more of its resources differ from their expected template configurations. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

      Use DetectStackDrift to detect drift on all supported resources for a given stack, or DetectStackResourceDrift to detect drift on individual resources.

      For a list of stack resources that currently support drift detection, see Resources that Support Drift Detection.

      DetectStackDrift can take up to several minutes, depending on the number of resources contained within the stack. Use DescribeStackDriftDetectionStatus to monitor the progress of a detect stack drift operation. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources.

      When detecting drift on a stack, CloudFormation doesn't detect drift on any nested stacks belonging to that stack. Perform DetectStackDrift directly on the nested stack itself.

      " }, "DetectStackResourceDrift":{ "name":"DetectStackResourceDrift", @@ -500,7 +551,7 @@ "shape":"DetectStackResourceDriftOutput", "resultWrapper":"DetectStackResourceDriftResult" }, - "documentation":"

      Returns information about whether a resource's actual configuration differs, or has drifted, from it's expected configuration, as defined in the stack template and any values specified as template parameters. This information includes actual and expected property values for resources in which CloudFormation detects drift. Only resource properties explicitly defined in the stack template are checked for drift. For more information about stack and resource drift, see Detecting Unregulated Configuration Changes to Stacks and Resources.

      Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection.

      Resources that don't currently support drift detection can't be checked. For a list of resources that support drift detection, see Resources that Support Drift Detection.

      " + "documentation":"

      Returns information about whether a resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. This information includes actual and expected property values for resources in which CloudFormation detects drift. Only resource properties explicitly defined in the stack template are checked for drift. For more information about stack and resource drift, see Detecting Unregulated Configuration Changes to Stacks and Resources.

      Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection.

      Resources that don't currently support drift detection can't be checked. For a list of resources that support drift detection, see Resources that Support Drift Detection.

      " }, "DetectStackSetDrift":{ "name":"DetectStackSetDrift", @@ -518,7 +569,7 @@ {"shape":"OperationInProgressException"}, {"shape":"StackSetNotFoundException"} ], - "documentation":"

      Detect drift on a stack set. When CloudFormation performs drift detection on a stack set, it performs drift detection on the stack associated with each stack instance in the stack set. For more information, see How CloudFormation performs drift detection on a stack set.

      DetectStackSetDrift returns the OperationId of the stack set drift detection operation. Use this operation id with DescribeStackSetOperation to monitor the progress of the drift detection operation. The drift detection operation may take some time, depending on the number of stack instances included in the stack set, in addition to the number of resources included in each stack.

      Once the operation has completed, use the following actions to return drift information:

      • Use DescribeStackSet to return detailed information about the stack set, including detailed information about the last completed drift operation performed on the stack set. (Information about drift operations that are in progress isn't included.)

      • Use ListStackInstances to return a list of stack instances belonging to the stack set, including the drift status and last drift time checked of each instance.

      • Use DescribeStackInstance to return detailed information about a specific stack instance, including its drift status and last drift time checked.

      For more information about performing a drift detection operation on a stack set, see Detecting unmanaged changes in stack sets.

      You can only run a single drift detection operation on a given stack set at one time.

      To stop a drift detection stack set operation, use StopStackSetOperation .

      " + "documentation":"

      Detect drift on a stack set. When CloudFormation performs drift detection on a stack set, it performs drift detection on the stack associated with each stack instance in the stack set. For more information, see How CloudFormation performs drift detection on a stack set.

      DetectStackSetDrift returns the OperationId of the stack set drift detection operation. Use this operation id with DescribeStackSetOperation to monitor the progress of the drift detection operation. The drift detection operation may take some time, depending on the number of stack instances included in the stack set, in addition to the number of resources included in each stack.

      Once the operation has completed, use the following actions to return drift information:

      • Use DescribeStackSet to return detailed information about the stack set, including detailed information about the last completed drift operation performed on the stack set. (Information about drift operations that are in progress isn't included.)

      • Use ListStackInstances to return a list of stack instances belonging to the stack set, including the drift status and last drift time checked of each instance.

      • Use DescribeStackInstance to return detailed information about a specific stack instance, including its drift status and last drift time checked.

      For more information about performing a drift detection operation on a stack set, see Detecting unmanaged changes in stack sets.

      You can only run a single drift detection operation on a given stack set at one time.

      To stop a drift detection stack set operation, use StopStackSetOperation.

      " }, "EstimateTemplateCost":{ "name":"EstimateTemplateCost", @@ -617,7 +668,7 @@ {"shape":"StackNotFoundException"}, {"shape":"StaleRequestException"} ], - "documentation":"

      Import existing stacks into a new stack sets. Use the stack import operation to import up to 10 stacks into a new stack set in the same account as the source stack or in a different administrator account and Region, by specifying the stack ID of the stack you intend to import.

      ImportStacksToStackSet is only supported by self-managed permissions.

      " + "documentation":"

      Import existing stacks into a new stack set. Use the stack import operation to import up to 10 stacks into a new stack set in the same account as the source stack or in a different administrator account and Region, by specifying the stack ID of the stack you intend to import.

      " }, "ListChangeSets":{ "name":"ListChangeSets", @@ -643,7 +694,7 @@ "shape":"ListExportsOutput", "resultWrapper":"ListExportsResult" }, - "documentation":"

      Lists all exported output values in the account and Region in which you call this action. Use this action to see the exported output values that you can import into other stacks. To import values, use the Fn::ImportValue function.

      For more information, see CloudFormation export stack output values.

      " + "documentation":"

      Lists all exported output values in the account and Region in which you call this action. Use this action to see the exported output values that you can import into other stacks. To import values, use the Fn::ImportValue function.

      For more information, see CloudFormation export stack output values.

      " }, "ListImports":{ "name":"ListImports", @@ -656,7 +707,7 @@ "shape":"ListImportsOutput", "resultWrapper":"ListImportsResult" }, - "documentation":"

      Lists all stacks that are importing an exported output value. To modify or remove an exported output value, first use this action to see which stacks are using it. To see the exported output values in your account, see ListExports.

      For more information about importing an exported output value, see the Fn::ImportValue function.

      " + "documentation":"

      Lists all stacks that are importing an exported output value. To modify or remove an exported output value, first use this action to see which stacks are using it. To see the exported output values in your account, see ListExports.

      For more information about importing an exported output value, see the Fn::ImportValue function.

      " }, "ListStackInstances":{ "name":"ListStackInstances", @@ -812,7 +863,7 @@ {"shape":"CFNRegistryException"}, {"shape":"TypeNotFoundException"} ], - "documentation":"

      Publishes the specified extension to the CloudFormation registry as a public extension in this region. Public extensions are available for use by all CloudFormation users. For more information about publishing extensions, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide.

      To publish an extension, you must be registered as a publisher with CloudFormation. For more information, see RegisterPublisher.

      ", + "documentation":"

      Publishes the specified extension to the CloudFormation registry as a public extension in this Region. Public extensions are available for use by all CloudFormation users. For more information about publishing extensions, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide.

      To publish an extension, you must be registered as a publisher with CloudFormation. For more information, see RegisterPublisher.

      ", "idempotent":true }, "RecordHandlerProgress":{ @@ -864,7 +915,7 @@ "errors":[ {"shape":"CFNRegistryException"} ], - "documentation":"

      Registers an extension with the CloudFormation service. Registering an extension makes it available for use in CloudFormation templates in your Amazon Web Services account, and includes:

      • Validating the extension schema.

      • Determining which handlers, if any, have been specified for the extension.

      • Making the extension available for use in your account.

      For more information about how to develop extensions and ready them for registration, see Creating Resource Providers in the CloudFormation CLI User Guide.

      You can have a maximum of 50 resource extension versions registered at a time. This maximum is per account and per region. Use DeregisterType to deregister specific extension versions if necessary.

      Once you have initiated a registration request using RegisterType , you can use DescribeTypeRegistration to monitor the progress of the registration request.

      Once you have registered a private extension in your account and region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

      ", + "documentation":"

      Registers an extension with the CloudFormation service. Registering an extension makes it available for use in CloudFormation templates in your Amazon Web Services account, and includes:

      • Validating the extension schema.

      • Determining which handlers, if any, have been specified for the extension.

      • Making the extension available for use in your account.

      For more information about how to develop extensions and ready them for registration, see Creating Resource Providers in the CloudFormation CLI User Guide.

      You can have a maximum of 50 resource extension versions registered at a time. This maximum is per account and per Region. Use DeregisterType to deregister specific extension versions if necessary.

      Once you have initiated a registration request using RegisterType, you can use DescribeTypeRegistration to monitor the progress of the registration request.

      Once you have registered a private extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

      ", "idempotent":true }, "RollbackStack":{ @@ -907,7 +958,7 @@ {"shape":"CFNRegistryException"}, {"shape":"TypeNotFoundException"} ], - "documentation":"

      Specifies the configuration data for a registered CloudFormation extension, in the given account and region.

      To view the current configuration data for an extension, refer to the ConfigurationSchema element of DescribeType. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

      It's strongly recommended that you use dynamic references to restrict sensitive configuration definitions, such as third-party credentials. For more details on dynamic references, see Using dynamic references to specify template values in the CloudFormation User Guide.

      " + "documentation":"

      Specifies the configuration data for a registered CloudFormation extension, in the given account and Region.

      To view the current configuration data for an extension, refer to the ConfigurationSchema element of DescribeType. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

      It's strongly recommended that you use dynamic references to restrict sensitive configuration definitions, such as third-party credentials. For more details on dynamic references, see Using dynamic references to specify template values in the CloudFormation User Guide.

      " }, "SetTypeDefaultVersion":{ "name":"SetTypeDefaultVersion", @@ -969,7 +1020,7 @@ {"shape":"CFNRegistryException"}, {"shape":"TypeNotFoundException"} ], - "documentation":"

      Tests a registered extension to make sure it meets all necessary requirements for being published in the CloudFormation registry.

      • For resource types, this includes passing all contracts tests defined for the type.

      • For modules, this includes determining if the module's model meets all necessary requirements.

      For more information, see Testing your public extension prior to publishing in the CloudFormation CLI User Guide.

      If you don't specify a version, CloudFormation uses the default version of the extension in your account and region for testing.

      To perform testing, CloudFormation assumes the execution role specified when the type was registered. For more information, see RegisterType.

      Once you've initiated testing on an extension using TestType, you can pass the returned TypeVersionArn into DescribeType to monitor the current test status and test status description for the extension.

      An extension must have a test status of PASSED before it can be published. For more information, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide.

      ", + "documentation":"

      Tests a registered extension to make sure it meets all necessary requirements for being published in the CloudFormation registry.

      • For resource types, this includes passing all contracts tests defined for the type.

      • For modules, this includes determining if the module's model meets all necessary requirements.

      For more information, see Testing your public extension prior to publishing in the CloudFormation CLI User Guide.

      If you don't specify a version, CloudFormation uses the default version of the extension in your account and Region for testing.

      To perform testing, CloudFormation assumes the execution role specified when the type was registered. For more information, see RegisterType.

      Once you've initiated testing on an extension using TestType, you can pass the returned TypeVersionArn into DescribeType to monitor the current test status and test status description for the extension.

      An extension must have a test status of PASSED before it can be published. For more information, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide.

      ", "idempotent":true }, "UpdateStack":{ @@ -1124,6 +1175,16 @@ "min":1, "pattern":"(s3://|http(s?)://).+" }, + "ActivateOrganizationsAccessInput":{ + "type":"structure", + "members":{ + } + }, + "ActivateOrganizationsAccessOutput":{ + "type":"structure", + "members":{ + } + }, "ActivateTypeInput":{ "type":"structure", "members":{ @@ -1145,13 +1206,16 @@ }, "TypeNameAlias":{ "shape":"TypeName", - "documentation":"

      An alias to assign to the public extension, in this account and region. If you specify an alias for the extension, CloudFormation treats the alias as the extension type name within this account and region. You must use the alias to refer to the extension in your templates, API calls, and CloudFormation console.

      An extension alias must be unique within a given account and region. You can activate the same public resource multiple times in the same account and region, using different type name aliases.

      " + "documentation":"

      An alias to assign to the public extension, in this account and Region. If you specify an alias for the extension, CloudFormation treats the alias as the extension type name within this account and Region. You must use the alias to refer to the extension in your templates, API calls, and CloudFormation console.

      An extension alias must be unique within a given account and Region. You can activate the same public resource multiple times in the same account and Region, using different type name aliases.

      " }, "AutoUpdate":{ "shape":"AutoUpdate", - "documentation":"

      Whether to automatically update the extension in this account and region when a new minor version is published by the extension publisher. Major versions released by the publisher must be manually updated.

      The default is true.

      " + "documentation":"

      Whether to automatically update the extension in this account and Region when a new minor version is published by the extension publisher. Major versions released by the publisher must be manually updated.

      The default is true.

      " + }, + "LoggingConfig":{ + "shape":"LoggingConfig", + "documentation":"

      Contains logging configuration information for an extension.

      " }, - "LoggingConfig":{"shape":"LoggingConfig"}, "ExecutionRoleArn":{ "shape":"RoleArn", "documentation":"

      The name of the IAM execution role to use to activate the extension.

      " @@ -1171,7 +1235,7 @@ "members":{ "Arn":{ "shape":"PrivateTypeArn", - "documentation":"

      The Amazon Resource Name (ARN) of the activated extension, in this account and region.

      " + "documentation":"

      The Amazon Resource Name (ARN) of the activated extension, in this account and Region.

      " } } }, @@ -1220,7 +1284,10 @@ "shape":"ErrorMessage", "documentation":"

      The error message.

      " }, - "TypeConfigurationIdentifier":{"shape":"TypeConfigurationIdentifier"} + "TypeConfigurationIdentifier":{ + "shape":"TypeConfigurationIdentifier", + "documentation":"

      Identifying information for the configuration of a CloudFormation extension.

      " + } }, "documentation":"

      Detailed information concerning an error generated during the setting of configuration data for a CloudFormation extension.

      " }, @@ -1291,7 +1358,7 @@ "members":{ "StackName":{ "shape":"StackName", - "documentation":"

      The name or the unique stack ID that's associated with the stack.

      " + "documentation":"

      If you don't pass a parameter to StackName, the API returns a response that describes all resources in the account.

      The IAM policy below can be added to IAM policies when you want to limit resource-level permissions and avoid returning a response when no parameter is sent in the request:

      { \"Version\": \"2012-10-17\", \"Statement\": [{ \"Effect\": \"Deny\", \"Action\": \"cloudformation:DescribeStacks\", \"NotResource\": \"arn:aws:cloudformation:*:*:stack/*/*\" }] }

      The name or the unique stack ID that's associated with the stack.

      " }, "ClientRequestToken":{ "shape":"ClientRequestToken", @@ -1593,7 +1660,7 @@ }, "ClientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

      A unique identifier for this ContinueUpdateRollback request. Specify this token if you plan to retry requests so that CloudFormationknows that you're not attempting to continue the rollback to a stack with the same name. You might retry ContinueUpdateRollback requests to ensure that CloudFormation successfully received them.

      " + "documentation":"

      A unique identifier for this ContinueUpdateRollback request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to continue the rollback to a stack with the same name. You might retry ContinueUpdateRollback requests to ensure that CloudFormation successfully received them.

      " } }, "documentation":"

      The input for the ContinueUpdateRollback action.

      " @@ -1669,7 +1736,7 @@ }, "ChangeSetType":{ "shape":"ChangeSetType", - "documentation":"

      The type of change set operation. To create a change set for a new stack, specify CREATE. To create a change set for an existing stack, specify UPDATE. To create a change set for an import operation, specify IMPORT.

      If you create a change set for a new stack, CloudFormation creates a stack with a unique stack ID, but no template or resources. The stack will be in the REVIEW_IN_PROGRESS state until you execute the change set.

      By default, CloudFormation specifies UPDATE. You can't use the UPDATE type to create a change set for a new stack or the CREATE type to create a change set for an existing stack.

      " + "documentation":"

      The type of change set operation. To create a change set for a new stack, specify CREATE. To create a change set for an existing stack, specify UPDATE. To create a change set for an import operation, specify IMPORT.

      If you create a change set for a new stack, CloudFormation creates a stack with a unique stack ID, but no template or resources. The stack will be in the REVIEW_IN_PROGRESS state until you execute the change set.

      By default, CloudFormation specifies UPDATE. You can't use the UPDATE type to create a change set for a new stack or the CREATE type to create a change set for an existing stack.

      " }, "ResourcesToImport":{ "shape":"ResourcesToImport", @@ -1678,6 +1745,10 @@ "IncludeNestedStacks":{ "shape":"IncludeNestedStacks", "documentation":"

      Creates a change set for the all nested stacks specified in the template. The default behavior of this action is set to False. To include nested sets in a change set, specify True.

      " + }, + "OnStackFailure":{ + "shape":"OnStackFailure", + "documentation":"

      Determines what action will be taken if stack creation fails. If this parameter is specified, the DisableRollback parameter to the ExecuteChangeSet API operation must not be specified. This must be one of these values:

      • DELETE - Deletes the change set if the stack creation fails. This is only valid when the ChangeSetType parameter is set to CREATE. If the deletion of the stack fails, the status of the stack is DELETE_FAILED.

      • DO_NOTHING - if the stack creation fails, do nothing. This is equivalent to specifying true for the DisableRollback parameter to the ExecuteChangeSet API operation.

      • ROLLBACK - if the stack creation fails, roll back the stack. This is equivalent to specifying false for the DisableRollback parameter to the ExecuteChangeSet API operation.

      For nested stacks, when the OnStackFailure parameter is set to DELETE for the change set for the parent stack, any failure in a child stack will cause the parent stack creation to fail and all stacks to be deleted.

      " } }, "documentation":"

      The input for the CreateChangeSet action.

      " @@ -1921,12 +1992,22 @@ "exception":true }, "CreationTime":{"type":"timestamp"}, + "DeactivateOrganizationsAccessInput":{ + "type":"structure", + "members":{ + } + }, + "DeactivateOrganizationsAccessOutput":{ + "type":"structure", + "members":{ + } + }, "DeactivateTypeInput":{ "type":"structure", "members":{ "TypeName":{ "shape":"TypeName", - "documentation":"

      The type name of the extension, in this account and region. If you specified a type name alias when enabling the extension, use the type name alias.

      Conditional: You must specify either Arn, or TypeName and Type.

      " + "documentation":"

      The type name of the extension, in this account and Region. If you specified a type name alias when enabling the extension, use the type name alias.

      Conditional: You must specify either Arn, or TypeName and Type.

      " }, "Type":{ "shape":"ThirdPartyType", @@ -1934,7 +2015,7 @@ }, "Arn":{ "shape":"PrivateTypeArn", - "documentation":"

      The Amazon Resource Name (ARN) for the extension, in this account and region.

      Conditional: You must specify either Arn, or TypeName and Type.

      " + "documentation":"

      The Amazon Resource Name (ARN) for the extension, in this account and Region.

      Conditional: You must specify either Arn, or TypeName and Type.

      " } } }, @@ -2290,10 +2371,32 @@ "RootChangeSetId":{ "shape":"ChangeSetId", "documentation":"

      Specifies the change set ID of the root change set in the current nested change set hierarchy.

      " + }, + "OnStackFailure":{ + "shape":"OnStackFailure", + "documentation":"

      Determines what action will be taken if stack creation fails. When this parameter is specified, the DisableRollback parameter to the ExecuteChangeSet API operation must not be specified. This must be one of these values:

      • DELETE - Deletes the change set if the stack creation fails. This is only valid when the ChangeSetType parameter is set to CREATE. If the deletion of the stack fails, the status of the stack is DELETE_FAILED.

      • DO_NOTHING - if the stack creation fails, do nothing. This is equivalent to specifying true for the DisableRollback parameter to the ExecuteChangeSet API operation.

      • ROLLBACK - if the stack creation fails, roll back the stack. This is equivalent to specifying false for the DisableRollback parameter to the ExecuteChangeSet API operation.

      " } }, "documentation":"

      The output for the DescribeChangeSet action.

      " }, + "DescribeOrganizationsAccessInput":{ + "type":"structure", + "members":{ + "CallAs":{ + "shape":"CallAs", + "documentation":"

      [Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

      By default, SELF is specified.

      • If you are signed in to the management account, specify SELF.

      • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

        Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.

      " + } + } + }, + "DescribeOrganizationsAccessOutput":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"OrganizationStatus", + "documentation":"

      Presents the status of the OrganizationAccess.

      " + } + } + }, "DescribePublisherInput":{ "type":"structure", "members":{ @@ -2586,7 +2689,7 @@ "members":{ "StackName":{ "shape":"StackName", - "documentation":"

      The name or the unique stack ID that's associated with the stack, which aren't always interchangeable:

      • Running stacks: You can specify either the stack's name or its unique stack ID.

      • Deleted stacks: You must specify the unique stack ID.

      Default: There is no default value.

      " + "documentation":"

      If you don't pass a parameter to StackName, the API returns a response that describes all resources in the account. This requires ListStacks and DescribeStacks permissions.

      The IAM policy below can be added to IAM policies when you want to limit resource-level permissions and avoid returning a response when no parameter is sent in the request:

      { \"Version\": \"2012-10-17\", \"Statement\": [{ \"Effect\": \"Deny\", \"Action\": \"cloudformation:DescribeStacks\", \"NotResource\": \"arn:aws:cloudformation:*:*:stack/*/*\" }] }

      The name or the unique stack ID that's associated with the stack, which aren't always interchangeable:

      • Running stacks: You can specify either the stack's name or its unique stack ID.

      • Deleted stacks: You must specify the unique stack ID.

      Default: There is no default value.

      " }, "NextToken":{ "shape":"NextToken", @@ -2655,7 +2758,7 @@ }, "DefaultVersionId":{ "shape":"TypeVersionId", - "documentation":"

      The ID of the default version of the extension. The default version is used when the extension version isn't specified.

      This applies only to private extensions you have registered in your account. For public extensions, both those provided by Amazon Web Services and published by third parties, CloudFormation returns null. For more information, see RegisterType.

      To set the default version of an extension, use SetTypeDefaultVersion .

      " + "documentation":"

      The ID of the default version of the extension. The default version is used when the extension version isn't specified.

      This applies only to private extensions you have registered in your account. For public extensions, both those provided by Amazon Web Services and published by third parties, CloudFormation returns null. For more information, see RegisterType.

      To set the default version of an extension, use SetTypeDefaultVersion.

      " }, "IsDefaultVersion":{ "shape":"IsDefaultVersion", @@ -2719,7 +2822,7 @@ }, "ConfigurationSchema":{ "shape":"ConfigurationSchema", - "documentation":"

      A JSON string that represent the current configuration data for the extension in this account and region.

      To set the configuration data for an extension, use SetTypeConfiguration. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

      " + "documentation":"

A JSON string that represents the current configuration data for the extension in this account and Region.

      To set the configuration data for an extension, use SetTypeConfiguration. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

      " }, "PublisherId":{ "shape":"PublisherId", @@ -2727,11 +2830,11 @@ }, "OriginalTypeName":{ "shape":"TypeName", - "documentation":"

      For public extensions that have been activated for this account and region, the type name of the public extension.

      If you specified a TypeNameAlias when enabling the extension in this account and region, CloudFormation treats that alias as the extension's type name within the account and region, not the type name of the public extension. For more information, see Specifying aliases to refer to extensions in the CloudFormation User Guide.

      " + "documentation":"

      For public extensions that have been activated for this account and Region, the type name of the public extension.

      If you specified a TypeNameAlias when enabling the extension in this account and Region, CloudFormation treats that alias as the extension's type name within the account and Region, not the type name of the public extension. For more information, see Specifying aliases to refer to extensions in the CloudFormation User Guide.

      " }, "OriginalTypeArn":{ "shape":"TypeArn", - "documentation":"

      For public extensions that have been activated for this account and region, the Amazon Resource Name (ARN) of the public extension.

      " + "documentation":"

      For public extensions that have been activated for this account and Region, the Amazon Resource Name (ARN) of the public extension.

      " }, "PublicVersionNumber":{ "shape":"PublicVersionNumber", @@ -2743,11 +2846,11 @@ }, "IsActivated":{ "shape":"IsActivated", - "documentation":"

      Whether the extension is activated in the account and region.

      This only applies to public third-party extensions. For all other extensions, CloudFormation returns null.

      " + "documentation":"

      Whether the extension is activated in the account and Region.

      This only applies to public third-party extensions. For all other extensions, CloudFormation returns null.

      " }, "AutoUpdate":{ "shape":"AutoUpdate", - "documentation":"

      Whether CloudFormation automatically updates the extension in this account and region when a new minor version is published by the extension publisher. Major versions released by the publisher must be manually updated. For more information, see Activating public extensions for use in your account in the CloudFormation User Guide.

      " + "documentation":"

      Whether CloudFormation automatically updates the extension in this account and Region when a new minor version is published by the extension publisher. Major versions released by the publisher must be manually updated. For more information, see Activating public extensions for use in your account in the CloudFormation User Guide.

      " } } }, @@ -2757,7 +2860,7 @@ "members":{ "RegistrationToken":{ "shape":"RegistrationToken", - "documentation":"

      The identifier for this registration request.

      This registration token is generated by CloudFormation when you initiate a registration request using RegisterType .

      " + "documentation":"

      The identifier for this registration request.

      This registration token is generated by CloudFormation when you initiate a registration request using RegisterType.

      " } } }, @@ -2846,7 +2949,10 @@ "shape":"StackSetNameOrId", "documentation":"

      The name of the stack set on which to perform the drift detection operation.

      " }, - "OperationPreferences":{"shape":"StackSetOperationPreferences"}, + "OperationPreferences":{ + "shape":"StackSetOperationPreferences", + "documentation":"

      The user-specified preferences for how CloudFormation performs a stack set operation.

      For more information about maximum concurrent accounts and failure tolerance, see Stack set operation options.

      " + }, "OperationId":{ "shape":"ClientRequestToken", "documentation":"

      The ID of the stack set operation.

      ", @@ -2863,7 +2969,7 @@ "members":{ "OperationId":{ "shape":"ClientRequestToken", - "documentation":"

      The ID of the drift detection stack set operation.

      You can use this operation ID with DescribeStackSetOperation to monitor the progress of the drift detection operation.

      " + "documentation":"

      The ID of the drift detection stack set operation.

      You can use this operation ID with DescribeStackSetOperation to monitor the progress of the drift detection operation.

      " } } }, @@ -2945,7 +3051,7 @@ }, "DisableRollback":{ "shape":"DisableRollback", - "documentation":"

      Preserves the state of previously provisioned resources when an operation fails.

      Default: True

      " + "documentation":"

      Preserves the state of previously provisioned resources when an operation fails. This parameter can't be specified when the OnStackFailure parameter to the CreateChangeSet API operation was specified.

      • True - if the stack creation fails, do nothing. This is equivalent to specifying DO_NOTHING for the OnStackFailure parameter to the CreateChangeSet API operation.

      • False - if the stack creation fails, roll back the stack. This is equivalent to specifying ROLLBACK for the OnStackFailure parameter to the CreateChangeSet API operation.

      Default: True

      " } }, "documentation":"

      The input for the ExecuteChangeSet action.

      " @@ -3245,7 +3351,10 @@ "shape":"OrganizationalUnitIdList", "documentation":"

The list of OU IDs to which the stacks being imported have to be mapped as deployment targets.

      " }, - "OperationPreferences":{"shape":"StackSetOperationPreferences"}, + "OperationPreferences":{ + "shape":"StackSetOperationPreferences", + "documentation":"

      The user-specified preferences for how CloudFormation performs a stack set operation.

      For more information about maximum concurrent accounts and failure tolerance, see Stack set operation options.

      " + }, "OperationId":{ "shape":"ClientRequestToken", "documentation":"

      A unique, user defined, identifier for the stack set operation.

      ", @@ -3677,7 +3786,7 @@ "members":{ "RegistrationTokenList":{ "shape":"RegistrationTokenList", - "documentation":"

      A list of extension registration tokens.

      Use DescribeTypeRegistration to return detailed information about a type registration request.

      " + "documentation":"

      A list of extension registration tokens.

      Use DescribeTypeRegistration to return detailed information about a type registration request.

      " }, "NextToken":{ "shape":"NextToken", @@ -3736,7 +3845,7 @@ "members":{ "Visibility":{ "shape":"Visibility", - "documentation":"

      The scope at which the extensions are visible and usable in CloudFormation operations.

      Valid values include:

      • PRIVATE: Extensions that are visible and usable within this account and region. This includes:

        • Private extensions you have registered in this account and region.

        • Public extensions that you have activated in this account and region.

      • PUBLIC: Extensions that are publicly visible and available to be activated within any Amazon Web Services account. This includes extensions from Amazon Web Services, in addition to third-party publishers.

      The default is PRIVATE.

      " + "documentation":"

      The scope at which the extensions are visible and usable in CloudFormation operations.

      Valid values include:

      • PRIVATE: Extensions that are visible and usable within this account and Region. This includes:

        • Private extensions you have registered in this account and Region.

        • Public extensions that you have activated in this account and Region.

      • PUBLIC: Extensions that are publicly visible and available to be activated within any Amazon Web Services account. This includes extensions from Amazon Web Services, in addition to third-party publishers.

      The default is PRIVATE.

      " }, "ProvisioningType":{ "shape":"ProvisioningType", @@ -3849,10 +3958,10 @@ }, "LogicalIdHierarchy":{ "shape":"LogicalIdHierarchy", - "documentation":"

      A concatenated list of the logical IDs of the module or modules containing the resource. Modules are listed starting with the inner-most nested module, and separated by /.

      In the following example, the resource was created from a module, moduleA, that's nested inside a parent module, moduleB.

      moduleA/moduleB

      For more information, see Referencing resources in a module in the CloudFormation User Guide.

      " + "documentation":"

      A concatenated list of the logical IDs of the module or modules containing the resource. Modules are listed starting with the inner-most nested module, and separated by /.

      In the following example, the resource was created from a module, moduleA, that's nested inside a parent module, moduleB.

      moduleA/moduleB

      For more information, see Referencing resources in a module in the CloudFormation User Guide.

      " } }, - "documentation":"

      Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.

      For more information about modules, see Using modules to encapsulate and reuse resource configurations in the CloudFormation User Guide.

      " + "documentation":"

      Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.

      For more information about modules, see Using modules to encapsulate and reuse resource configurations in the CloudFormation User Guide.

      " }, "MonitoringTimeInMinutes":{ "type":"integer", @@ -3891,6 +4000,14 @@ "DELETE" ] }, + "OnStackFailure":{ + "type":"string", + "enum":[ + "DO_NOTHING", + "ROLLBACK", + "DELETE" + ] + }, "OperationIdAlreadyExistsException":{ "type":"structure", "members":{ @@ -3981,6 +4098,14 @@ "type":"string", "max":4096 }, + "OrganizationStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED", + "DISABLED_PERMANENTLY" + ] + }, "OrganizationalUnitId":{ "type":"string", "pattern":"^(ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}|r-[a-z0-9]{4,32})$" @@ -4034,7 +4159,7 @@ }, "ResolvedValue":{ "shape":"ParameterValue", - "documentation":"

      Read-only. The value that corresponds to a SSM parameter key. This field is returned only for SSM parameter types in the template.

      " + "documentation":"

Read-only. The value that corresponds to an SSM parameter key. This field is returned only for SSM parameter types in the template.

      " } }, "documentation":"

      The Parameter data type.

      " @@ -4334,7 +4459,7 @@ }, "ExecutionRoleArn":{ "shape":"RoleArn", - "documentation":"

      The Amazon Resource Name (ARN) of the IAM role for CloudFormation to assume when invoking the extension.

      For CloudFormation to assume the specified execution role, the role must contain a trust relationship with the CloudFormation service principle (resources.cloudformation.amazonaws.com). For more information about adding trust relationships, see Modifying a role trust policy in the Identity and Access Management User Guide.

      If your extension calls Amazon Web Services APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those Amazon Web Services APIs, and provision that execution role in your account. When CloudFormation needs to invoke the resource type handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the resource type handler, thereby supplying your resource type with the appropriate credentials.

      " + "documentation":"

      The Amazon Resource Name (ARN) of the IAM role for CloudFormation to assume when invoking the extension.

For CloudFormation to assume the specified execution role, the role must contain a trust relationship with the CloudFormation service principal (resources.cloudformation.amazonaws.com). For more information about adding trust relationships, see Modifying a role trust policy in the Identity and Access Management User Guide.

      If your extension calls Amazon Web Services APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those Amazon Web Services APIs, and provision that execution role in your account. When CloudFormation needs to invoke the resource type handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the resource type handler, thereby supplying your resource type with the appropriate credentials.

      " }, "ClientRequestToken":{ "shape":"RequestToken", @@ -4347,7 +4472,7 @@ "members":{ "RegistrationToken":{ "shape":"RegistrationToken", - "documentation":"

      The identifier for this registration request.

      Use this registration token when calling DescribeTypeRegistration , which returns information about the status and IDs of the extension registration.

      " + "documentation":"

      The identifier for this registration request.

      Use this registration token when calling DescribeTypeRegistration, which returns information about the status and IDs of the extension registration.

      " } } }, @@ -4396,11 +4521,11 @@ "members":{ "TypeNameAlias":{ "shape":"TypeName", - "documentation":"

      An alias assigned to the public extension, in this account and region. If you specify an alias for the extension, CloudFormation treats the alias as the extension type name within this account and region. You must use the alias to refer to the extension in your templates, API calls, and CloudFormation console.

      " + "documentation":"

      An alias assigned to the public extension, in this account and Region. If you specify an alias for the extension, CloudFormation treats the alias as the extension type name within this account and Region. You must use the alias to refer to the extension in your templates, API calls, and CloudFormation console.

      " }, "OriginalTypeName":{ "shape":"TypeName", - "documentation":"

      The type name of the public extension.

      If you specified a TypeNameAlias when enabling the extension in this account and region, CloudFormation treats that alias as the extension's type name within the account and region, not the type name of the public extension. For more information, see Specifying aliases to refer to extensions in the CloudFormation User Guide.

      " + "documentation":"

      The type name of the public extension.

      If you specified a TypeNameAlias when enabling the extension in this account and Region, CloudFormation treats that alias as the extension's type name within the account and Region, not the type name of the public extension. For more information, see Specifying aliases to refer to extensions in the CloudFormation User Guide.

      " }, "PublisherId":{ "shape":"PublisherId", @@ -4778,11 +4903,11 @@ "members":{ "TypeArn":{ "shape":"TypeArn", - "documentation":"

      The Amazon Resource Name (ARN) for the extension, in this account and region.

      For public extensions, this will be the ARN assigned when you activate the type in this account and region. For private extensions, this will be the ARN assigned when you register the type in this account and region.

      Do not include the extension versions suffix at the end of the ARN. You can set the configuration for an extension, but not for a specific extension version.

      " + "documentation":"

      The Amazon Resource Name (ARN) for the extension, in this account and Region.

      For public extensions, this will be the ARN assigned when you activate the type in this account and Region. For private extensions, this will be the ARN assigned when you register the type in this account and Region.

      Do not include the extension versions suffix at the end of the ARN. You can set the configuration for an extension, but not for a specific extension version.

      " }, "Configuration":{ "shape":"TypeConfiguration", - "documentation":"

      The configuration data for the extension, in this account and region.

      The configuration data must be formatted as JSON, and validate against the schema returned in the ConfigurationSchema response element of API_DescribeType. For more information, see Defining account-level configuration data for an extension in the CloudFormation CLI User Guide.

      " + "documentation":"

      The configuration data for the extension, in this account and Region.

      The configuration data must be formatted as JSON, and validate against the schema returned in the ConfigurationSchema response element of DescribeType. For more information, see Defining account-level configuration data for an extension in the CloudFormation CLI User Guide.

      " }, "ConfigurationAlias":{ "shape":"TypeConfigurationAlias", @@ -4803,7 +4928,7 @@ "members":{ "ConfigurationArn":{ "shape":"TypeConfigurationArn", - "documentation":"

      The Amazon Resource Name (ARN) for the configuration data, in this account and region.

      Conditional: You must specify ConfigurationArn, or Type and TypeName.

      " + "documentation":"

      The Amazon Resource Name (ARN) for the configuration data, in this account and Region.

      Conditional: You must specify ConfigurationArn, or Type and TypeName.

      " } } }, @@ -4955,7 +5080,7 @@ }, "DriftInformation":{ "shape":"StackDriftInformation", - "documentation":"

      Information about whether a stack's actual configuration differs, or has drifted, from it's expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

      " + "documentation":"

      Information about whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

      " } }, "documentation":"

      The Stack data type.

      " @@ -5163,7 +5288,7 @@ "members":{ "DetailedStatus":{ "shape":"StackInstanceDetailedStatus", - "documentation":"
      • CANCELLED: The operation in the specified account and Region has been canceled. This is either because a user has stopped the stack set operation, or because the failure tolerance of the stack set operation has been exceeded.

      • FAILED: The operation in the specified account and Region failed. If the stack set operation fails in enough accounts within a Region, the failure tolerance for the stack set operation as a whole might be exceeded.

      • INOPERABLE: A DeleteStackInstances operation has failed and left the stack in an unstable state. Stacks in this state are excluded from further UpdateStackSet operations. You might need to perform a DeleteStackInstances operation, with RetainStacks set to true, to delete the stack instance, and then delete the stack manually.

      • PENDING: The operation in the specified account and Region has yet to start.

      • RUNNING: The operation in the specified account and Region is currently in progress.

      • SUCCEEDED: The operation in the specified account and Region completed successfully.

      " + "documentation":"
      • CANCELLED: The operation in the specified account and Region has been canceled. This is either because a user has stopped the stack set operation, or because the failure tolerance of the stack set operation has been exceeded.

      • FAILED: The operation in the specified account and Region failed. If the stack set operation fails in enough accounts within a Region, the failure tolerance for the stack set operation as a whole might be exceeded.

      • INOPERABLE: A DeleteStackInstances operation has failed and left the stack in an unstable state. Stacks in this state are excluded from further UpdateStackSet operations. You might need to perform a DeleteStackInstances operation, with RetainStacks set to true, to delete the stack instance, and then delete the stack manually.

      • PENDING: The operation in the specified account and Region has yet to start.

      • RUNNING: The operation in the specified account and Region is currently in progress.

      • SKIPPED_SUSPENDED_ACCOUNT: The operation in the specified account and Region has been skipped because the account was suspended at the time of the operation.

      • SUCCEEDED: The operation in the specified account and Region completed successfully.

      " } }, "documentation":"

      The detailed status of the stack instance.

      " @@ -5176,7 +5301,8 @@ "SUCCEEDED", "FAILED", "CANCELLED", - "INOPERABLE" + "INOPERABLE", + "SKIPPED_SUSPENDED_ACCOUNT" ] }, "StackInstanceFilter":{ @@ -5664,7 +5790,10 @@ "shape":"ManagedExecution", "documentation":"

      Describes whether StackSets performs non-conflicting operations concurrently and queues conflicting operations.

      " }, - "Regions":{"shape":"RegionList"} + "Regions":{ + "shape":"RegionList", + "documentation":"

      Returns a list of all Amazon Web Services Regions the given StackSet has stack instances deployed in. The Amazon Web Services Regions list output is in no particular order.

      " + } }, "documentation":"

      A structure that contains information about a stack set. A stack set enables you to provision stacks into Amazon Web Services accounts and across Regions by using a single CloudFormation template. In the stack set, you specify the template to use, in addition to any parameters and capabilities that the template requires.

      " }, @@ -5835,7 +5964,7 @@ }, "RegionOrder":{ "shape":"RegionList", - "documentation":"

      The order of the Regions in where you want to perform the stack operation.

      " + "documentation":"

      The order of the Regions where you want to perform the stack operation.

      " }, "FailureToleranceCount":{ "shape":"FailureToleranceCount", @@ -5957,7 +6086,10 @@ "shape":"StackSetOperationStatusDetails", "documentation":"

      Detailed information about the stack set operation.

      " }, - "OperationPreferences":{"shape":"StackSetOperationPreferences"} + "OperationPreferences":{ + "shape":"StackSetOperationPreferences", + "documentation":"

      The user-specified preferences for how CloudFormation performs a stack set operation.

      For more information about maximum concurrent accounts and failure tolerance, see Stack set operation options.

      " + } }, "documentation":"

      The structures that contain summary information about the specified operation.

      " }, @@ -6101,7 +6233,7 @@ }, "DriftInformation":{ "shape":"StackDriftInformationSummary", - "documentation":"

      Summarizes information about whether a stack's actual configuration differs, or has drifted, from it's expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

      " + "documentation":"

      Summarizes information about whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

      " } }, "documentation":"

      The StackSummary Data Type

      " @@ -6258,7 +6390,7 @@ }, "VersionId":{ "shape":"TypeVersionId", - "documentation":"

      The version of the extension to test.

      You can specify the version id with either Arn, or with TypeName and Type.

      If you don't specify a version, CloudFormation uses the default version of the extension in this account and region for testing.

      " + "documentation":"

      The version of the extension to test.

      You can specify the version id with either Arn, or with TypeName and Type.

      If you don't specify a version, CloudFormation uses the default version of the extension in this account and Region for testing.

      " }, "LogDeliveryBucket":{ "shape":"S3Bucket", @@ -6342,7 +6474,7 @@ "members":{ "Arn":{ "shape":"TypeConfigurationArn", - "documentation":"

      The Amazon Resource Name (ARN) for the configuration data, in this account and region.

      " + "documentation":"

      The Amazon Resource Name (ARN) for the configuration data, in this account and Region.

      " }, "Alias":{ "shape":"TypeConfigurationAlias", @@ -6350,7 +6482,7 @@ }, "Configuration":{ "shape":"TypeConfiguration", - "documentation":"

      A JSON string specifying the configuration data for the extension, in this account and region.

      If a configuration hasn't been set for a specified extension, CloudFormation returns {}.

      " + "documentation":"

      A JSON string specifying the configuration data for the extension, in this account and Region.

      If a configuration hasn't been set for a specified extension, CloudFormation returns {}.

      " }, "LastUpdated":{ "shape":"Timestamp", @@ -6358,7 +6490,7 @@ }, "TypeArn":{ "shape":"TypeArn", - "documentation":"

      The Amazon Resource Name (ARN) for the extension, in this account and region.

      For public extensions, this will be the ARN assigned when you activate the type in this account and region. For private extensions, this will be the ARN assigned when you register the type in this account and region.

      " + "documentation":"

      The Amazon Resource Name (ARN) for the extension, in this account and Region.

      For public extensions, this will be the ARN assigned when you activate the type in this account and Region. For private extensions, this will be the ARN assigned when you register the type in this account and Region.

      " }, "TypeName":{ "shape":"TypeName", @@ -6369,7 +6501,7 @@ "documentation":"

      Whether this configuration data is the default configuration for the extension.

      " } }, - "documentation":"

      Detailed information concerning the specification of a CloudFormation extension in a given account and region.

      For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

      " + "documentation":"

      Detailed information concerning the specification of a CloudFormation extension in a given account and Region.

      For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

      " }, "TypeConfigurationDetailsList":{ "type":"list", @@ -6380,7 +6512,7 @@ "members":{ "TypeArn":{ "shape":"TypeArn", - "documentation":"

      The Amazon Resource Name (ARN) for the extension, in this account and region.

      For public extensions, this will be the ARN assigned when you activate the type in this account and region. For private extensions, this will be the ARN assigned when you register the type in this account and region.

      " + "documentation":"

      The Amazon Resource Name (ARN) for the extension, in this account and Region.

      For public extensions, this will be the ARN assigned when you activate the type in this account and Region. For private extensions, this will be the ARN assigned when you register the type in this account and Region.

      " }, "TypeConfigurationAlias":{ "shape":"TypeConfigurationAlias", @@ -6388,7 +6520,7 @@ }, "TypeConfigurationArn":{ "shape":"TypeConfigurationArn", - "documentation":"

      The Amazon Resource Name (ARN) for the configuration, in this account and region.

      " + "documentation":"

      The Amazon Resource Name (ARN) for the configuration, in this account and Region.

      " }, "Type":{ "shape":"ThirdPartyType", @@ -6423,7 +6555,7 @@ "members":{ "Category":{ "shape":"Category", - "documentation":"

      The category of extensions to return.

      • REGISTERED: Private extensions that have been registered for this account and region.

      • ACTIVATED: Public extensions that have been activated for this account and region.

      • THIRD_PARTY: Extensions available for use from publishers other than Amazon. This includes:

        • Private extensions registered in the account.

        • Public extensions from publishers other than Amazon, whether activated or not.

      • AWS_TYPES: Extensions available for use from Amazon.

      " + "documentation":"

      The category of extensions to return.

      • REGISTERED: Private extensions that have been registered for this account and Region.

      • ACTIVATED: Public extensions that have been activated for this account and Region.

      • THIRD_PARTY: Extensions available for use from publishers other than Amazon. This includes:

        • Private extensions registered in the account.

        • Public extensions from publishers other than Amazon, whether activated or not.

      • AWS_TYPES: Extensions available for use from Amazon.

      " }, "PublisherId":{ "shape":"PublisherId", @@ -6479,11 +6611,11 @@ }, "TypeName":{ "shape":"TypeName", - "documentation":"

      The name of the extension.

      If you specified a TypeNameAlias when you activate this extension in your account and region, CloudFormation considers that alias as the type name.

      " + "documentation":"

      The name of the extension.

      If you specified a TypeNameAlias when you activate this extension in your account and Region, CloudFormation considers that alias as the type name.

      " }, "DefaultVersionId":{ "shape":"TypeVersionId", - "documentation":"

      The ID of the default version of the extension. The default version is used when the extension version isn't specified.

      This applies only to private extensions you have registered in your account. For public extensions, both those provided by Amazon and published by third parties, CloudFormation returns null. For more information, see RegisterType.

      To set the default version of an extension, use SetTypeDefaultVersion .

      " + "documentation":"

      The ID of the default version of the extension. The default version is used when the extension version isn't specified.

      This applies only to private extensions you have registered in your account. For public extensions, both those provided by Amazon and published by third parties, CloudFormation returns null. For more information, see RegisterType.

      To set the default version of an extension, use SetTypeDefaultVersion.

      " }, "TypeArn":{ "shape":"TypeArn", @@ -6503,15 +6635,15 @@ }, "OriginalTypeName":{ "shape":"TypeName", - "documentation":"

      For public extensions that have been activated for this account and region, the type name of the public extension.

      If you specified a TypeNameAlias when enabling the extension in this account and region, CloudFormation treats that alias as the extension's type name within the account and region, not the type name of the public extension. For more information, see Specifying aliases to refer to extensions in the CloudFormation User Guide.

      " + "documentation":"

      For public extensions that have been activated for this account and Region, the type name of the public extension.

      If you specified a TypeNameAlias when enabling the extension in this account and Region, CloudFormation treats that alias as the extension's type name within the account and Region, not the type name of the public extension. For more information, see Specifying aliases to refer to extensions in the CloudFormation User Guide.

      " }, "PublicVersionNumber":{ "shape":"PublicVersionNumber", - "documentation":"

      For public extensions that have been activated for this account and region, the version of the public extension to be used for CloudFormation operations in this account and Region.

      How you specified AutoUpdate when enabling the extension affects whether CloudFormation automatically updates the extension in this account and region when a new version is released. For more information, see Setting CloudFormation to automatically use new versions of extensions in the CloudFormation User Guide.

      " + "documentation":"

      For public extensions that have been activated for this account and Region, the version of the public extension to be used for CloudFormation operations in this account and Region.

      How you specified AutoUpdate when enabling the extension affects whether CloudFormation automatically updates the extension in this account and Region when a new version is released. For more information, see Setting CloudFormation to automatically use new versions of extensions in the CloudFormation User Guide.

      " }, "LatestPublicVersion":{ "shape":"PublicVersionNumber", - "documentation":"

      For public extensions that have been activated for this account and region, the latest version of the public extension that is available. For any extensions other than activated third-arty extensions, CloudFormation returns null.

      How you specified AutoUpdate when enabling the extension affects whether CloudFormation automatically updates the extension in this account and region when a new version is released. For more information, see Setting CloudFormation to automatically use new versions of extensions in the CloudFormation User Guide.

      " + "documentation":"

      For public extensions that have been activated for this account and Region, the latest version of the public extension that is available. For any extensions other than activated third-party extensions, CloudFormation returns null.

      How you specified AutoUpdate when enabling the extension affects whether CloudFormation automatically updates the extension in this account and Region when a new version is released. For more information, see Setting CloudFormation to automatically use new versions of extensions in the CloudFormation User Guide.

      " }, "PublisherIdentity":{ "shape":"IdentityProvider", @@ -6523,7 +6655,7 @@ }, "IsActivated":{ "shape":"IsActivated", - "documentation":"

      Whether the extension is activated for this account and region.

      This applies only to third-party public extensions. Extensions published by Amazon are activated by default.

      " + "documentation":"

      Whether the extension is activated for this account and Region.

      This applies only to third-party public extensions. Extensions published by Amazon are activated by default.

      " } }, "documentation":"

      Contains summary information about the specified CloudFormation extension.

      " @@ -6586,7 +6718,7 @@ }, "PublicVersionNumber":{ "shape":"PublicVersionNumber", - "documentation":"

      For public extensions that have been activated for this account and region, the version of the public extension to be used for CloudFormation operations in this account and region. For any extensions other than activated third-arty extensions, CloudFormation returns null.

      How you specified AutoUpdate when enabling the extension affects whether CloudFormation automatically updates the extension in this account and region when a new version is released. For more information, see Setting CloudFormation to automatically use new versions of extensions in the CloudFormation User Guide.

      " + "documentation":"

      For public extensions that have been activated for this account and Region, the version of the public extension to be used for CloudFormation operations in this account and Region. For any extensions other than activated third-party extensions, CloudFormation returns null.

      How you specified AutoUpdate when enabling the extension affects whether CloudFormation automatically updates the extension in this account and Region when a new version is released. For more information, see Setting CloudFormation to automatically use new versions of extensions in the CloudFormation User Guide.

      " } }, "documentation":"

      Contains summary information about a specific version of a CloudFormation extension.

      " @@ -6909,5 +7041,5 @@ ] } }, - "documentation":"CloudFormation

      CloudFormation allows you to create and manage Amazon Web Services infrastructure deployments predictably and repeatedly. You can use CloudFormation to leverage Amazon Web Services products, such as Amazon Elastic Compute Cloud, Amazon Elastic Block Store, Amazon Simple Notification Service, Elastic Load Balancing, and Auto Scaling to build highly reliable, highly scalable, cost-effective applications without creating or configuring the underlying Amazon Web Services infrastructure.

      With CloudFormation, you declare all your resources and dependencies in a template file. The template defines a collection of resources as a single unit called a stack. CloudFormation creates and deletes all member resources of the stack together and manages all dependencies between the resources for you.

      For more information about CloudFormation, see the CloudFormation product page.

      CloudFormation makes use of other Amazon Web Services products. If you need additional technical information about a specific Amazon Web Services product, you can find the product's technical documentation at docs.aws.amazon.com .

      " + "documentation":"CloudFormation

      CloudFormation allows you to create and manage Amazon Web Services infrastructure deployments predictably and repeatedly. You can use CloudFormation to leverage Amazon Web Services products, such as Amazon Elastic Compute Cloud, Amazon Elastic Block Store, Amazon Simple Notification Service, Elastic Load Balancing, and Auto Scaling to build highly reliable, highly scalable, cost-effective applications without creating or configuring the underlying Amazon Web Services infrastructure.

      With CloudFormation, you declare all your resources and dependencies in a template file. The template defines a collection of resources as a single unit called a stack. CloudFormation creates and deletes all member resources of the stack together and manages all dependencies between the resources for you.

      For more information about CloudFormation, see the CloudFormation product page.

      CloudFormation makes use of other Amazon Web Services products. If you need additional technical information about a specific Amazon Web Services product, you can find the product's technical documentation at docs.aws.amazon.com.

      " } diff --git a/services/cloudfront/pom.xml b/services/cloudfront/pom.xml index add28699e0d4..9f5831a7512a 100644 --- a/services/cloudfront/pom.xml +++ b/services/cloudfront/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT cloudfront AWS Java SDK :: Services :: Amazon CloudFront diff --git a/services/cloudhsm/pom.xml b/services/cloudhsm/pom.xml index 1d0d6e15bc5b..654c2a334360 100644 --- a/services/cloudhsm/pom.xml +++ b/services/cloudhsm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT cloudhsm AWS Java SDK :: Services :: AWS CloudHSM diff --git a/services/cloudhsmv2/pom.xml b/services/cloudhsmv2/pom.xml index 7465eaaba94a..26eed888a882 100644 --- a/services/cloudhsmv2/pom.xml +++ b/services/cloudhsmv2/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 cloudhsmv2 diff --git a/services/cloudsearch/pom.xml b/services/cloudsearch/pom.xml index a18b46cfbce2..a13eb897ad07 100644 --- a/services/cloudsearch/pom.xml +++ b/services/cloudsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT cloudsearch AWS Java SDK :: Services :: Amazon CloudSearch diff --git a/services/cloudsearchdomain/pom.xml b/services/cloudsearchdomain/pom.xml index 0b49bdd26f6d..bfeb2041fe5f 100644 --- a/services/cloudsearchdomain/pom.xml +++ b/services/cloudsearchdomain/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT cloudsearchdomain AWS Java SDK :: Services :: Amazon CloudSearch Domain diff --git a/services/cloudtrail/pom.xml b/services/cloudtrail/pom.xml index 9cfb8338264f..70a433e5f36d 100644 --- a/services/cloudtrail/pom.xml +++ b/services/cloudtrail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT cloudtrail AWS Java SDK :: Services :: AWS CloudTrail diff --git 
a/services/cloudtrail/src/main/resources/codegen-resources/endpoint-tests.json b/services/cloudtrail/src/main/resources/codegen-resources/endpoint-tests.json index 3e60ff53d1ed..e68699853ca8 100644 --- a/services/cloudtrail/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/cloudtrail/src/main/resources/codegen-resources/endpoint-tests.json @@ -9,8 +9,8 @@ }, "params": { "Region": "af-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -22,8 +22,8 @@ }, "params": { "Region": "ap-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -35,8 +35,8 @@ }, "params": { "Region": "ap-northeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -48,8 +48,8 @@ }, "params": { "Region": "ap-northeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,8 +61,8 @@ }, "params": { "Region": "ap-northeast-3", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -74,8 +74,8 @@ }, "params": { "Region": "ap-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -87,8 +87,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -100,8 +100,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -113,8 +113,8 @@ }, "params": { "Region": "ap-southeast-3", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -126,8 +126,8 @@ }, "params": { "Region": "ca-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -139,8 +139,8 @@ }, "params": { "Region": "eu-central-1", - "UseDualStack": false, 
- "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -152,8 +152,8 @@ }, "params": { "Region": "eu-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -165,8 +165,8 @@ }, "params": { "Region": "eu-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -178,8 +178,8 @@ }, "params": { "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -191,8 +191,8 @@ }, "params": { "Region": "eu-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -204,8 +204,8 @@ }, "params": { "Region": "eu-west-3", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -217,8 +217,8 @@ }, "params": { "Region": "me-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -230,8 +230,8 @@ }, "params": { "Region": "sa-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -243,8 +243,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -256,8 +256,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -269,8 +269,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -282,8 +282,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -295,8 +295,8 @@ }, "params": { "Region": "us-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -308,8 +308,8 @@ }, "params": { "Region": "us-west-1", - "UseDualStack": false, - 
"UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -321,8 +321,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -334,8 +334,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -347,8 +347,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -360,8 +360,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -373,8 +373,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -386,8 +386,8 @@ }, "params": { "Region": "cn-northwest-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -399,8 +399,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -412,8 +412,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -425,8 +425,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -438,8 +438,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -451,8 +451,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -464,8 +464,8 @@ }, "params": { "Region": "us-gov-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -477,8 +477,8 @@ }, "params": { "Region": "us-gov-west-1", - "UseDualStack": false, - 
"UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -490,8 +490,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -503,8 +503,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -516,8 +516,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -529,8 +529,19 @@ }, "params": { "Region": "us-iso-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -542,8 +553,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -555,8 +577,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -568,8 +601,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + 
"UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -581,8 +625,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -594,8 +638,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -606,8 +650,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -618,10 +662,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/cloudtrail/src/main/resources/codegen-resources/service-2.json b/services/cloudtrail/src/main/resources/codegen-resources/service-2.json index 94ecd24d696a..9ded7eeb1e5f 100644 --- a/services/cloudtrail/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudtrail/src/main/resources/codegen-resources/service-2.json @@ -24,6 +24,8 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"CloudTrailARNInvalidException"}, + {"shape":"EventDataStoreARNInvalidException"}, + {"shape":"ChannelARNInvalidException"}, {"shape":"ResourceTypeNotSupportedException"}, {"shape":"TagsLimitExceededException"}, {"shape":"InvalidTrailNameException"}, @@ -37,7 +39,7 @@ {"shape":"NoManagementAccountSLRExistsException"}, {"shape":"ConflictException"} ], - "documentation":"

      Adds one or more tags to a trail, event data store, or channel, up to a limit of 50. Overwrites an existing tag's value when a new value is specified for an existing tag key. Tag key names must be unique; you cannot have two keys with the same name but different values. If you specify a key without a value, the tag will be created with the specified key and a value of null. You can tag a trail or event data store that applies to all Amazon Web Services Regions only from the Region in which the trail or event data store was created (also known as its home region).

      ", + "documentation":"

      Adds one or more tags to a trail, event data store, or channel, up to a limit of 50. Overwrites an existing tag's value when a new value is specified for an existing tag key. Tag key names must be unique; you cannot have two keys with the same name but different values. If you specify a key without a value, the tag will be created with the specified key and a value of null. You can tag a trail or event data store that applies to all Amazon Web Services Regions only from the Region in which the trail or event data store was created (also known as its home Region).

      ", "idempotent":true }, "CancelQuery":{ @@ -240,7 +242,7 @@ {"shape":"NoManagementAccountSLRExistsException"}, {"shape":"InsufficientDependencyServiceAccessPermissionException"} ], - "documentation":"

      Deletes a trail. This operation must be called from the region in which the trail was created. DeleteTrail cannot be called on the shadow trails (replicated trails in other regions) of a trail that is enabled in all regions.

      ", + "documentation":"

      Deletes a trail. This operation must be called from the Region in which the trail was created. DeleteTrail cannot be called on the shadow trails (replicated trails in other Regions) of a trail that is enabled in all Regions.

      ", "idempotent":true }, "DeregisterOrganizationDelegatedAdmin":{ @@ -285,7 +287,7 @@ {"shape":"UnsupportedOperationException"}, {"shape":"NoManagementAccountSLRExistsException"} ], - "documentation":"

      Returns metadata about a query, including query run time in milliseconds, number of events scanned and matched, and query status. You must specify an ARN for EventDataStore, and a value for QueryID.

      ", + "documentation":"

      Returns metadata about a query, including query run time in milliseconds, number of events scanned and matched, and query status. If the query results were delivered to an S3 bucket, the response also provides the S3 URI and the delivery status.

      You must specify either a QueryID or a QueryAlias. Specifying the QueryAlias parameter returns information about the last query run for the alias.

      ", "idempotent":true }, "DescribeTrails":{ @@ -302,7 +304,7 @@ {"shape":"InvalidTrailNameException"}, {"shape":"NoManagementAccountSLRExistsException"} ], - "documentation":"

      Retrieves settings for one or more trails associated with the current region for your account.

      ", + "documentation":"

      Retrieves settings for one or more trails associated with the current Region for your account.

      ", "idempotent":true }, "GetChannel":{ @@ -357,7 +359,7 @@ {"shape":"OperationNotPermittedException"}, {"shape":"NoManagementAccountSLRExistsException"} ], - "documentation":"

      Describes the settings for the event selectors that you configured for your trail. The information returned for your event selectors includes the following:

      • If your event selector includes read-only events, write-only events, or all events. This applies to both management events and data events.

      • If your event selector includes management events.

      • If your event selector includes data events, the resources on which you are logging data events.

      For more information about logging management and data events, see the following topics in the CloudTrail User Guide:

      ", + "documentation":"

      Describes the settings for the event selectors that you configured for your trail. The information returned for your event selectors includes the following:

      • If your event selector includes read-only events, write-only events, or all events. This applies to both management events and data events.

      • If your event selector includes management events.

      • If your event selector includes data events, the resources on which you are logging data events.

      For more information about logging management and data events, see the following topics in the CloudTrail User Guide:

      ", "idempotent":true }, "GetImport":{ @@ -417,7 +419,7 @@ {"shape":"UnsupportedOperationException"}, {"shape":"NoManagementAccountSLRExistsException"} ], - "documentation":"

      Gets event data results of a query. You must specify the QueryID value returned by the StartQuery operation, and an ARN for EventDataStore.

      " + "documentation":"

      Gets event data results of a query. You must specify the QueryID value returned by the StartQuery operation.

      " }, "GetResourcePolicy":{ "name":"GetResourcePolicy", @@ -471,7 +473,7 @@ {"shape":"UnsupportedOperationException"}, {"shape":"OperationNotPermittedException"} ], - "documentation":"

      Returns a JSON-formatted list of information about the specified trail. Fields include information on delivery errors, Amazon SNS and Amazon S3 errors, and start and stop logging times for each trail. This operation returns trail status from a single region. To return trail status from all regions, you must call the operation on each region.

      ", + "documentation":"

      Returns a JSON-formatted list of information about the specified trail. Fields include information on delivery errors, Amazon SNS and Amazon S3 errors, and start and stop logging times for each trail. This operation returns trail status from a single Region. To return trail status from all Regions, you must call the operation on each Region.

      ", "idempotent":true }, "ListChannels":{ @@ -505,7 +507,7 @@ {"shape":"UnsupportedOperationException"}, {"shape":"NoManagementAccountSLRExistsException"} ], - "documentation":"

      Returns information about all event data stores in the account, in the current region.

      ", + "documentation":"

      Returns information about all event data stores in the account, in the current Region.

      ", "idempotent":true }, "ListImportFailures":{ @@ -556,7 +558,7 @@ {"shape":"OperationNotPermittedException"}, {"shape":"InvalidTokenException"} ], - "documentation":"

      Returns all public keys whose private keys were used to sign the digest files within the specified time range. The public key is needed to validate digest files that were signed with its corresponding private key.

      CloudTrail uses different private and public key pairs per region. Each digest file is signed with a private key unique to its region. When you validate a digest file from a specific region, you must look in the same region for its corresponding public key.

      ", + "documentation":"

      Returns all public keys whose private keys were used to sign the digest files within the specified time range. The public key is needed to validate digest files that were signed with its corresponding private key.

      CloudTrail uses different private and public key pairs per Region. Each digest file is signed with a private key unique to its Region. When you validate a digest file from a specific Region, you must look in the same Region for its corresponding public key.

      ", "idempotent":true }, "ListQueries":{ @@ -594,6 +596,8 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"CloudTrailARNInvalidException"}, + {"shape":"EventDataStoreARNInvalidException"}, + {"shape":"ChannelARNInvalidException"}, {"shape":"ResourceTypeNotSupportedException"}, {"shape":"InvalidTrailNameException"}, {"shape":"InactiveEventDataStoreException"}, @@ -603,7 +607,7 @@ {"shape":"InvalidTokenException"}, {"shape":"NoManagementAccountSLRExistsException"} ], - "documentation":"

      Lists the tags for the trail, event data store, or channel in the current region.

      ", + "documentation":"

      Lists the tags for the specified trails, event data stores, or channels in the current Region.

      ", "idempotent":true }, "ListTrails":{ @@ -638,7 +642,7 @@ {"shape":"UnsupportedOperationException"}, {"shape":"OperationNotPermittedException"} ], - "documentation":"

      Looks up management events or CloudTrail Insights events that are captured by CloudTrail. You can look up events that occurred in a region within the last 90 days. Lookup supports the following attributes for management events:

      • Amazon Web Services access key

      • Event ID

      • Event name

      • Event source

      • Read only

      • Resource name

      • Resource type

      • User name

      Lookup supports the following attributes for Insights events:

      • Event ID

      • Event name

      • Event source

      All attributes are optional. The default number of results returned is 50, with a maximum of 50 possible. The response includes a token that you can use to get the next page of results.

      The rate of lookup requests is limited to two per second, per account, per region. If this limit is exceeded, a throttling error occurs.

      ", + "documentation":"

      Looks up management events or CloudTrail Insights events that are captured by CloudTrail. You can look up events that occurred in a Region within the last 90 days. Lookup supports the following attributes for management events:

      • Amazon Web Services access key

      • Event ID

      • Event name

      • Event source

      • Read only

      • Resource name

      • Resource type

      • User name

      Lookup supports the following attributes for Insights events:

      • Event ID

      • Event name

      • Event source

      All attributes are optional. The default number of results returned is 50, with a maximum of 50 possible. The response includes a token that you can use to get the next page of results.

      The rate of lookup requests is limited to two per second, per account, per Region. If this limit is exceeded, a throttling error occurs.

      ", "idempotent":true }, "PutEventSelectors":{ @@ -655,13 +659,14 @@ {"shape":"CloudTrailARNInvalidException"}, {"shape":"InvalidHomeRegionException"}, {"shape":"InvalidEventSelectorsException"}, + {"shape":"ConflictException"}, {"shape":"UnsupportedOperationException"}, {"shape":"OperationNotPermittedException"}, {"shape":"NotOrganizationMasterAccountException"}, {"shape":"NoManagementAccountSLRExistsException"}, {"shape":"InsufficientDependencyServiceAccessPermissionException"} ], - "documentation":"

      Configures an event selector or advanced event selectors for your trail. Use event selectors or advanced event selectors to specify management and data event settings for your trail. By default, trails created without specific event selectors are configured to log all read and write management events, and no data events.

      When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.

      Example

      1. You create an event selector for a trail and specify that you want write-only events.

      2. The EC2 GetConsoleOutput and RunInstances API operations occur in your account.

      3. CloudTrail evaluates whether the events match your event selectors.

      4. The RunInstances is a write-only event and it matches your event selector. The trail logs the event.

      5. The GetConsoleOutput is a read-only event that doesn't match your event selector. The trail doesn't log the event.

      The PutEventSelectors operation must be called from the region in which the trail was created; otherwise, an InvalidHomeRegionException exception is thrown.

      You can configure up to five event selectors for each trail. For more information, see Logging management events for trails , Logging data events for trails , and Quotas in CloudTrail in the CloudTrail User Guide.

      You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. For more information about advanced event selectors, see Logging data events for trails in the CloudTrail User Guide.

      ", + "documentation":"

      Configures an event selector or advanced event selectors for your trail. Use event selectors or advanced event selectors to specify management and data event settings for your trail. If you want your trail to log Insights events, be sure the event selector enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Logging Insights events for trails in the CloudTrail User Guide. By default, trails created without specific event selectors are configured to log all read and write management events, and no data events.

      When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.

      Example

      1. You create an event selector for a trail and specify that you want write-only events.

      2. The EC2 GetConsoleOutput and RunInstances API operations occur in your account.

      3. CloudTrail evaluates whether the events match your event selectors.

      4. RunInstances is a write-only event and it matches your event selector. The trail logs the event.

      5. GetConsoleOutput is a read-only event that doesn't match your event selector. The trail doesn't log the event.

      The PutEventSelectors operation must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException exception is thrown.

      You can configure up to five event selectors for each trail. For more information, see Logging management events, Logging data events, and Quotas in CloudTrail in the CloudTrail User Guide.

      You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. For more information about advanced event selectors, see Logging data events in the CloudTrail User Guide.

      ", "idempotent":true }, "PutInsightSelectors":{ @@ -687,7 +692,7 @@ {"shape":"NotOrganizationMasterAccountException"}, {"shape":"NoManagementAccountSLRExistsException"} ], - "documentation":"

      Lets you enable Insights event logging by specifying the Insights selectors that you want to enable on an existing trail. You also use PutInsightSelectors to turn off Insights event logging, by passing an empty list of insight types. The valid Insights event types in this release are ApiErrorRateInsight and ApiCallRateInsight.

      ", + "documentation":"

      Lets you enable Insights event logging by specifying the Insights selectors that you want to enable on an existing trail. You also use PutInsightSelectors to turn off Insights event logging, by passing an empty list of insight types. The valid Insights event types in this release are ApiErrorRateInsight and ApiCallRateInsight.

      To log CloudTrail Insights events on API call volume, the trail must log write management events. To log CloudTrail Insights events on API error rate, the trail must log read or write management events. You can call GetEventSelectors on a trail to check whether the trail logs management events.

      ", "idempotent":true }, "PutResourcePolicy":{ @@ -746,6 +751,8 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"CloudTrailARNInvalidException"}, + {"shape":"EventDataStoreARNInvalidException"}, + {"shape":"ChannelARNInvalidException"}, {"shape":"ResourceTypeNotSupportedException"}, {"shape":"InvalidTrailNameException"}, {"shape":"InvalidTagParameterException"}, @@ -785,6 +792,28 @@ ], "documentation":"

      Restores a deleted event data store specified by EventDataStore, which accepts an event data store ARN. You can only restore a deleted event data store within the seven-day wait period after deletion. Restoring an event data store can take several minutes, depending on the size of the event data store.

      " }, + "StartEventDataStoreIngestion":{ + "name":"StartEventDataStoreIngestion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartEventDataStoreIngestionRequest"}, + "output":{"shape":"StartEventDataStoreIngestionResponse"}, + "errors":[ + {"shape":"EventDataStoreARNInvalidException"}, + {"shape":"EventDataStoreNotFoundException"}, + {"shape":"InvalidEventDataStoreStatusException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidEventDataStoreCategoryException"}, + {"shape":"OperationNotPermittedException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"NotOrganizationMasterAccountException"}, + {"shape":"NoManagementAccountSLRExistsException"}, + {"shape":"InsufficientDependencyServiceAccessPermissionException"} + ], + "documentation":"

      Starts the ingestion of live events on an event data store specified as either an ARN or the ID portion of the ARN. To start ingestion, the event data store Status must be STOPPED_INGESTION and the eventCategory must be Management, Data, or ConfigurationItem.

      " + }, "StartImport":{ "name":"StartImport", "http":{ @@ -831,7 +860,7 @@ {"shape":"NoManagementAccountSLRExistsException"}, {"shape":"InsufficientDependencyServiceAccessPermissionException"} ], - "documentation":"

      Starts the recording of Amazon Web Services API calls and log file delivery for a trail. For a trail that is enabled in all regions, this operation must be called from the region in which the trail was created. This operation cannot be called on the shadow trails (replicated trails in other regions) of a trail that is enabled in all regions.

      ", + "documentation":"

      Starts the recording of Amazon Web Services API calls and log file delivery for a trail. For a trail that is enabled in all Regions, this operation must be called from the Region in which the trail was created. This operation cannot be called on the shadow trails (replicated trails in other Regions) of a trail that is enabled in all Regions.

      ", "idempotent":true }, "StartQuery":{ @@ -858,9 +887,31 @@ {"shape":"UnsupportedOperationException"}, {"shape":"NoManagementAccountSLRExistsException"} ], - "documentation":"

      Starts a CloudTrail Lake query. The required QueryStatement parameter provides your SQL query, enclosed in single quotation marks. Use the optional DeliveryS3Uri parameter to deliver the query results to an S3 bucket.

      ", + "documentation":"

      Starts a CloudTrail Lake query. Use the QueryStatement parameter to provide your SQL query, enclosed in single quotation marks. Use the optional DeliveryS3Uri parameter to deliver the query results to an S3 bucket.

      StartQuery requires that you specify either the QueryStatement parameter, or a QueryAlias and any QueryParameters. In the current release, the QueryAlias and QueryParameters parameters are used only for the queries that populate the CloudTrail Lake dashboards.

      ", "idempotent":true }, + "StopEventDataStoreIngestion":{ + "name":"StopEventDataStoreIngestion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopEventDataStoreIngestionRequest"}, + "output":{"shape":"StopEventDataStoreIngestionResponse"}, + "errors":[ + {"shape":"EventDataStoreARNInvalidException"}, + {"shape":"EventDataStoreNotFoundException"}, + {"shape":"InvalidEventDataStoreStatusException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidEventDataStoreCategoryException"}, + {"shape":"OperationNotPermittedException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"NotOrganizationMasterAccountException"}, + {"shape":"NoManagementAccountSLRExistsException"}, + {"shape":"InsufficientDependencyServiceAccessPermissionException"} + ], + "documentation":"

      Stops the ingestion of live events on an event data store specified as either an ARN or the ID portion of the ARN. To stop ingestion, the event data store Status must be ENABLED and the eventCategory must be Management, Data, or ConfigurationItem.

      " + }, "StopImport":{ "name":"StopImport", "http":{ @@ -897,7 +948,7 @@ {"shape":"NoManagementAccountSLRExistsException"}, {"shape":"InsufficientDependencyServiceAccessPermissionException"} ], - "documentation":"

      Suspends the recording of Amazon Web Services API calls and log file delivery for the specified trail. Under most circumstances, there is no need to use this action. You can update a trail without stopping it first. This action is the only way to stop recording. For a trail enabled in all regions, this operation must be called from the region in which the trail was created, or an InvalidHomeRegionException will occur. This operation cannot be called on the shadow trails (replicated trails in other regions) of a trail enabled in all regions.

      ", + "documentation":"

      Suspends the recording of Amazon Web Services API calls and log file delivery for the specified trail. Under most circumstances, there is no need to use this action. You can update a trail without stopping it first. This action is the only way to stop recording. For a trail enabled in all Regions, this operation must be called from the Region in which the trail was created, or an InvalidHomeRegionException will occur. This operation cannot be called on the shadow trails (replicated trails in other Regions) of a trail enabled in all Regions.

      ", "idempotent":true }, "UpdateChannel":{ @@ -932,6 +983,7 @@ "input":{"shape":"UpdateEventDataStoreRequest"}, "output":{"shape":"UpdateEventDataStoreResponse"}, "errors":[ + {"shape":"EventDataStoreAlreadyExistsException"}, {"shape":"EventDataStoreARNInvalidException"}, {"shape":"EventDataStoreNotFoundException"}, {"shape":"InvalidEventSelectorsException"}, @@ -951,7 +1003,7 @@ {"shape":"NoManagementAccountSLRExistsException"}, {"shape":"OrganizationNotInAllFeaturesModeException"} ], - "documentation":"

      Updates an event data store. The required EventDataStore value is an ARN or the ID portion of the ARN. Other parameters are optional, but at least one optional parameter must be specified, or CloudTrail throws an error. RetentionPeriod is in days, and valid values are integers between 90 and 2557. By default, TerminationProtection is enabled.

      For event data stores for CloudTrail events, AdvancedEventSelectors includes or excludes management and data events in your event data store. For more information about AdvancedEventSelectors, see PutEventSelectorsRequest$AdvancedEventSelectors.

      For event data stores for Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, AdvancedEventSelectors includes events of that type in your event data store.

      ", + "documentation":"

      Updates an event data store. The required EventDataStore value is an ARN or the ID portion of the ARN. Other parameters are optional, but at least one optional parameter must be specified, or CloudTrail throws an error. RetentionPeriod is in days, and valid values are integers between 90 and 2557. By default, TerminationProtection is enabled.

      For event data stores for CloudTrail events, AdvancedEventSelectors includes or excludes management and data events in your event data store. For more information about AdvancedEventSelectors, see AdvancedEventSelectors.

      For event data stores for Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, AdvancedEventSelectors includes events of that type in your event data store.

      ", "idempotent":true }, "UpdateTrail":{ @@ -996,7 +1048,7 @@ {"shape":"CloudTrailInvalidClientTokenIdException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

      Updates trail settings that control what events you are logging, and how to handle log files. Changes to a trail do not require stopping the CloudTrail service. Use this action to designate an existing bucket for log delivery. If the existing bucket has previously been a target for CloudTrail log files, an IAM policy exists for the bucket. UpdateTrail must be called from the region in which the trail was created; otherwise, an InvalidHomeRegionException is thrown.

      ", + "documentation":"

      Updates trail settings that control what events you are logging, and how to handle log files. Changes to a trail do not require stopping the CloudTrail service. Use this action to designate an existing bucket for log delivery. If the existing bucket has previously been a target for CloudTrail log files, an IAM policy exists for the bucket. UpdateTrail must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException is thrown.

      ", "idempotent":true } }, @@ -1044,7 +1096,7 @@ "members":{ "ResourceId":{ "shape":"String", - "documentation":"

      Specifies the ARN of the trail, event data store, or channel to which one or more tags will be added.

      The format of a trail ARN is: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail

      The format of an event data store ARN is: arn:aws:cloudtrail:us-east-2:12345678910:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE

      The format of a channel ARN is: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890

      " + "documentation":"

      Specifies the ARN of the trail, event data store, or channel to which one or more tags will be added.

      The format of a trail ARN is: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail

      The format of an event data store ARN is: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE

      The format of a channel ARN is: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890

      " }, "TagsList":{ "shape":"TagsList", @@ -1072,7 +1124,7 @@ "documentation":"

      Contains all selector statements in an advanced event selector.

      " } }, - "documentation":"

      Advanced event selectors let you create fine-grained selectors for the following CloudTrail event record fields. They help you control costs by logging only those events that are important to you. For more information about advanced event selectors, see Logging data events for trails in the CloudTrail User Guide.

      • readOnly

      • eventSource

      • eventName

      • eventCategory

      • resources.type

      • resources.ARN

      You cannot apply both event selectors and advanced event selectors to a trail.

      " + "documentation":"

      Advanced event selectors let you create fine-grained selectors for the following CloudTrail event record fields. They help you control costs by logging only those events that are important to you. For more information about advanced event selectors, see Logging data events in the CloudTrail User Guide.

      • readOnly

      • eventSource

      • eventName

      • eventCategory

      • resources.type

      • resources.ARN

      You cannot apply both event selectors and advanced event selectors to a trail.

      " }, "AdvancedEventSelectors":{ "type":"list", @@ -1084,7 +1136,7 @@ "members":{ "Field":{ "shape":"SelectorField", - "documentation":"

      A field in a CloudTrail event record on which to filter events to be logged. For event data stores for Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, the field is used only for selecting events as filtering is not supported.

      For CloudTrail event records, supported fields include readOnly, eventCategory, eventSource (for management events), eventName, resources.type, and resources.ARN.

      For event data stores for Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, the only supported field is eventCategory.

      • readOnly - Optional. Can be set to Equals a value of true or false. If you do not add this field, CloudTrail logs both read and write events. A value of true logs only read events. A value of false logs only write events.

      • eventSource - For filtering management events only. This can be set only to NotEquals kms.amazonaws.com.

      • eventName - Can use any operator. You can use it to filter in or filter out any data event logged to CloudTrail, such as PutBucket or GetSnapshotBlock. You can have multiple values for this field, separated by commas.

      • eventCategory - This is required and must be set to Equals.

        • For CloudTrail event records, the value must be Management or Data.

        • For Config configuration items, the value must be ConfigurationItem.

        • For Audit Manager evidence, the value must be Evidence.

        • For non-Amazon Web Services events, the value must be ActivityAuditLog.

      • resources.type - This field is required for CloudTrail data events. resources.type can only use the Equals operator, and the value can be one of the following:

        • AWS::CloudTrail::Channel

        • AWS::S3::Object

        • AWS::Lambda::Function

        • AWS::DynamoDB::Table

        • AWS::S3Outposts::Object

        • AWS::ManagedBlockchain::Node

        • AWS::S3ObjectLambda::AccessPoint

        • AWS::EC2::Snapshot

        • AWS::S3::AccessPoint

        • AWS::DynamoDB::Stream

        • AWS::Glue::Table

        • AWS::FinSpace::Environment

        • AWS::SageMaker::ExperimentTrialComponent

        • AWS::SageMaker::FeatureGroup

        You can have only one resources.type field per selector. To log data events on more than one resource type, add another selector.

      • resources.ARN - You can use any operator with resources.ARN, but if you use Equals or NotEquals, the value must exactly match the ARN of a valid resource of the type you've specified in the template as the value of resources.type. For example, if resources.type equals AWS::S3::Object, the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the StartsWith operator, and include only the bucket ARN as the matching value.

        The trailing slash is intentional; do not exclude it. Replace the text between less than and greater than symbols (<>) with resource-specific information.

        • arn:<partition>:s3:::<bucket_name>/

        • arn:<partition>:s3:::<bucket_name>/<object_path>/

        When resources.type equals AWS::S3::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don't include the object path, and use the StartsWith or NotStartsWith operators.

        • arn:<partition>:s3:<region>:<account_ID>:accesspoint/<access_point_name>

        • arn:<partition>:s3:<region>:<account_ID>:accesspoint/<access_point_name>/object/<object_path>

        When resources.type equals AWS::Lambda::Function, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:lambda:<region>:<account_ID>:function:<function_name>

        When resources.type equals AWS::DynamoDB::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:dynamodb:<region>:<account_ID>:table/<table_name>

        When resources.type equals AWS::CloudTrail::Channel, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:cloudtrail:<region>:<account_ID>:channel/<channel_UUID>

        When resources.type equals AWS::S3Outposts::Object, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:s3-outposts:<region>:<account_ID>:<object_path>

        When resources.type equals AWS::ManagedBlockchain::Node, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:managedblockchain:<region>:<account_ID>:nodes/<node_ID>

        When resources.type equals AWS::S3ObjectLambda::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:s3-object-lambda:<region>:<account_ID>:accesspoint/<access_point_name>

        When resources.type equals AWS::EC2::Snapshot, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:ec2:<region>::snapshot/<snapshot_ID>

        When resources.type equals AWS::DynamoDB::Stream, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:dynamodb:<region>:<account_ID>:table/<table_name>/stream/<date_time>

        When resources.type equals AWS::Glue::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:glue:<region>:<account_ID>:table/<database_name>/<table_name>

        When resources.type equals AWS::FinSpace::Environment, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:finspace:<region>:<account_ID>:environment/<environment_ID>

        When resources.type equals AWS::SageMaker::ExperimentTrialComponent, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:sagemaker:<region>:<account_ID>:experiment-trial-component/<experiment_trial_component_name>

        When resources.type equals AWS::SageMaker::FeatureGroup, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:sagemaker:<region>:<account_ID>:feature-group/<feature_group_name>

      " + "documentation":"

      A field in a CloudTrail event record on which to filter events to be logged. For event data stores for Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, the field is used only for selecting events as filtering is not supported.

      For CloudTrail event records, supported fields include readOnly, eventCategory, eventSource (for management events), eventName, resources.type, and resources.ARN.

      For event data stores for Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, the only supported field is eventCategory.

      • readOnly - Optional. Can be set to Equals a value of true or false. If you do not add this field, CloudTrail logs both read and write events. A value of true logs only read events. A value of false logs only write events.

      • eventSource - For filtering management events only. This can be set only to NotEquals kms.amazonaws.com.

      • eventName - Can use any operator. You can use it to filter in or filter out any data event logged to CloudTrail, such as PutBucket or GetSnapshotBlock. You can have multiple values for this field, separated by commas.

      • eventCategory - This is required and must be set to Equals.

        • For CloudTrail event records, the value must be Management or Data.

        • For Config configuration items, the value must be ConfigurationItem.

        • For Audit Manager evidence, the value must be Evidence.

        • For non-Amazon Web Services events, the value must be ActivityAuditLog.

      • resources.type - This field is required for CloudTrail data events. resources.type can only use the Equals operator, and the value can be one of the following:

        • AWS::DynamoDB::Table

        • AWS::Lambda::Function

        • AWS::S3::Object

        • AWS::CloudTrail::Channel

        • AWS::CodeWhisperer::Profile

        • AWS::Cognito::IdentityPool

        • AWS::DynamoDB::Stream

        • AWS::EC2::Snapshot

        • AWS::EMRWAL::Workspace

        • AWS::FinSpace::Environment

        • AWS::Glue::Table

        • AWS::GuardDuty::Detector

        • AWS::KendraRanking::ExecutionPlan

        • AWS::ManagedBlockchain::Node

        • AWS::SageMaker::ExperimentTrialComponent

        • AWS::SageMaker::FeatureGroup

        • AWS::S3::AccessPoint

        • AWS::S3ObjectLambda::AccessPoint

        • AWS::S3Outposts::Object

        You can have only one resources.type field per selector. To log data events on more than one resource type, add another selector.

      • resources.ARN - You can use any operator with resources.ARN, but if you use Equals or NotEquals, the value must exactly match the ARN of a valid resource of the type you've specified in the template as the value of resources.type. For example, if resources.type equals AWS::S3::Object, the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the StartsWith operator, and include only the bucket ARN as the matching value.

        The trailing slash is intentional; do not exclude it. Replace the text between less than and greater than symbols (<>) with resource-specific information.

        • arn:<partition>:s3:::<bucket_name>/

        • arn:<partition>:s3:::<bucket_name>/<object_path>/

        When resources.type equals AWS::DynamoDB::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:dynamodb:<region>:<account_ID>:table/<table_name>

        When resources.type equals AWS::Lambda::Function, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:lambda:<region>:<account_ID>:function:<function_name>

        When resources.type equals AWS::CloudTrail::Channel, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:cloudtrail:<region>:<account_ID>:channel/<channel_UUID>

        When resources.type equals AWS::CodeWhisperer::Profile, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:codewhisperer:<region>:<account_ID>:profile/<profile_ID>

        When resources.type equals AWS::Cognito::IdentityPool, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:cognito-identity:<region>:<account_ID>:identitypool/<identity_pool_ID>

        When resources.type equals AWS::DynamoDB::Stream, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:dynamodb:<region>:<account_ID>:table/<table_name>/stream/<date_time>

        When resources.type equals AWS::EC2::Snapshot, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:ec2:<region>::snapshot/<snapshot_ID>

        When resources.type equals AWS::EMRWAL::Workspace, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:emrwal:<region>::workspace/<workspace_name>

        When resources.type equals AWS::FinSpace::Environment, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:finspace:<region>:<account_ID>:environment/<environment_ID>

        When resources.type equals AWS::Glue::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:glue:<region>:<account_ID>:table/<database_name>/<table_name>

        When resources.type equals AWS::GuardDuty::Detector, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:guardduty:<region>:<account_ID>:detector/<detector_ID>

        When resources.type equals AWS::KendraRanking::ExecutionPlan, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:kendra-ranking:<region>:<account_ID>:rescore-execution-plan/<rescore_execution_plan_ID>

        When resources.type equals AWS::ManagedBlockchain::Node, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:managedblockchain:<region>:<account_ID>:nodes/<node_ID>

        When resources.type equals AWS::SageMaker::ExperimentTrialComponent, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:sagemaker:<region>:<account_ID>:experiment-trial-component/<experiment_trial_component_name>

        When resources.type equals AWS::SageMaker::FeatureGroup, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:sagemaker:<region>:<account_ID>:feature-group/<feature_group_name>

        When resources.type equals AWS::S3::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don’t include the object path, and use the StartsWith or NotStartsWith operators.

        • arn:<partition>:s3:<region>:<account_ID>:accesspoint/<access_point_name>

        • arn:<partition>:s3:<region>:<account_ID>:accesspoint/<access_point_name>/object/<object_path>

        When resources.type equals AWS::S3ObjectLambda::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:s3-object-lambda:<region>:<account_ID>:accesspoint/<access_point_name>

        When resources.type equals AWS::S3Outposts::Object, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

        • arn:<partition>:s3-outposts:<region>:<account_ID>:<object_path>

      " }, "Equals":{ "shape":"Operator", @@ -1229,7 +1281,7 @@ "type":"structure", "members":{ }, - "documentation":"

      This exception is thrown when an operation is called with a trail ARN that is not valid. The following is the format of a trail ARN.

      arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail

      This exception is also thrown when you call AddTags or RemoveTags on a trail, event data store, or channel with a resource ARN that is not valid.

      The following is the format of an event data store ARN: arn:aws:cloudtrail:us-east-2:12345678910:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE

      The following is the format of a channel ARN: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890

      ", + "documentation":"

      This exception is thrown when an operation is called with a trail ARN that is not valid. The following is the format of a trail ARN.

      arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail

      This exception is also thrown when you call AddTags or RemoveTags on a trail, event data store, or channel with a resource ARN that is not valid.

      The following is the format of an event data store ARN: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE

      The following is the format of a channel ARN: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890

      ", "exception":true }, "CloudTrailAccessNotEnabledException":{ @@ -1250,7 +1302,7 @@ "type":"structure", "members":{ }, - "documentation":"

      Cannot set a CloudWatch Logs delivery for this region.

      ", + "documentation":"

      Cannot set a CloudWatch Logs delivery for this Region.

      ", "exception":true }, "ConflictException":{ @@ -1319,7 +1371,7 @@ }, "MultiRegionEnabled":{ "shape":"Boolean", - "documentation":"

      Specifies whether the event data store includes events from all regions, or only from the region in which the event data store is created.

      " + "documentation":"

      Specifies whether the event data store includes events from all Regions, or only from the Region in which the event data store is created.

      " }, "OrganizationEnabled":{ "shape":"Boolean", @@ -1337,6 +1389,10 @@ "KmsKeyId":{ "shape":"EventDataStoreKmsKeyId", "documentation":"

      Specifies the KMS key ID to use to encrypt the events delivered by CloudTrail. The value can be an alias name prefixed by alias/, a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.

      Disabling or deleting the KMS key, or removing CloudTrail permissions on the key, prevents CloudTrail from logging events to the event data store, and prevents users from querying the data in the event data store that was encrypted with the key. After you associate an event data store with a KMS key, the KMS key cannot be removed or changed. Before you disable or delete a KMS key that you are using with an event data store, delete or back up your event data store.

      CloudTrail also supports KMS multi-Region keys. For more information about multi-Region keys, see Using multi-Region keys in the Key Management Service Developer Guide.

      Examples:

      • alias/MyAliasName

      • arn:aws:kms:us-east-2:123456789012:alias/MyAliasName

      • arn:aws:kms:us-east-2:123456789012:key/12345678-1234-1234-1234-123456789012

      • 12345678-1234-1234-1234-123456789012

      " + }, + "StartIngestion":{ + "shape":"Boolean", + "documentation":"

      Specifies whether the event data store should start ingesting live events. The default is true.

      " } } }, @@ -1361,7 +1417,7 @@ }, "MultiRegionEnabled":{ "shape":"Boolean", - "documentation":"

      Indicates whether the event data store collects events from all regions, or only from the region in which it was created.

      " + "documentation":"

      Indicates whether the event data store collects events from all Regions, or only from the Region in which it was created.

      " }, "OrganizationEnabled":{ "shape":"Boolean", @@ -1419,7 +1475,7 @@ }, "IsMultiRegionTrail":{ "shape":"Boolean", - "documentation":"

      Specifies whether the trail is created in the current region or in all regions. The default is false, which creates a trail only in the region where you are signed in. As a best practice, consider creating trails that log events in all regions.

      " + "documentation":"

      Specifies whether the trail is created in the current Region or in all Regions. The default is false, which creates a trail only in the Region where you are signed in. As a best practice, consider creating trails that log events in all Regions.

      " }, "EnableLogFileValidation":{ "shape":"Boolean", @@ -1462,7 +1518,7 @@ }, "SnsTopicName":{ "shape":"String", - "documentation":"

      This field is no longer in use. Use SnsTopicARN.

      ", + "documentation":"

      This field is no longer in use. Use SnsTopicARN.

      ", "deprecated":true }, "SnsTopicARN":{ @@ -1475,7 +1531,7 @@ }, "IsMultiRegionTrail":{ "shape":"Boolean", - "documentation":"

      Specifies whether the trail exists in one region or in all regions.

      " + "documentation":"

      Specifies whether the trail exists in one Region or in all Regions.

      " }, "TrailARN":{ "shape":"String", @@ -1509,7 +1565,7 @@ "members":{ "Type":{ "shape":"String", - "documentation":"

      The resource type in which you want to log data events. You can specify the following basic event selector resource types:

      • AWS::S3::Object

      • AWS::Lambda::Function

      • AWS::DynamoDB::Table

      The following resource types are also available through advanced event selectors. Basic event selector resource types are valid in advanced event selectors, but advanced event selector resource types are not valid in basic event selectors. For more information, see AdvancedFieldSelector$Field.

      • AWS::CloudTrail::Channel

      • AWS::S3Outposts::Object

      • AWS::ManagedBlockchain::Node

      • AWS::S3ObjectLambda::AccessPoint

      • AWS::EC2::Snapshot

      • AWS::S3::AccessPoint

      • AWS::DynamoDB::Stream

      • AWS::Glue::Table

      • AWS::FinSpace::Environment

      • AWS::SageMaker::ExperimentTrialComponent

      • AWS::SageMaker::FeatureGroup

      " + "documentation":"

      The resource type in which you want to log data events. You can specify the following basic event selector resource types:

      • AWS::DynamoDB::Table

      • AWS::Lambda::Function

      • AWS::S3::Object

      The following resource types are also available through advanced event selectors. Basic event selector resource types are valid in advanced event selectors, but advanced event selector resource types are not valid in basic event selectors. For more information, see AdvancedFieldSelector.

      • AWS::CloudTrail::Channel

      • AWS::CodeWhisperer::Profile

      • AWS::Cognito::IdentityPool

      • AWS::DynamoDB::Stream

      • AWS::EC2::Snapshot

      • AWS::EMRWAL::Workspace

      • AWS::FinSpace::Environment

      • AWS::Glue::Table

      • AWS::GuardDuty::Detector

      • AWS::KendraRanking::ExecutionPlan

      • AWS::ManagedBlockchain::Node

      • AWS::SageMaker::ExperimentTrialComponent

      • AWS::SageMaker::FeatureGroup

      • AWS::S3::AccessPoint

      • AWS::S3ObjectLambda::AccessPoint

      • AWS::S3Outposts::Object

      " }, "Values":{ "shape":"DataResourceValues", @@ -1634,7 +1690,6 @@ }, "DescribeQueryRequest":{ "type":"structure", - "required":["QueryId"], "members":{ "EventDataStore":{ "shape":"EventDataStoreArn", @@ -1645,6 +1700,10 @@ "QueryId":{ "shape":"UUID", "documentation":"

      The query ID.

      " + }, + "QueryAlias":{ + "shape":"QueryAlias", + "documentation":"

      The alias that identifies a query template.

      " } } }, @@ -1686,11 +1745,11 @@ "members":{ "trailNameList":{ "shape":"TrailNameList", - "documentation":"

      Specifies a list of trail names, trail ARNs, or both, of the trails to describe. The format of a trail ARN is:

      arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail

      If an empty list is specified, information for the trail in the current region is returned.

      • If an empty list is specified and IncludeShadowTrails is false, then information for all trails in the current region is returned.

      • If an empty list is specified and IncludeShadowTrails is null or true, then information for all trails in the current region and any associated shadow trails in other regions is returned.

      If one or more trail names are specified, information is returned only if the names match the names of trails belonging only to the current region and current account. To return information about a trail in another region, you must specify its trail ARN.

      " + "documentation":"

      Specifies a list of trail names, trail ARNs, or both, of the trails to describe. The format of a trail ARN is:

      arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail

      If an empty list is specified, information for the trail in the current Region is returned.

      • If an empty list is specified and IncludeShadowTrails is false, then information for all trails in the current Region is returned.

      • If an empty list is specified and IncludeShadowTrails is null or true, then information for all trails in the current Region and any associated shadow trails in other Regions is returned.

      If one or more trail names are specified, information is returned only if the names match the names of trails belonging only to the current Region and current account. To return information about a trail in another Region, you must specify its trail ARN.

      " }, "includeShadowTrails":{ "shape":"Boolean", - "documentation":"

      Specifies whether to include shadow trails in the response. A shadow trail is the replication in a region of a trail that was created in a different region, or in the case of an organization trail, the replication of an organization trail in member accounts. If you do not include shadow trails, organization trails in a member account and region replication trails will not be returned. The default is true.

      " + "documentation":"

      Specifies whether to include shadow trails in the response. A shadow trail is the replication in a Region of a trail that was created in a different Region, or in the case of an organization trail, the replication of an organization trail in member accounts. If you do not include shadow trails, organization trails in a member account and Region replication trails will not be returned. The default is true.

      " } }, "documentation":"

      Returns information about the trail.

      " @@ -1807,7 +1866,7 @@ }, "Status":{ "shape":"EventDataStoreStatus", - "documentation":"

      The status of an event data store. Values are ENABLED and PENDING_DELETION.

      ", + "documentation":"

      The status of an event data store.

      ", "deprecated":true, "deprecatedMessage":"Status is no longer returned by ListEventDataStores" }, @@ -1819,7 +1878,7 @@ }, "MultiRegionEnabled":{ "shape":"Boolean", - "documentation":"

      Indicates whether the event data store includes events from all regions, or only from the region in which it was created.

      ", + "documentation":"

      Indicates whether the event data store includes events from all Regions, or only from the Region in which it was created.

      ", "deprecated":true, "deprecatedMessage":"MultiRegionEnabled is no longer returned by ListEventDataStores" }, @@ -1908,7 +1967,10 @@ "enum":[ "CREATED", "ENABLED", - "PENDING_DELETION" + "PENDING_DELETION", + "STARTING_INGESTION", + "STOPPING_INGESTION", + "STOPPED_INGESTION" ] }, "EventDataStoreTerminationProtectedException":{ @@ -1931,7 +1993,7 @@ }, "IncludeManagementEvents":{ "shape":"Boolean", - "documentation":"

      Specify if you want your event selector to include management events for your trail.

      For more information, see Management Events in the CloudTrail User Guide.

      By default, the value is true.

      The first copy of management events is free. You are charged for additional copies of management events that you are logging on any subsequent trail in the same region. For more information about CloudTrail pricing, see CloudTrail Pricing.

      " + "documentation":"

      Specify if you want your event selector to include management events for your trail.

      For more information, see Management Events in the CloudTrail User Guide.

      By default, the value is true.

      The first copy of management events is free. You are charged for additional copies of management events that you are logging on any subsequent trail in the same Region. For more information about CloudTrail pricing, see CloudTrail Pricing.

      " }, "DataResources":{ "shape":"DataResources", @@ -1939,7 +2001,7 @@ }, "ExcludeManagementEventSources":{ "shape":"ExcludeManagementEventSources", - "documentation":"

      An optional list of service event sources from which you do not want management events to be logged on your trail. In this release, the list can be empty (disables the filter), or it can filter out Key Management Service or Amazon RDS Data API events by containing kms.amazonaws.com or rdsdata.amazonaws.com. By default, ExcludeManagementEventSources is empty, and KMS and Amazon RDS Data API events are logged to your trail. You can exclude management event sources only in regions that support the event source.

      " + "documentation":"

      An optional list of service event sources from which you do not want management events to be logged on your trail. In this release, the list can be empty (disables the filter), or it can filter out Key Management Service or Amazon RDS Data API events by containing kms.amazonaws.com or rdsdata.amazonaws.com. By default, ExcludeManagementEventSources is empty, and KMS and Amazon RDS Data API events are logged to your trail. You can exclude management event sources only in Regions that support the event source.

      " } }, "documentation":"

      Use event selectors to further specify the management and data event settings for your trail. By default, trails created without specific event selectors will be configured to log all read and write management events, and no data events. When an event occurs in your account, CloudTrail evaluates the event selector for all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.

      You can configure up to five event selectors for a trail.

      You cannot apply both event selectors and advanced event selectors to a trail.

      " @@ -1983,7 +2045,7 @@ }, "SourceConfig":{ "shape":"SourceConfig", - "documentation":"

      Provides information about the advanced event selectors configured for the channel, and whether the channel applies to all regions or a single region.

      " + "documentation":"

      Provides information about the advanced event selectors configured for the channel, and whether the channel applies to all Regions or a single Region.

      " }, "Destinations":{ "shape":"Destinations", @@ -2018,7 +2080,7 @@ }, "Status":{ "shape":"EventDataStoreStatus", - "documentation":"

      The status of an event data store. Values can be ENABLED and PENDING_DELETION.

      " + "documentation":"

      The status of an event data store.

      " }, "AdvancedEventSelectors":{ "shape":"AdvancedEventSelectors", @@ -2026,7 +2088,7 @@ }, "MultiRegionEnabled":{ "shape":"Boolean", - "documentation":"

      Indicates whether the event data store includes events from all regions, or only from the region in which it was created.

      " + "documentation":"

      Indicates whether the event data store includes events from all Regions, or only from the Region in which it was created.

      " }, "OrganizationEnabled":{ "shape":"Boolean", @@ -2249,7 +2311,7 @@ "members":{ "Name":{ "shape":"String", - "documentation":"

      Specifies the name or the CloudTrail ARN of the trail for which you are requesting status. To get the status of a shadow trail (a replication of the trail in another region), you must specify its ARN. The following is the format of a trail ARN.

      arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail

      " + "documentation":"

      Specifies the name or the CloudTrail ARN of the trail for which you are requesting status. To get the status of a shadow trail (a replication of the trail in another Region), you must specify its ARN. The following is the format of a trail ARN.

      arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail

      " } }, "documentation":"

      The name of a trail about which you want the current status.

      " @@ -2508,10 +2570,10 @@ "members":{ "InsightType":{ "shape":"InsightType", - "documentation":"

      The type of insights to log on a trail. ApiCallRateInsight and ApiErrorRateInsight are valid insight types.

      " + "documentation":"

      The type of Insights events to log on a trail. ApiCallRateInsight and ApiErrorRateInsight are valid Insight types.

      The ApiCallRateInsight Insights type analyzes write-only management API calls that are aggregated per minute against a baseline API call volume.

      The ApiErrorRateInsight Insights type analyzes management API calls that result in error codes. The error is shown if the API call is unsuccessful.

      " } }, - "documentation":"

      A JSON string that contains a list of insight types that are logged on a trail.

      " + "documentation":"

      A JSON string that contains a list of Insights types that are logged on a trail.

      " }, "InsightSelectors":{ "type":"list", @@ -2528,7 +2590,7 @@ "type":"structure", "members":{ }, - "documentation":"

      This exception is thrown when the IAM user or role that is used to create the organization resource lacks one or more required permissions for creating an organization resource in a required service.

      ", + "documentation":"

      This exception is thrown when the IAM identity that is used to create the organization resource lacks one or more required permissions for creating an organization resource in a required service.

      ", "exception":true }, "InsufficientEncryptionPolicyException":{ @@ -2606,7 +2668,7 @@ "type":"structure", "members":{ }, - "documentation":"

      This exception is thrown when an operation is called on a trail from a region other than the region in which the trail was created.

      ", + "documentation":"

      This exception is thrown when an operation is called on a trail from a Region other than the Region in which the trail was created.

      ", "exception":true }, "InvalidImportSourceException":{ @@ -2754,7 +2816,7 @@ "type":"structure", "members":{ }, - "documentation":"

      This exception is thrown when the KMS key does not exist, when the S3 bucket and the KMS key are not in the same region, or when the KMS key associated with the Amazon SNS topic either does not exist or is not in the same region.

      ", + "documentation":"

      This exception is thrown when the KMS key does not exist, when the S3 bucket and the KMS key are not in the same Region, or when the KMS key associated with the Amazon SNS topic either does not exist or is not in the same Region.

      ", "exception":true }, "ListChannelsMaxResultsCount":{ @@ -2811,7 +2873,7 @@ "members":{ "EventDataStores":{ "shape":"EventDataStores", - "documentation":"

      Contains information about event data stores in the account, in the current region.

      " + "documentation":"

      Contains information about event data stores in the account, in the current Region.

      " }, "NextToken":{ "shape":"PaginationToken", @@ -2980,7 +3042,7 @@ "members":{ "ResourceIdList":{ "shape":"ResourceIdList", - "documentation":"

      Specifies a list of trail, event data store, or channel ARNs whose tags will be listed. The list has a limit of 20 ARNs.

      " + "documentation":"

      Specifies a list of trail, event data store, or channel ARNs whose tags will be listed. The list has a limit of 20 ARNs.

      Example trail ARN format: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail

      Example event data store ARN format: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE

      Example channel ARN format: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890

      " }, "NextToken":{ "shape":"String", @@ -3017,7 +3079,7 @@ "members":{ "Trails":{ "shape":"Trails", - "documentation":"

      Returns the name, ARN, and home region of trails in the current account.

      " + "documentation":"

      Returns the name, ARN, and home Region of trails in the current account.

      " }, "NextToken":{ "shape":"String", @@ -3240,7 +3302,7 @@ }, "AdvancedEventSelectors":{ "shape":"AdvancedEventSelectors", - "documentation":"

      Specifies the settings for advanced event selectors. You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. For more information about advanced event selectors, see Logging data events for trails in the CloudTrail User Guide.

      " + "documentation":"

      Specifies the settings for advanced event selectors. You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. For more information about advanced event selectors, see Logging data events in the CloudTrail User Guide.

      " } } }, @@ -3274,7 +3336,7 @@ }, "InsightSelectors":{ "shape":"InsightSelectors", - "documentation":"

      A JSON string that contains the insight types you want to log on a trail. ApiCallRateInsight and ApiErrorRateInsight are valid insight types.

      " + "documentation":"

      A JSON string that contains the insight types you want to log on a trail. ApiCallRateInsight and ApiErrorRateInsight are valid Insight types.

      A JSON string that contains the Insights types you want to log on a trail. ApiCallRateInsight and ApiErrorRateInsight are valid Insight types.

      The ApiCallRateInsight Insights type analyzes write-only management API calls that are aggregated per minute against a baseline API call volume.

      The ApiErrorRateInsight Insights type analyzes management API calls that result in error codes. The error is shown if the API call is unsuccessful.

      " } } }, @@ -3343,6 +3405,12 @@ }, "documentation":"

      A SQL string of criteria about events that you want to collect in an event data store.

      " }, + "QueryAlias":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[a-zA-Z][a-zA-Z0-9._\\-]*$" + }, "QueryIdNotFoundException":{ "type":"structure", "members":{ @@ -3350,6 +3418,18 @@ "documentation":"

      The query ID does not exist or does not map to a query.

      ", "exception":true }, + "QueryParameter":{ + "type":"string", + "max":1024, + "min":1, + "pattern":".*" + }, + "QueryParameters":{ + "type":"list", + "member":{"shape":"QueryParameter"}, + "max":10, + "min":1 + }, "QueryResultColumn":{ "type":"map", "key":{"shape":"QueryResultKey"}, @@ -3460,7 +3540,7 @@ "members":{ "ResourceId":{ "shape":"String", - "documentation":"

      Specifies the ARN of the trail, event data store, or channel from which tags should be removed.

      Example trail ARN format: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail

      Example event data store ARN format: arn:aws:cloudtrail:us-east-2:12345678910:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE

      Example channel ARN format: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890

      " + "documentation":"

      Specifies the ARN of the trail, event data store, or channel from which tags should be removed.

      Example trail ARN format: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail

      Example event data store ARN format: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE

      Example channel ARN format: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890

      " }, "TagsList":{ "shape":"TagsList", @@ -3593,7 +3673,7 @@ }, "MultiRegionEnabled":{ "shape":"Boolean", - "documentation":"

      Indicates whether the event data store is collecting events from all regions, or only from the region in which the event data store was created.

      " + "documentation":"

      Indicates whether the event data store is collecting events from all Regions, or only from the Region in which the event data store was created.

      " }, "OrganizationEnabled":{ "shape":"Boolean", @@ -3647,7 +3727,7 @@ }, "S3BucketRegion":{ "shape":"String", - "documentation":"

      The region associated with the source S3 bucket.

      " + "documentation":"

      The Region associated with the source S3 bucket.

      " }, "S3BucketAccessRoleArn":{ "shape":"String", @@ -3679,7 +3759,7 @@ "members":{ "ApplyToAllRegions":{ "shape":"Boolean", - "documentation":"

      Specifies whether the channel applies to a single region or to all regions.

      " + "documentation":"

      Specifies whether the channel applies to a single Region or to all Regions.

      " }, "AdvancedEventSelectors":{ "shape":"AdvancedEventSelectors", @@ -3688,6 +3768,21 @@ }, "documentation":"

      Contains configuration information about the channel.

      " }, + "StartEventDataStoreIngestionRequest":{ + "type":"structure", + "required":["EventDataStore"], + "members":{ + "EventDataStore":{ + "shape":"EventDataStoreArn", + "documentation":"

      The ARN (or ID suffix of the ARN) of the event data store for which you want to start ingestion.

      " + } + } + }, + "StartEventDataStoreIngestionResponse":{ + "type":"structure", + "members":{ + } + }, "StartImportRequest":{ "type":"structure", "members":{ @@ -3769,7 +3864,6 @@ }, "StartQueryRequest":{ "type":"structure", - "required":["QueryStatement"], "members":{ "QueryStatement":{ "shape":"QueryStatement", @@ -3778,6 +3872,14 @@ "DeliveryS3Uri":{ "shape":"DeliveryS3Uri", "documentation":"

      The URI for the S3 bucket where CloudTrail delivers the query results.

      " + }, + "QueryAlias":{ + "shape":"QueryAlias", + "documentation":"

      The alias that identifies a query template.

      " + }, + "QueryParameters":{ + "shape":"QueryParameters", + "documentation":"

      The query parameters for the specified QueryAlias.

      " } } }, @@ -3790,6 +3892,21 @@ } } }, + "StopEventDataStoreIngestionRequest":{ + "type":"structure", + "required":["EventDataStore"], + "members":{ + "EventDataStore":{ + "shape":"EventDataStoreArn", + "documentation":"

      The ARN (or ID suffix of the ARN) of the event data store for which you want to stop ingestion.

      " + } + } + }, + "StopEventDataStoreIngestionResponse":{ + "type":"structure", + "members":{ + } + }, "StopImportRequest":{ "type":"structure", "required":["ImportId"], @@ -3915,7 +4032,7 @@ }, "SnsTopicName":{ "shape":"String", - "documentation":"

      This field is no longer in use. Use SnsTopicARN.

      ", + "documentation":"

      This field is no longer in use. Use SnsTopicARN.

      ", "deprecated":true }, "SnsTopicARN":{ @@ -3928,11 +4045,11 @@ }, "IsMultiRegionTrail":{ "shape":"Boolean", - "documentation":"

      Specifies whether the trail exists only in one region or exists in all regions.

      " + "documentation":"

      Specifies whether the trail exists only in one Region or exists in all Regions.

      " }, "HomeRegion":{ "shape":"String", - "documentation":"

      The region in which the trail was created.

      " + "documentation":"

      The Region in which the trail was created.

      " }, "TrailARN":{ "shape":"String", @@ -3992,7 +4109,7 @@ "documentation":"

      The Amazon Web Services Region in which a trail was created.

      " } }, - "documentation":"

      Information about a CloudTrail trail, including the trail's name, home region, and Amazon Resource Name (ARN).

      " + "documentation":"

      Information about a CloudTrail trail, including the trail's name, home Region, and Amazon Resource Name (ARN).

      " }, "TrailList":{ "type":"list", @@ -4090,7 +4207,7 @@ }, "MultiRegionEnabled":{ "shape":"Boolean", - "documentation":"

      Specifies whether an event data store collects events from all regions, or only from the region in which it was created.

      " + "documentation":"

      Specifies whether an event data store collects events from all Regions, or only from the Region in which it was created.

      " }, "OrganizationEnabled":{ "shape":"Boolean", @@ -4123,7 +4240,7 @@ }, "Status":{ "shape":"EventDataStoreStatus", - "documentation":"

      The status of an event data store. Values can be ENABLED and PENDING_DELETION.

      " + "documentation":"

      The status of an event data store.

      " }, "AdvancedEventSelectors":{ "shape":"AdvancedEventSelectors", @@ -4131,7 +4248,7 @@ }, "MultiRegionEnabled":{ "shape":"Boolean", - "documentation":"

      Indicates whether the event data store includes events from all regions, or only from the region in which it was created.

      " + "documentation":"

      Indicates whether the event data store includes events from all Regions, or only from the Region in which it was created.

      " }, "OrganizationEnabled":{ "shape":"Boolean", @@ -4185,7 +4302,7 @@ }, "IsMultiRegionTrail":{ "shape":"Boolean", - "documentation":"

      Specifies whether the trail applies only to the current region or to all regions. The default is false. If the trail exists only in the current region and this value is set to true, shadow trails (replications of the trail) will be created in the other regions. If the trail exists in all regions and this value is set to false, the trail will remain in the region where it was created, and its shadow trails in other regions will be deleted. As a best practice, consider using trails that log events in all regions.

      " + "documentation":"

      Specifies whether the trail applies only to the current Region or to all Regions. The default is false. If the trail exists only in the current Region and this value is set to true, shadow trails (replications of the trail) will be created in the other Regions. If the trail exists in all Regions and this value is set to false, the trail will remain in the Region where it was created, and its shadow trails in other Regions will be deleted. As a best practice, consider using trails that log events in all Regions.

      " }, "EnableLogFileValidation":{ "shape":"Boolean", @@ -4227,7 +4344,7 @@ }, "SnsTopicName":{ "shape":"String", - "documentation":"

      This field is no longer in use. Use UpdateTrailResponse$SnsTopicARN.

      ", + "documentation":"

      This field is no longer in use. Use SnsTopicARN.

      ", "deprecated":true }, "SnsTopicARN":{ @@ -4240,7 +4357,7 @@ }, "IsMultiRegionTrail":{ "shape":"Boolean", - "documentation":"

      Specifies whether the trail exists in one region or in all regions.

      " + "documentation":"

      Specifies whether the trail exists in one Region or in all Regions.

      " }, "TrailARN":{ "shape":"String", diff --git a/services/cloudtraildata/pom.xml b/services/cloudtraildata/pom.xml index 41399659927a..45347092dfe9 100644 --- a/services/cloudtraildata/pom.xml +++ b/services/cloudtraildata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT cloudtraildata AWS Java SDK :: Services :: Cloud Trail Data diff --git a/services/cloudwatch/pom.xml b/services/cloudwatch/pom.xml index 3e2604db5214..d35e34316e67 100644 --- a/services/cloudwatch/pom.xml +++ b/services/cloudwatch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT cloudwatch AWS Java SDK :: Services :: Amazon CloudWatch diff --git a/services/cloudwatchevents/pom.xml b/services/cloudwatchevents/pom.xml index 2e95acfd3ed9..5ada6a8a29d3 100644 --- a/services/cloudwatchevents/pom.xml +++ b/services/cloudwatchevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT cloudwatchevents AWS Java SDK :: Services :: Amazon CloudWatch Events diff --git a/services/cloudwatchlogs/pom.xml b/services/cloudwatchlogs/pom.xml index 9b7ff9d5c8a8..8928264651cf 100644 --- a/services/cloudwatchlogs/pom.xml +++ b/services/cloudwatchlogs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT cloudwatchlogs AWS Java SDK :: Services :: Amazon CloudWatch Logs diff --git a/services/cloudwatchlogs/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/cloudwatchlogs/src/main/resources/codegen-resources/endpoint-rule-set.json index 856fd2b63d9d..29f4692677a2 100644 --- a/services/cloudwatchlogs/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/cloudwatchlogs/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", 
"type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,212 +111,276 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - 
"supportsFIPS" + true ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://logs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://logs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "Region" - }, - 
"us-gov-east-1" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], - "endpoint": { - "url": "https://logs.us-gov-east-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ + "type": "tree", + "rules": [ { - "fn": "stringEquals", - "argv": [ + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-gov-east-1" + ] + } + ], + "endpoint": { + "url": "https://logs.us-gov-east-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, { - "ref": "Region" + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-gov-west-1" + ] + } + ], + "endpoint": { + "url": "https://logs.us-gov-west-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, - "us-gov-west-1" + { + "conditions": [], + "endpoint": { + "url": "https://logs-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } ] } - ], - "endpoint": { - "url": "https://logs.us-gov-west-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + ] }, { "conditions": [], - "endpoint": { - "url": "https://logs-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - 
"supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://logs.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": "https://logs.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://logs.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://logs.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/cloudwatchlogs/src/main/resources/codegen-resources/endpoint-tests.json b/services/cloudwatchlogs/src/main/resources/codegen-resources/endpoint-tests.json index 70cfb3fa8acd..11c361c527c0 100644 --- a/services/cloudwatchlogs/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/cloudwatchlogs/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,1711 +1,584 @@ { "testCases": [ { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - 
"url": "https://logs-fips.ap-south-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ap-south-2" - } - }, - { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.ap-south-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "ap-south-2" - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs.ap-south-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "ap-south-2" - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs.ap-south-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "ap-south-2" - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - 
"Region": "ap-south-1" - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "eu-south-1" - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "eu-south-1" - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "eu-south-1" - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "eu-south-1" - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.eu-south-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "eu-south-2" - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.eu-south-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "eu-south-2" - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs.eu-south-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "eu-south-2" - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack disabled", 
- "expect": { - "endpoint": { - "url": "https://logs.eu-south-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "eu-south-2" - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "us-gov-east-1" - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-gov-east-1" - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "us-gov-east-1" - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "us-gov-east-1" - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.me-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "me-central-1" - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.me-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "me-central-1" - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://logs.me-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "me-central-1" - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs.me-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "me-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - 
"UseDualStack": false, - "UseFIPS": true, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "us-iso-west-1" - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.us-iso-west-1.c2s.ic.gov" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-iso-west-1" - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "us-iso-west-1" - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs.us-iso-west-1.c2s.ic.gov" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "us-iso-west-1" - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.eu-central-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, 
- "Region": "eu-central-2" - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "eu-central-2" - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs.eu-central-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "eu-central-2" - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "eu-central-2" - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "us-west-1" - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-west-1" - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "us-west-1" - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "us-west-1" - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and 
DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "us-west-2" - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-west-2" - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "us-west-2" - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "us-west-2" - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "af-south-1" - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "af-south-1" - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "af-south-1" - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs.af-south-1.amazonaws.com" - } - }, - "params": { - 
"UseDualStack": false, - "UseFIPS": false, - "Region": "af-south-1" - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "eu-north-1" - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "eu-north-1" - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "eu-north-1" - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "eu-north-1" - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-3 with FIPS 
disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs.eu-west-1.api.aws" - } - }, - "params": { - 
"UseDualStack": true, - "UseFIPS": false, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ap-northeast-3" - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "ap-northeast-3" - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "ap-northeast-3" - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "ap-northeast-3" - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ap-northeast-2" - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - 
"Region": "ap-northeast-2" - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "ap-northeast-2" - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "ap-northeast-2" - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ap-northeast-1" - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "ap-northeast-1" - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "ap-northeast-1" - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "ap-northeast-1" - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "me-south-1" - } - }, - { - 
"documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "me-south-1" - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "me-south-1" - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "me-south-1" - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "sa-east-1" - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://logs-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "sa-east-1" - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs.sa-east-1.api.aws" + "url": "https://logs.af-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, + "Region": "af-south-1", "UseFIPS": false, - "Region": "sa-east-1" + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://logs.sa-east-1.amazonaws.com" + "url": "https://logs.ap-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-east-1", "UseFIPS": false, - "Region": "sa-east-1" + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs-fips.ap-east-1.api.aws" + "url": "https://logs.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ap-east-1" + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs-fips.ap-east-1.amazonaws.com" + "url": "https://logs.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "ap-east-1" + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs.ap-east-1.api.aws" + "url": "https://logs.ap-northeast-3.amazonaws.com" } }, "params": { - "UseDualStack": true, + "Region": "ap-northeast-3", "UseFIPS": false, - "Region": "ap-east-1" + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs.ap-east-1.amazonaws.com" + "url": "https://logs.ap-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-south-1", "UseFIPS": false, - "Region": 
"ap-east-1" + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://logs.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "cn-north-1" + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs-fips.cn-north-1.amazonaws.com.cn" + "url": "https://logs.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "cn-north-1" + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://logs.ap-southeast-3.amazonaws.com" } }, "params": { - "UseDualStack": true, + "Region": "ap-southeast-3", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs.cn-north-1.amazonaws.com.cn" + "url": "https://logs.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ca-central-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack 
enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs-fips.us-gov-west-1.api.aws" + "url": "https://logs.eu-central-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "us-gov-west-1" + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs.us-gov-west-1.amazonaws.com" + "url": "https://logs.eu-north-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-gov-west-1" + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs.us-gov-west-1.api.aws" + "url": "https://logs.eu-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, + "Region": "eu-south-1", "UseFIPS": false, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs.us-gov-west-1.amazonaws.com" + "url": "https://logs.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "eu-west-1", "UseFIPS": false, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs-fips.ap-southeast-1.api.aws" + 
"url": "https://logs.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ap-southeast-1" + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs-fips.ap-southeast-1.amazonaws.com" + "url": "https://logs.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "ap-southeast-1" + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs.ap-southeast-1.api.aws" + "url": "https://logs.me-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, + "Region": "me-south-1", "UseFIPS": false, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs.ap-southeast-1.amazonaws.com" + "url": "https://logs.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "sa-east-1", "UseFIPS": false, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs-fips.ap-southeast-2.api.aws" + "url": "https://logs.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ap-southeast-2" + "Region": "us-east-1", + "UseFIPS": 
false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs-fips.ap-southeast-2.amazonaws.com" + "url": "https://logs-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs.ap-southeast-2.api.aws" + "url": "https://logs.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": true, + "Region": "us-east-2", "UseFIPS": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs.ap-southeast-2.amazonaws.com" + "url": "https://logs-fips.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "ap-southeast-2" - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "UseDualStack": true, + "Region": "us-east-2", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://logs.us-west-1.amazonaws.com" } }, "params": { - 
"UseDualStack": false, - "UseFIPS": true, - "Region": "us-iso-east-1" + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://logs-fips.us-west-1.amazonaws.com" + } }, "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "us-iso-east-1" + "Region": "us-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs.us-iso-east-1.c2s.ic.gov" + "url": "https://logs.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs-fips.ap-southeast-3.api.aws" + "url": "https://logs-fips.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, + "Region": "us-west-2", "UseFIPS": true, - "Region": "ap-southeast-3" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://logs-fips.ap-southeast-3.amazonaws.com" + "url": "https://logs-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "ap-southeast-3" + "UseDualStack": true } }, { - "documentation": 
"For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://logs.ap-southeast-3.api.aws" + "url": "https://logs.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "ap-southeast-3" + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs.ap-southeast-3.amazonaws.com" + "url": "https://logs.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "ap-southeast-3" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs-fips.ap-southeast-4.api.aws" + "url": "https://logs.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ap-southeast-4" + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://logs-fips.ap-southeast-4.amazonaws.com" + "url": "https://logs-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "ap-southeast-4" + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", 
"expect": { "endpoint": { - "url": "https://logs.ap-southeast-4.api.aws" + "url": "https://logs-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "ap-southeast-4" + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://logs.ap-southeast-4.amazonaws.com" + "url": "https://logs.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "ap-southeast-4" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs-fips.us-east-1.api.aws" + "url": "https://logs.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "us-east-1" + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs-fips.us-east-1.amazonaws.com" + "url": "https://logs.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs.us-east-1.api.aws" + "url": "https://logs.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, 
+ "Region": "us-gov-west-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs.us-east-1.amazonaws.com" + "url": "https://logs.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "us-east-1" + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://logs-fips.us-east-2.api.aws" + "url": "https://logs-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-east-2" + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://logs-fips.us-east-2.amazonaws.com" + "url": "https://logs.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-east-2" + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs.us-east-2.api.aws" + "url": "https://logs.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-2" + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": 
"For region us-iso-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs.us-east-2.amazonaws.com" + "url": "https://logs.us-iso-west-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-west-1", "UseFIPS": false, - "Region": "us-east-2" + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://logs-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "cn-northwest-1" + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://logs-fips.cn-northwest-1.amazonaws.com.cn" + "url": "https://logs-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "cn-northwest-1" + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://logs.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "cn-northwest-1" + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack 
disabled", "expect": { "endpoint": { - "url": "https://logs.cn-northwest-1.amazonaws.com.cn" + "url": "https://logs.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "cn-northwest-1" + "UseDualStack": false } }, { @@ -1714,9 +587,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -1727,9 +600,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -1738,35 +611,35 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://logs.us-isob-east-1.sc2s.sgov.gov" + "url": "https://example.com" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false, + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1776,9 +649,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, 
"Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1788,11 +661,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/cloudwatchlogs/src/main/resources/codegen-resources/service-2.json b/services/cloudwatchlogs/src/main/resources/codegen-resources/service-2.json index e8c3d66f32ae..b6fe88783fa5 100644 --- a/services/cloudwatchlogs/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudwatchlogs/src/main/resources/codegen-resources/service-2.json @@ -74,7 +74,7 @@ {"shape":"OperationAbortedException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

      Creates a log group with the specified name. You can create up to 20,000 log groups per account.

      You must use the following guidelines when naming a log group:

      • Log group names must be unique within a Region for an Amazon Web Services account.

      • Log group names can be between 1 and 512 characters long.

      • Log group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and '#' (number sign)

      When you create a log group, by default the log events in the log group do not expire. To set a retention policy so that events expire and are deleted after a specified time, use PutRetentionPolicy.

      If you associate an KMS key with the log group, ingested data is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested.

      If you attempt to associate a KMS key with the log group but the KMS keydoes not exist or the KMS key is disabled, you receive an InvalidParameterException error.

      CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group. For more information, see Using Symmetric and Asymmetric Keys.

      " + "documentation":"

      Creates a log group with the specified name. You can create up to 20,000 log groups per account.

      You must use the following guidelines when naming a log group:

      • Log group names must be unique within a Region for an Amazon Web Services account.

      • Log group names can be between 1 and 512 characters long.

      • Log group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and '#' (number sign)

      When you create a log group, by default the log events in the log group do not expire. To set a retention policy so that events expire and are deleted after a specified time, use PutRetentionPolicy.

      If you associate an KMS key with the log group, ingested data is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested.

      If you attempt to associate a KMS key with the log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException error.

      CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group. For more information, see Using Symmetric and Asymmetric Keys.

      " }, "CreateLogStream":{ "name":"CreateLogStream", @@ -91,6 +91,21 @@ ], "documentation":"

      Creates a log stream for the specified log group. A log stream is a sequence of log events that originate from a single source, such as an application instance or a resource that is being monitored.

      There is no limit on the number of log streams that you can create for a log group. There is a limit of 50 TPS on CreateLogStream operations, after which transactions are throttled.

      You must use the following guidelines when naming a log stream:

      • Log stream names must be unique within the log group.

      • Log stream names can be between 1 and 512 characters long.

      • Don't use ':' (colon) or '*' (asterisk) characters.

      " }, + "DeleteAccountPolicy":{ + "name":"DeleteAccountPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAccountPolicyRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"OperationAbortedException"} + ], + "documentation":"

      Deletes a CloudWatch Logs account policy.

      To use this operation, you must be signed on with the logs:DeleteDataProtectionPolicy and logs:DeleteAccountPolicy permissions.

      " + }, "DeleteDataProtectionPolicy":{ "name":"DeleteDataProtectionPolicy", "http":{ @@ -225,6 +240,22 @@ ], "documentation":"

      Deletes the specified subscription filter.

      " }, + "DescribeAccountPolicies":{ + "name":"DescribeAccountPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAccountPoliciesRequest"}, + "output":{"shape":"DescribeAccountPoliciesResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

      Returns a list of all CloudWatch Logs account policies in the account.

      " + }, "DescribeDestinations":{ "name":"DescribeDestinations", "http":{ @@ -383,7 +414,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

      Lists log events from the specified log group. You can list all the log events or filter the results using a filter pattern, a time range, and the name of the log stream.

      You must have the logs;FilterLogEvents permission to perform this operation.

      You can specify the log group to search by using either logGroupIdentifier or logGroupName. You must include one of these two parameters, but you can't include both.

      By default, this operation returns as many log events as can fit in 1 MB (up to 10,000 log events) or all the events found within the specified time range. If the results include a token, that means there are more log events available. You can get additional results by specifying the token in a subsequent call. This operation can return empty results while there are more log events available through the token.

      The returned log events are sorted by event timestamp, the timestamp when the event was ingested by CloudWatch Logs, and the ID of the PutLogEvents request.

      If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.

      " + "documentation":"

      Lists log events from the specified log group. You can list all the log events or filter the results using a filter pattern, a time range, and the name of the log stream.

      You must have the logs:FilterLogEvents permission to perform this operation.

      You can specify the log group to search by using either logGroupIdentifier or logGroupName. You must include one of these two parameters, but you can't include both.

      By default, this operation returns as many log events as can fit in 1 MB (up to 10,000 log events) or all the events found within the specified time range. If the results include a token, that means there are more log events available. You can get additional results by specifying the token in a subsequent call. This operation can return empty results while there are more log events available through the token.

      The returned log events are sorted by event timestamp, the timestamp when the event was ingested by CloudWatch Logs, and the ID of the PutLogEvents request.

      If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.

      " }, "GetDataProtectionPolicy":{ "name":"GetDataProtectionPolicy", @@ -494,6 +525,22 @@ "deprecated":true, "deprecatedMessage":"Please use the generic tagging API ListTagsForResource" }, + "PutAccountPolicy":{ + "name":"PutAccountPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutAccountPolicyRequest"}, + "output":{"shape":"PutAccountPolicyResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

      Creates an account-level data protection policy that applies to all log groups in the account. A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level policy.

      Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked.

      If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.

      By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask permission can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command.

      For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.

      To use the PutAccountPolicy operation, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions.

      The PutAccountPolicy operation applies to all log groups in the account. You can also use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.

      " + }, "PutDataProtectionPolicy":{ "name":"PutDataProtectionPolicy", "http":{ @@ -509,7 +556,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

      Creates a data protection policy for the specified log group. A data protection policy can help safeguard sensitive data that's ingested by the log group by auditing and masking the sensitive log data.

      Sensitive data is detected and masked when it is ingested into the log group. When you set a data protection policy, log events ingested into the log group before that time are not masked.

      By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command.

      For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.

      " + "documentation":"

      Creates a data protection policy for the specified log group. A data protection policy can help safeguard sensitive data that's ingested by the log group by auditing and masking the sensitive log data.

      Sensitive data is detected and masked when it is ingested into the log group. When you set a data protection policy, log events ingested into the log group before that time are not masked.

      By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask permission can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command.

      For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.

      The PutDataProtectionPolicy operation applies to only the specified log group. You can also use PutAccountPolicy to create an account-level data protection policy that applies to all log groups in the account, including both existing log groups and log groups that are created later. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.

      " }, "PutDestination":{ "name":"PutDestination", @@ -556,7 +603,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"UnrecognizedClientException"} ], - "documentation":"

      Uploads a batch of log events to the specified log stream.

      The sequence token is now ignored in PutLogEvents actions. PutLogEvents actions are always accepted and never return InvalidSequenceTokenException or DataAlreadyAcceptedException even if the sequence token is not valid. You can use parallel PutLogEvents actions on the same log stream.

      The batch of events must satisfy the following constraints:

      • The maximum batch size is 1,048,576 bytes. This size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event.

      • None of the log events in the batch can be more than 2 hours in the future.

      • None of the log events in the batch can be more than 14 days in the past. Also, none of the log events can be from earlier than the retention period of the log group.

      • The log events in the batch must be in chronological order by their timestamp. The timestamp is the time that the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. (In Amazon Web Services Tools for PowerShell and the Amazon Web Services SDK for .NET, the timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss. For example, 2017-09-15T13:45:30.)

      • A batch of log events in a single request cannot span more than 24 hours. Otherwise, the operation fails.

      • The maximum number of log events in a batch is 10,000.

      • The quota of five requests per second per log stream has been removed. Instead, PutLogEvents actions are throttled based on a per-second per-account quota. You can request an increase to the per-second throttling quota by using the Service Quotas service.

      If a call to PutLogEvents returns \"UnrecognizedClientException\" the most likely cause is a non-valid Amazon Web Services access key ID or secret key.

      " + "documentation":"

      Uploads a batch of log events to the specified log stream.

      The sequence token is now ignored in PutLogEvents actions. PutLogEvents actions are always accepted and never return InvalidSequenceTokenException or DataAlreadyAcceptedException even if the sequence token is not valid. You can use parallel PutLogEvents actions on the same log stream.

      The batch of events must satisfy the following constraints:

      • The maximum batch size is 1,048,576 bytes. This size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event.

      • None of the log events in the batch can be more than 2 hours in the future.

      • None of the log events in the batch can be more than 14 days in the past. Also, none of the log events can be from earlier than the retention period of the log group.

      • The log events in the batch must be in chronological order by their timestamp. The timestamp is the time that the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. (In Amazon Web Services Tools for PowerShell and the Amazon Web Services SDK for .NET, the timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss. For example, 2017-09-15T13:45:30.)

      • A batch of log events in a single request cannot span more than 24 hours. Otherwise, the operation fails.

      • Each log event can be no larger than 256 KB.

      • The maximum number of log events in a batch is 10,000.

      • The quota of five requests per second per log stream has been removed. Instead, PutLogEvents actions are throttled based on a per-second per-account quota. You can request an increase to the per-second throttling quota by using the Service Quotas service.

      If a call to PutLogEvents returns \"UnrecognizedClientException\" the most likely cause is a non-valid Amazon Web Services access key ID or secret key.

      " }, "PutMetricFilter":{ "name":"PutMetricFilter", @@ -634,7 +681,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

      Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.

      The following destinations are supported for subscription filters:

      • An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery.

      • A logical destination that belongs to a different account, for cross-account delivery.

      • An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery.

      • An Lambda function that belongs to the same account as the subscription filter, for same-account delivery.

      Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName.

      To perform a PutSubscriptionFilter operation, you must also have the iam:PassRole permission.

      " + "documentation":"

      Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.

      The following destinations are supported for subscription filters:

      • An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery.

      • A logical destination that belongs to a different account, for cross-account delivery.

      • An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery.

      • A Lambda function that belongs to the same account as the subscription filter, for same-account delivery.

      Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName.

      To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.

      " }, "StartQuery":{ "name":"StartQuery", @@ -651,7 +698,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

      Schedules a query of a log group using CloudWatch Logs Insights. You specify the log group and time range to query and the query string to use.

      For more information, see CloudWatch Logs Insights Query Syntax.

      Queries time out after 15 minutes of runtime. If your queries are timing out, reduce the time range being searched or partition your query into a number of queries.

      If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to start a query in a linked source account. For more information, see CloudWatch cross-account observability. For a cross-account StartQuery operation, the query definition must be defined in the monitoring account.

      You can have up to 20 concurrent CloudWatch Logs insights queries, including queries that have been added to dashboards.

      " + "documentation":"

      Schedules a query of a log group using CloudWatch Logs Insights. You specify the log group and time range to query and the query string to use.

      For more information, see CloudWatch Logs Insights Query Syntax.

      Queries time out after 60 minutes of runtime. If your queries are timing out, reduce the time range being searched or partition your query into a number of queries.

      If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to start a query in a linked source account. For more information, see CloudWatch cross-account observability. For a cross-account StartQuery operation, the query definition must be defined in the monitoring account.

      You can have up to 30 concurrent CloudWatch Logs insights queries, including queries that have been added to dashboards.

      " }, "StopQuery":{ "name":"StopQuery", @@ -758,6 +805,41 @@ "max":20, "min":0 }, + "AccountPolicies":{ + "type":"list", + "member":{"shape":"AccountPolicy"} + }, + "AccountPolicy":{ + "type":"structure", + "members":{ + "policyName":{ + "shape":"PolicyName", + "documentation":"

      The name of the account policy.

      " + }, + "policyDocument":{ + "shape":"AccountPolicyDocument", + "documentation":"

      The policy document for this account policy.

      The JSON specified in policyDocument can be up to 30,720 characters.

      " + }, + "lastUpdatedTime":{ + "shape":"Timestamp", + "documentation":"

      The date and time that this policy was most recently updated.

      " + }, + "policyType":{ + "shape":"PolicyType", + "documentation":"

      The type of policy for this account policy.

      " + }, + "scope":{ + "shape":"Scope", + "documentation":"

      The scope of the account policy.

      " + }, + "accountId":{ + "shape":"AccountId", + "documentation":"

      The Amazon Web Services account ID that the policy applies to.

      " + } + }, + "documentation":"

      A structure that contains information about one CloudWatch Logs account policy.

      " + }, + "AccountPolicyDocument":{"type":"string"}, "AmazonResourceName":{ "type":"string", "max":1011, @@ -895,9 +977,26 @@ }, "Days":{ "type":"integer", - "documentation":"

      The number of days to retain the log events in the specified log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 2192, 2557, 2922, 3288, and 3653.

      To set a log group so that its log events do not expire, use DeleteRetentionPolicy.

      " + "documentation":"

      The number of days to retain the log events in the specified log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1096, 1827, 2192, 2557, 2922, 3288, and 3653.

      To set a log group so that its log events do not expire, use DeleteRetentionPolicy.

      " }, "DefaultValue":{"type":"double"}, + "DeleteAccountPolicyRequest":{ + "type":"structure", + "required":[ + "policyName", + "policyType" + ], + "members":{ + "policyName":{ + "shape":"PolicyName", + "documentation":"

      The name of the policy to delete.

      " + }, + "policyType":{ + "shape":"PolicyType", + "documentation":"

      The type of policy to delete. Currently, the only valid value is DATA_PROTECTION_POLICY.

      " + } + } + }, "DeleteDataProtectionPolicyRequest":{ "type":"structure", "required":["logGroupIdentifier"], @@ -1018,6 +1117,33 @@ } }, "Descending":{"type":"boolean"}, + "DescribeAccountPoliciesRequest":{ + "type":"structure", + "required":["policyType"], + "members":{ + "policyType":{ + "shape":"PolicyType", + "documentation":"

      Use this parameter to limit the returned policies to only the policies that match the policy type that you specify. Currently, the only valid value is DATA_PROTECTION_POLICY.

      " + }, + "policyName":{ + "shape":"PolicyName", + "documentation":"

      Use this parameter to limit the returned policies to only the policy with the name that you specify.

      " + }, + "accountIdentifiers":{ + "shape":"AccountIds", + "documentation":"

      If you are using an account that is set up as a monitoring account for CloudWatch unified cross-account observability, you can use this to specify the account ID of a source account. If you do, the operation returns the account policy for the specified account. Currently, you can specify only one account ID in this parameter.

      If you omit this parameter, only the policy in the current account is returned.

      " + } + } + }, + "DescribeAccountPoliciesResponse":{ + "type":"structure", + "members":{ + "accountPolicies":{ + "shape":"AccountPolicies", + "documentation":"

      An array of structures that contain information about the CloudWatch Logs account policies that match the specified filters.

      " + } + } + }, "DescribeDestinationsRequest":{ "type":"structure", "members":{ @@ -1094,7 +1220,7 @@ }, "logGroupNamePattern":{ "shape":"LogGroupNamePattern", - "documentation":"

      If you specify a string for this parameter, the operation returns only log groups that have names that match the string based on a case-sensitive substring search. For example, if you specify Foo, log groups named FooBar, aws/Foo, and GroupFoo would match, but foo, F/o/o and Froo would not match.

      logGroupNamePattern and logGroupNamePrefix are mutually exclusive. Only one of these parameters can be passed.

      " + "documentation":"

      If you specify a string for this parameter, the operation returns only log groups that have names that match the string based on a case-sensitive substring search. For example, if you specify Foo, log groups named FooBar, aws/Foo, and GroupFoo would match, but foo, F/o/o and Froo would not match.

      If you specify logGroupNamePattern in your request, then only arn, creationTime, and logGroupName are included in the response.

      logGroupNamePattern and logGroupNamePrefix are mutually exclusive. Only one of these parameters can be passed.

      " }, "nextToken":{ "shape":"NextToken", @@ -1106,7 +1232,7 @@ }, "includeLinkedAccounts":{ "shape":"IncludeLinkedAccounts", - "documentation":"

      If you are using a monitoring account, set this to True to have the operation return log groups in the accounts listed in accountIdentifiers.

      If this parameter is set to true and accountIdentifiers contains a null value, the operation returns all log groups in the monitoring account and all log groups in all source accounts that are linked to the monitoring account.

      If you specify includeLinkedAccounts in your request, then metricFilterCount, retentionInDays, and storedBytes are not included in the response.

      " + "documentation":"

      If you are using a monitoring account, set this to True to have the operation return log groups in the accounts listed in accountIdentifiers.

      If this parameter is set to true and accountIdentifiers contains a null value, the operation returns all log groups in the monitoring account and all log groups in all source accounts that are linked to the monitoring account.

      " } } }, @@ -1775,11 +1901,19 @@ }, "status":{ "shape":"QueryStatus", - "documentation":"

      The status of the most recent running of the query. Possible values are Cancelled, Complete, Failed, Running, Scheduled, Timeout, and Unknown.

      Queries time out after 15 minutes of runtime. To avoid having your queries time out, reduce the time range being searched or partition your query into a number of queries.

      " + "documentation":"

      The status of the most recent running of the query. Possible values are Cancelled, Complete, Failed, Running, Scheduled, Timeout, and Unknown.

      Queries time out after 60 minutes of runtime. To avoid having your queries time out, reduce the time range being searched or partition your query into a number of queries.

      " } } }, "IncludeLinkedAccounts":{"type":"boolean"}, + "InheritedProperties":{ + "type":"list", + "member":{"shape":"InheritedProperty"} + }, + "InheritedProperty":{ + "type":"string", + "enum":["ACCOUNT_DATA_PROTECTION"] + }, "InputLogEvent":{ "type":"structure", "required":[ @@ -1793,7 +1927,7 @@ }, "message":{ "shape":"EventMessage", - "documentation":"

      The raw event message.

      " + "documentation":"

      The raw event message. Each log event can be no larger than 256 KB.

      " } }, "documentation":"

      Represents a log event, which is a record of activity that was recorded by the application or resource being monitored.

      " @@ -1918,6 +2052,10 @@ "dataProtectionStatus":{ "shape":"DataProtectionStatus", "documentation":"

      Displays whether this log group has a protection policy, or whether it had one in the past. For more information, see PutDataProtectionPolicy.

      " + }, + "inheritedProperties":{ + "shape":"InheritedProperties", + "documentation":"

      Displays all the properties that this log group has inherited from account-level settings.

      " } }, "documentation":"

      Represents a log group.

      " @@ -2194,6 +2332,45 @@ "min":1 }, "PolicyName":{"type":"string"}, + "PolicyType":{ + "type":"string", + "enum":["DATA_PROTECTION_POLICY"] + }, + "PutAccountPolicyRequest":{ + "type":"structure", + "required":[ + "policyName", + "policyDocument", + "policyType" + ], + "members":{ + "policyName":{ + "shape":"PolicyName", + "documentation":"

      A name for the policy. This must be unique within the account.

      " + }, + "policyDocument":{ + "shape":"AccountPolicyDocument", + "documentation":"

      Specify the data protection policy, in JSON.

      This policy must include two JSON blocks:

      • The first block must include both a DataIdentifier array and an Operation property with an Audit action. The DataIdentifier array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask.

        The Operation property with an Audit action is required to find the sensitive data terms. This Audit action must contain a FindingsDestination object. You can optionally use that FindingsDestination object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Kinesis Data Firehose streams, and S3 buckets, they must already exist.

      • The second block must include both a DataIdentifier array and an Operation property with a Deidentify action. The DataIdentifier array must exactly match the DataIdentifier array in the first block of the policy.

        The Operation property with the Deidentify action is what actually masks the data, and it must contain the \"MaskConfig\": {} object. The \"MaskConfig\": {} object must be empty.

      For an example data protection policy, see the Examples section on this page.

      The contents of the two DataIdentifier arrays must match exactly.

      In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is different than the operation's policyName parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch.

      The JSON specified in policyDocument can be up to 30,720 characters.

      " + }, + "policyType":{ + "shape":"PolicyType", + "documentation":"

      Currently the only valid value for this parameter is DATA_PROTECTION_POLICY.

      " + }, + "scope":{ + "shape":"Scope", + "documentation":"

      Currently the only valid value for this parameter is ALL, which specifies that the data protection policy applies to all log groups in the account. If you omit this parameter, the default of ALL is used.

      " + } + } + }, + "PutAccountPolicyResponse":{ + "type":"structure", + "members":{ + "accountPolicy":{ + "shape":"AccountPolicy", + "documentation":"

      The account policy that you created.

      " + } + } + }, "PutDataProtectionPolicyRequest":{ "type":"structure", "required":[ @@ -2207,7 +2384,7 @@ }, "policyDocument":{ "shape":"DataProtectionPolicyDocument", - "documentation":"

      Specify the data protection policy, in JSON.

      This policy must include two JSON blocks:

      • The first block must include both a DataIdentifer array and an Operation property with an Audit action. The DataIdentifer array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask.

        The Operation property with an Audit action is required to find the sensitive data terms. This Audit action must contain a FindingsDestination object. You can optionally use that FindingsDestination object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Kinesis Data Firehose streams, and S3 buckets, they must already exist.

      • The second block must include both a DataIdentifer array and an Operation property with an Deidentify action. The DataIdentifer array must exactly match the DataIdentifer array in the first block of the policy.

        The Operation property with the Deidentify action is what actually masks the data, and it must contain the \"MaskConfig\": {} object. The \"MaskConfig\": {} object must be empty.

      For an example data protection policy, see the Examples section on this page.

      The contents of two DataIdentifer arrays must match exactly.

      " + "documentation":"

      Specify the data protection policy, in JSON.

      This policy must include two JSON blocks:

      • The first block must include both a DataIdentifier array and an Operation property with an Audit action. The DataIdentifier array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask.

        The Operation property with an Audit action is required to find the sensitive data terms. This Audit action must contain a FindingsDestination object. You can optionally use that FindingsDestination object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Kinesis Data Firehose streams, and S3 buckets, they must already exist.

      • The second block must include both a DataIdentifier array and an Operation property with a Deidentify action. The DataIdentifier array must exactly match the DataIdentifier array in the first block of the policy.

        The Operation property with the Deidentify action is what actually masks the data, and it must contain the \"MaskConfig\": {} object. The \"MaskConfig\": {} object must be empty.

      For an example data protection policy, see the Examples section on this page.

      The contents of the two DataIdentifier arrays must match exactly.

      In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch.

      The JSON specified in policyDocument can be up to 30,720 characters.

      " } } }, @@ -2245,7 +2422,7 @@ }, "forceUpdate":{ "shape":"ForceUpdate", - "documentation":"

      Specify true if you are updating an existing destination policy to grant permission to an organization ID instead of granting permission to individual AWS accounts. Before you update a destination policy this way, you must first update the subscription filters in the accounts that send logs to this destination. If you do not, the subscription filters might stop working. By specifying true for forceUpdate, you are affirming that you have already updated the subscription filters. For more information, see Updating an existing cross-account subscription

      If you omit this parameter, the default of false is used.

      " + "documentation":"

      Specify true if you are updating an existing destination policy to grant permission to an organization ID instead of granting permission to individual Amazon Web Services accounts. Before you update a destination policy this way, you must first update the subscription filters in the accounts that send logs to this destination. If you do not, the subscription filters might stop working. By specifying true for forceUpdate, you are affirming that you have already updated the subscription filters. For more information, see Updating an existing cross-account subscription

      If you omit this parameter, the default of false is used.

      " } } }, @@ -2679,6 +2856,10 @@ "type":"string", "min":1 }, + "Scope":{ + "type":"string", + "enum":["ALL"] + }, "SearchedLogStream":{ "type":"structure", "members":{ diff --git a/services/codeartifact/pom.xml b/services/codeartifact/pom.xml index 7ea38d313ea0..240ff83fbcb1 100644 --- a/services/codeartifact/pom.xml +++ b/services/codeartifact/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT codeartifact AWS Java SDK :: Services :: Codeartifact diff --git a/services/codebuild/pom.xml b/services/codebuild/pom.xml index 92e494b1b827..c668e68c5e46 100644 --- a/services/codebuild/pom.xml +++ b/services/codebuild/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT codebuild AWS Java SDK :: Services :: AWS Code Build diff --git a/services/codecatalyst/pom.xml b/services/codecatalyst/pom.xml index 92e7e53b8be0..c041611c9d17 100644 --- a/services/codecatalyst/pom.xml +++ b/services/codecatalyst/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT codecatalyst AWS Java SDK :: Services :: Code Catalyst diff --git a/services/codecommit/pom.xml b/services/codecommit/pom.xml index 72188c45e121..e3eca675ee81 100644 --- a/services/codecommit/pom.xml +++ b/services/codecommit/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT codecommit AWS Java SDK :: Services :: AWS CodeCommit diff --git a/services/codedeploy/pom.xml b/services/codedeploy/pom.xml index 83abe104a58a..d151627c76df 100644 --- a/services/codedeploy/pom.xml +++ b/services/codedeploy/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT codedeploy AWS Java SDK :: Services :: AWS CodeDeploy diff --git a/services/codeguruprofiler/pom.xml b/services/codeguruprofiler/pom.xml index 5c7f4ce530bb..37192adaac82 100644 --- a/services/codeguruprofiler/pom.xml +++ b/services/codeguruprofiler/pom.xml @@ 
-21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT codeguruprofiler AWS Java SDK :: Services :: CodeGuruProfiler diff --git a/services/codegurureviewer/pom.xml b/services/codegurureviewer/pom.xml index d324aaf5163c..a1a55cde16ad 100644 --- a/services/codegurureviewer/pom.xml +++ b/services/codegurureviewer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT codegurureviewer AWS Java SDK :: Services :: CodeGuru Reviewer diff --git a/services/codegurusecurity/pom.xml b/services/codegurusecurity/pom.xml new file mode 100644 index 000000000000..a07c6f20d4b4 --- /dev/null +++ b/services/codegurusecurity/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.20.93-SNAPSHOT + + codegurusecurity + AWS Java SDK :: Services :: Code Guru Security + The AWS Java SDK for Code Guru Security module holds the client classes that are used for + communicating with Code Guru Security. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.codegurusecurity + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/codegurusecurity/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/codegurusecurity/src/main/resources/codegen-resources/endpoint-rule-set.json new file mode 100644 index 000000000000..9c6f96dac9a8 --- /dev/null +++ b/services/codegurusecurity/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack 
endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + 
"conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://codeguru-security-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://codeguru-security-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://codeguru-security.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://codeguru-security.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] + } + ] +} \ No newline at end of file diff --git a/services/codegurusecurity/src/main/resources/codegen-resources/endpoint-tests.json b/services/codegurusecurity/src/main/resources/codegen-resources/endpoint-tests.json new file mode 100644 index 000000000000..df56b8cd3835 --- /dev/null +++ b/services/codegurusecurity/src/main/resources/codegen-resources/endpoint-tests.json @@ -0,0 +1,295 @@ +{ + "testCases": [ + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security.us-gov-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS 
disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-iso-east-1" + } + }, + 
{ + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security-fips.us-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security.us-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security.us-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-isob-east-1" + } + }, + { + 
"documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/services/codegurusecurity/src/main/resources/codegen-resources/paginators-1.json 
b/services/codegurusecurity/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..03e1cbfce771 --- /dev/null +++ b/services/codegurusecurity/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,22 @@ +{ + "pagination": { + "GetFindings": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "findings" + }, + "ListFindingsMetrics": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "findingsMetrics" + }, + "ListScans": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "summaries" + } + } +} diff --git a/services/codegurusecurity/src/main/resources/codegen-resources/service-2.json b/services/codegurusecurity/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..024d90e9101e --- /dev/null +++ b/services/codegurusecurity/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1514 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-05-10", + "endpointPrefix":"codeguru-security", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon CodeGuru Security", + "serviceId":"CodeGuru Security", + "signatureVersion":"v4", + "signingName":"codeguru-security", + "uid":"codeguru-security-2018-05-10" + }, + "operations":{ + "BatchGetFindings":{ + "name":"BatchGetFindings", + "http":{ + "method":"POST", + "requestUri":"/batchGetFindings", + "responseCode":200 + }, + "input":{"shape":"BatchGetFindingsRequest"}, + "output":{"shape":"BatchGetFindingsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

      Returns a list of all requested findings.

      " + }, + "CreateScan":{ + "name":"CreateScan", + "http":{ + "method":"POST", + "requestUri":"/scans", + "responseCode":200 + }, + "input":{"shape":"CreateScanRequest"}, + "output":{"shape":"CreateScanResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

      Use to create a scan using code uploaded to an S3 bucket.

      " + }, + "CreateUploadUrl":{ + "name":"CreateUploadUrl", + "http":{ + "method":"POST", + "requestUri":"/uploadUrl", + "responseCode":200 + }, + "input":{"shape":"CreateUploadUrlRequest"}, + "output":{"shape":"CreateUploadUrlResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

      Generates a pre-signed URL and request headers used to upload a code resource.

      You can upload your code resource to the URL and add the request headers using any HTTP client.

      " + }, + "GetAccountConfiguration":{ + "name":"GetAccountConfiguration", + "http":{ + "method":"GET", + "requestUri":"/accountConfiguration/get", + "responseCode":200 + }, + "input":{"shape":"GetAccountConfigurationRequest"}, + "output":{"shape":"GetAccountConfigurationResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

      Use to get account level configuration.

      " + }, + "GetFindings":{ + "name":"GetFindings", + "http":{ + "method":"GET", + "requestUri":"/findings/{scanName}", + "responseCode":200 + }, + "input":{"shape":"GetFindingsRequest"}, + "output":{"shape":"GetFindingsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

      Returns a list of all findings generated by a particular scan.

      " + }, + "GetMetricsSummary":{ + "name":"GetMetricsSummary", + "http":{ + "method":"GET", + "requestUri":"/metrics/summary", + "responseCode":200 + }, + "input":{"shape":"GetMetricsSummaryRequest"}, + "output":{"shape":"GetMetricsSummaryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

      Returns top level metrics about an account from a specified date, including number of open findings, the categories with most findings, the scans with most open findings, and scans with most open critical findings.

      " + }, + "GetScan":{ + "name":"GetScan", + "http":{ + "method":"GET", + "requestUri":"/scans/{scanName}", + "responseCode":200 + }, + "input":{"shape":"GetScanRequest"}, + "output":{"shape":"GetScanResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

      Returns details about a scan, including whether or not a scan has completed.

      " + }, + "ListFindingsMetrics":{ + "name":"ListFindingsMetrics", + "http":{ + "method":"GET", + "requestUri":"/metrics/findings", + "responseCode":200 + }, + "input":{"shape":"ListFindingsMetricsRequest"}, + "output":{"shape":"ListFindingsMetricsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

      Returns metrics about all findings in an account within a specified time range.

      " + }, + "ListScans":{ + "name":"ListScans", + "http":{ + "method":"GET", + "requestUri":"/scans", + "responseCode":200 + }, + "input":{"shape":"ListScansRequest"}, + "output":{"shape":"ListScansResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

      Returns a list of all the scans in an account.

      " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

      Returns a list of all tags associated with a scan.

      " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}", + "responseCode":204 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

      Use to add one or more tags to an existing scan.

      " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}", + "responseCode":204 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

      Use to remove one or more tags from an existing scan.

      ", + "idempotent":true + }, + "UpdateAccountConfiguration":{ + "name":"UpdateAccountConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/updateAccountConfiguration", + "responseCode":200 + }, + "input":{"shape":"UpdateAccountConfigurationRequest"}, + "output":{"shape":"UpdateAccountConfigurationResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

      Use to update account-level configuration with an encryption key.

      " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":[ + "errorCode", + "message" + ], + "members":{ + "errorCode":{ + "shape":"String", + "documentation":"

      The identifier for the error.

      " + }, + "message":{ + "shape":"String", + "documentation":"

      Description of the error.

      " + }, + "resourceId":{ + "shape":"String", + "documentation":"

      The identifier for the resource you don't have access to.

      " + }, + "resourceType":{ + "shape":"String", + "documentation":"

      The type of resource you don't have access to.

      " + } + }, + "documentation":"

      You do not have sufficient access to perform this action.

      ", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AccountFindingsMetric":{ + "type":"structure", + "members":{ + "closedFindings":{ + "shape":"FindingMetricsValuePerSeverity", + "documentation":"

      The number of closed findings of each severity in an account on the specified date.

      " + }, + "date":{ + "shape":"Timestamp", + "documentation":"

      The date from which the finding metrics were retrieved.

      " + }, + "meanTimeToClose":{ + "shape":"FindingMetricsValuePerSeverity", + "documentation":"

      The average time it takes to close findings of each severity in days.

      " + }, + "newFindings":{ + "shape":"FindingMetricsValuePerSeverity", + "documentation":"

      The number of new findings of each severity in account on the specified date.

      " + }, + "openFindings":{ + "shape":"FindingMetricsValuePerSeverity", + "documentation":"

      The number of open findings of each severity in an account as of the specified date.

      " + } + }, + "documentation":"

      A summary of findings metrics in an account.

      " + }, + "AnalysisType":{ + "type":"string", + "enum":[ + "Security", + "All" + ] + }, + "BatchGetFindingsError":{ + "type":"structure", + "required":[ + "errorCode", + "findingId", + "message", + "scanName" + ], + "members":{ + "errorCode":{ + "shape":"ErrorCode", + "documentation":"

      A code associated with the type of error.

      " + }, + "findingId":{ + "shape":"String", + "documentation":"

      The finding ID of the finding that was not fetched.

      " + }, + "message":{ + "shape":"String", + "documentation":"

      Describes the error.

      " + }, + "scanName":{ + "shape":"ScanName", + "documentation":"

      The name of the scan that generated the finding.

      " + } + }, + "documentation":"

      Contains information about the error that caused a finding to fail to be retrieved.

      " + }, + "BatchGetFindingsErrors":{ + "type":"list", + "member":{"shape":"BatchGetFindingsError"} + }, + "BatchGetFindingsRequest":{ + "type":"structure", + "required":["findingIdentifiers"], + "members":{ + "findingIdentifiers":{ + "shape":"FindingIdentifiers", + "documentation":"

      A list of finding identifiers. Each identifier consists of a scanName and a findingId. You retrieve the findingId when you call GetFindings.

      " + } + } + }, + "BatchGetFindingsResponse":{ + "type":"structure", + "required":[ + "failedFindings", + "findings" + ], + "members":{ + "failedFindings":{ + "shape":"BatchGetFindingsErrors", + "documentation":"

      A list of errors for individual findings which were not fetched. Each BatchGetFindingsError contains the scanName, findingId, errorCode and error message.

      " + }, + "findings":{ + "shape":"Findings", + "documentation":"

      A list of all requested findings.

      " + } + } + }, + "CategoriesWithMostFindings":{ + "type":"list", + "member":{"shape":"CategoryWithFindingNum"}, + "max":5, + "min":0 + }, + "CategoryWithFindingNum":{ + "type":"structure", + "members":{ + "categoryName":{ + "shape":"String", + "documentation":"

      The name of the finding category. A finding category is determined by the detector that detected the finding.

      " + }, + "findingNumber":{ + "shape":"Integer", + "documentation":"

      The number of open findings in the category.

      " + } + }, + "documentation":"

      Information about a finding category with open findings.

      " + }, + "ClientToken":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[\\S]+$" + }, + "CodeLine":{ + "type":"structure", + "members":{ + "content":{ + "shape":"String", + "documentation":"

      The code that contains a vulnerability.

      " + }, + "number":{ + "shape":"Integer", + "documentation":"

      The code line number.

      " + } + }, + "documentation":"

      The line of code where a finding was detected.

      " + }, + "CodeSnippet":{ + "type":"list", + "member":{"shape":"CodeLine"} + }, + "ConflictException":{ + "type":"structure", + "required":[ + "errorCode", + "message", + "resourceId", + "resourceType" + ], + "members":{ + "errorCode":{ + "shape":"String", + "documentation":"

      The identifier for the error.

      " + }, + "message":{ + "shape":"String", + "documentation":"

      Description of the error.

      " + }, + "resourceId":{ + "shape":"String", + "documentation":"

      The identifier for the service resource associated with the request.

      " + }, + "resourceType":{ + "shape":"String", + "documentation":"

      The type of resource associated with the request.

      " + } + }, + "documentation":"

      The requested operation would cause a conflict with the current state of a service resource associated with the request. Resolve the conflict before retrying this request.

      ", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "CreateScanRequest":{ + "type":"structure", + "required":[ + "resourceId", + "scanName" + ], + "members":{ + "analysisType":{ + "shape":"AnalysisType", + "documentation":"

      The type of analysis you want CodeGuru Security to perform in the scan, either Security or All. The Security type only generates findings related to security. The All type generates both security findings and quality findings. Defaults to Security type if missing.

      " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

      The idempotency token for the request. Amazon CodeGuru Security uses this value to prevent the accidental creation of duplicate scans if there are failures and retries.

      ", + "idempotencyToken":true + }, + "resourceId":{ + "shape":"ResourceId", + "documentation":"

      The identifier for an input resource used to create a scan.

      " + }, + "scanName":{ + "shape":"ScanName", + "documentation":"

      The unique name that CodeGuru Security uses to track revisions across multiple scans of the same resource. Only allowed for a STANDARD scan type. If not specified, it will be auto-generated.

      " + }, + "scanType":{ + "shape":"ScanType", + "documentation":"

      The type of scan, either Standard or Express. Defaults to Standard type if missing.

      Express scans run on limited resources and use a limited set of detectors to analyze your code in near-real time. Standard scans have standard resource limits and use the full set of detectors to analyze your code.

      " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

      An array of key-value pairs used to tag a scan. A tag is a custom attribute label with two parts:

      • A tag key. For example, CostCenter, Environment, or Secret. Tag keys are case sensitive.

      • An optional tag value field. For example, 111122223333, Production, or a team name. Omitting the tag value is the same as using an empty string. Tag values are case sensitive.

      " + } + } + }, + "CreateScanResponse":{ + "type":"structure", + "required":[ + "resourceId", + "runId", + "scanName", + "scanState" + ], + "members":{ + "resourceId":{ + "shape":"ResourceId", + "documentation":"

      The identifier for the resource object that contains resources that were scanned.

      " + }, + "runId":{ + "shape":"Uuid", + "documentation":"

      UUID that identifies the individual scan run.

      " + }, + "scanName":{ + "shape":"ScanName", + "documentation":"

      The name of the scan.

      " + }, + "scanNameArn":{ + "shape":"ScanNameArn", + "documentation":"

      The ARN for the scan name.

      " + }, + "scanState":{ + "shape":"ScanState", + "documentation":"

      The current state of the scan. Returns either InProgress, Successful, or Failed.

      " + } + } + }, + "CreateUploadUrlRequest":{ + "type":"structure", + "required":["scanName"], + "members":{ + "scanName":{ + "shape":"ScanName", + "documentation":"

      The name of the scan that will use the uploaded resource. CodeGuru Security uses the unique scan name to track revisions across multiple scans of the same resource. Use this scanName when you call CreateScan on the code resource you upload to this URL.

      " + } + } + }, + "CreateUploadUrlResponse":{ + "type":"structure", + "required":[ + "codeArtifactId", + "requestHeaders", + "s3Url" + ], + "members":{ + "codeArtifactId":{ + "shape":"Uuid", + "documentation":"

      The identifier for the uploaded code resource.

      " + }, + "requestHeaders":{ + "shape":"RequestHeaderMap", + "documentation":"

      A set of key-value pairs that contain the required headers when uploading your resource.

      " + }, + "s3Url":{ + "shape":"S3Url", + "documentation":"

      A pre-signed S3 URL. You can upload the code file you want to scan and add the required requestHeaders using any HTTP client.

      " + } + } + }, + "DetectorTags":{ + "type":"list", + "member":{"shape":"String"} + }, + "Double":{ + "type":"double", + "box":true + }, + "EncryptionConfig":{ + "type":"structure", + "members":{ + "kmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

      The KMS key ARN to use for encryption. This must be provided as a header when uploading your code resource.

      " + } + }, + "documentation":"

      Information about account-level configuration.

      " + }, + "ErrorCode":{ + "type":"string", + "enum":[ + "DUPLICATE_IDENTIFIER", + "ITEM_DOES_NOT_EXIST", + "INTERNAL_ERROR", + "INVALID_FINDING_ID", + "INVALID_SCAN_NAME" + ] + }, + "FilePath":{ + "type":"structure", + "members":{ + "codeSnippet":{ + "shape":"CodeSnippet", + "documentation":"

      A list of CodeLine objects that describe where the security vulnerability appears in your code.

      " + }, + "endLine":{ + "shape":"Integer", + "documentation":"

      The last line number of the code snippet where the security vulnerability appears in your code.

      " + }, + "name":{ + "shape":"String", + "documentation":"

      The name of the file.

      " + }, + "path":{ + "shape":"String", + "documentation":"

      The path to the resource with the security vulnerability.

      " + }, + "startLine":{ + "shape":"Integer", + "documentation":"

      The first line number of the code snippet where the security vulnerability appears in your code.

      " + } + }, + "documentation":"

      Information about the location of security vulnerabilities that Amazon CodeGuru Security detected in your code.

      " + }, + "Finding":{ + "type":"structure", + "members":{ + "createdAt":{ + "shape":"Timestamp", + "documentation":"

      The time when the finding was created.

      " + }, + "description":{ + "shape":"String", + "documentation":"

      A description of the finding.

      " + }, + "detectorId":{ + "shape":"String", + "documentation":"

      The identifier for the detector that detected the finding in your code. A detector is a defined rule based on industry standards and AWS best practices.

      " + }, + "detectorName":{ + "shape":"String", + "documentation":"

      The name of the detector that identified the security vulnerability in your code.

      " + }, + "detectorTags":{ + "shape":"DetectorTags", + "documentation":"

      One or more tags or categorizations that are associated with a detector. These tags are defined by type, programming language, or other classification such as maintainability or consistency.

      " + }, + "generatorId":{ + "shape":"String", + "documentation":"

      The identifier for the component that generated a finding such as AWSCodeGuruSecurity or AWSInspector.

      " + }, + "id":{ + "shape":"String", + "documentation":"

      The identifier for a finding.

      " + }, + "remediation":{ + "shape":"Remediation", + "documentation":"

      An object that contains the details about how to remediate a finding.

      " + }, + "resource":{ + "shape":"Resource", + "documentation":"

      The resource where Amazon CodeGuru Security detected a finding.

      " + }, + "ruleId":{ + "shape":"String", + "documentation":"

      The identifier for the rule that generated the finding.

      " + }, + "severity":{ + "shape":"Severity", + "documentation":"

      The severity of the finding.

      " + }, + "status":{ + "shape":"Status", + "documentation":"

      The status of the finding. A finding status can be open or closed.

      " + }, + "title":{ + "shape":"String", + "documentation":"

      The title of the finding.

      " + }, + "type":{ + "shape":"String", + "documentation":"

      The type of finding.

      " + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

      The time when the finding was last updated. Findings are updated when you remediate them or when the finding code location changes.

      " + }, + "vulnerability":{ + "shape":"Vulnerability", + "documentation":"

      An object that describes the detected security vulnerability.

      " + } + }, + "documentation":"

      Information about a finding that was detected in your code.

      " + }, + "FindingIdentifier":{ + "type":"structure", + "required":[ + "findingId", + "scanName" + ], + "members":{ + "findingId":{ + "shape":"String", + "documentation":"

      The identifier for a finding.

      " + }, + "scanName":{ + "shape":"String", + "documentation":"

      The name of the scan that generated the finding.

      " + } + }, + "documentation":"

      An object that contains information about a finding and the scan that generated it.

      " + }, + "FindingIdentifiers":{ + "type":"list", + "member":{"shape":"FindingIdentifier"}, + "max":25, + "min":1 + }, + "FindingMetricsValuePerSeverity":{ + "type":"structure", + "members":{ + "critical":{ + "shape":"Double", + "documentation":"

      The severity of the finding is critical and should be addressed immediately.

      " + }, + "high":{ + "shape":"Double", + "documentation":"

      The severity of the finding is high and should be addressed as a near-term priority.

      " + }, + "info":{ + "shape":"Double", + "documentation":"

      The finding is related to quality or readability improvements and not considered actionable.

      " + }, + "low":{ + "shape":"Double", + "documentation":"

      The severity of the finding is low and does not require action on its own.

      " + }, + "medium":{ + "shape":"Double", + "documentation":"

      The severity of the finding is medium and should be addressed as a mid-term priority.

      " + } + }, + "documentation":"

      The severity of the issue in the code that generated a finding.

      " + }, + "Findings":{ + "type":"list", + "member":{"shape":"Finding"} + }, + "FindingsMetricList":{ + "type":"list", + "member":{"shape":"AccountFindingsMetric"} + }, + "GetAccountConfigurationRequest":{ + "type":"structure", + "members":{ + } + }, + "GetAccountConfigurationResponse":{ + "type":"structure", + "required":["encryptionConfig"], + "members":{ + "encryptionConfig":{ + "shape":"EncryptionConfig", + "documentation":"

      An EncryptionConfig object that contains the KMS key ARN to use for encryption. By default, CodeGuru Security uses an AWS-managed key for encryption. To specify your own key, call UpdateAccountConfiguration.

      " + } + } + }, + "GetFindingsRequest":{ + "type":"structure", + "required":["scanName"], + "members":{ + "maxResults":{ + "shape":"GetFindingsRequestMaxResultsInteger", + "documentation":"

      The maximum number of results to return in the response. Use this parameter when paginating results. If additional results exist beyond the number you specify, the nextToken element is returned in the response. Use nextToken in a subsequent request to retrieve additional results.

      ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

      A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page.

      ", + "location":"querystring", + "locationName":"nextToken" + }, + "scanName":{ + "shape":"ScanName", + "documentation":"

      The name of the scan you want to retrieve findings from.

      ", + "location":"uri", + "locationName":"scanName" + }, + "status":{ + "shape":"Status", + "documentation":"

      The status of the findings you want to get. Pass either Open, Closed, or All.

      ", + "location":"querystring", + "locationName":"status" + } + } + }, + "GetFindingsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "GetFindingsResponse":{ + "type":"structure", + "members":{ + "findings":{ + "shape":"Findings", + "documentation":"

      A list of findings generated by the specified scan.

      " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

      A pagination token. You can use this in future calls to GetFindings to continue listing results after the current page.

      " + } + } + }, + "GetMetricsSummaryRequest":{ + "type":"structure", + "required":["date"], + "members":{ + "date":{ + "shape":"Timestamp", + "documentation":"

      The date you want to retrieve summary metrics from, rounded to the nearest day. The date must be within the past two years since metrics data is only stored for two years. If a date outside of this range is passed, the response will be empty.

      ", + "location":"querystring", + "locationName":"date" + } + } + }, + "GetMetricsSummaryResponse":{ + "type":"structure", + "members":{ + "metricsSummary":{ + "shape":"MetricsSummary", + "documentation":"

      The summary metrics from the specified date.

      " + } + } + }, + "GetScanRequest":{ + "type":"structure", + "required":["scanName"], + "members":{ + "runId":{ + "shape":"Uuid", + "documentation":"

      UUID that identifies the individual scan run you want to view details about. You retrieve this when you call the CreateScan operation. Defaults to the latest scan run if missing.

      ", + "location":"querystring", + "locationName":"runId" + }, + "scanName":{ + "shape":"ScanName", + "documentation":"

      The name of the scan you want to view details about.

      ", + "location":"uri", + "locationName":"scanName" + } + } + }, + "GetScanResponse":{ + "type":"structure", + "required":[ + "analysisType", + "createdAt", + "runId", + "scanName", + "scanState" + ], + "members":{ + "analysisType":{ + "shape":"AnalysisType", + "documentation":"

      The type of analysis CodeGuru Security performed in the scan, either Security or All. The Security type only generates findings related to security. The All type generates both security findings and quality findings.

      " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

      The time the scan was created.

      " + }, + "numberOfRevisions":{ + "shape":"Long", + "documentation":"

      The number of times a scan has been re-run on a revised resource.

      " + }, + "runId":{ + "shape":"Uuid", + "documentation":"

      UUID that identifies the individual scan run.

      " + }, + "scanName":{ + "shape":"ScanName", + "documentation":"

      The name of the scan.

      " + }, + "scanNameArn":{ + "shape":"ScanNameArn", + "documentation":"

      The ARN for the scan name.

      " + }, + "scanState":{ + "shape":"ScanState", + "documentation":"

      The current state of the scan. Returns either InProgress, Successful, or Failed.

      " + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

      The time when the scan was last updated. Only available for STANDARD scan types.

      " + } + } + }, + "HeaderKey":{ + "type":"string", + "min":1 + }, + "HeaderValue":{ + "type":"string", + "min":1 + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "error":{ + "shape":"String", + "documentation":"

      The internal error encountered by the server.

      " + }, + "message":{ + "shape":"String", + "documentation":"

      Description of the error.

      " + } + }, + "documentation":"

      The server encountered an internal error and is unable to complete the request.

      ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "KmsKeyArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^arn:aws:kms:[\\S]+:[\\d]{12}:key\\/(([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})|(mrk-[0-9a-zA-Z]{32}))$" + }, + "ListFindingsMetricsRequest":{ + "type":"structure", + "required":[ + "endDate", + "startDate" + ], + "members":{ + "endDate":{ + "shape":"Timestamp", + "documentation":"

      The end date of the interval which you want to retrieve metrics from.

      ", + "location":"querystring", + "locationName":"endDate" + }, + "maxResults":{ + "shape":"ListFindingsMetricsRequestMaxResultsInteger", + "documentation":"

      The maximum number of results to return in the response. Use this parameter when paginating results. If additional results exist beyond the number you specify, the nextToken element is returned in the response. Use nextToken in a subsequent request to retrieve additional results.

      ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

      A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page.

      ", + "location":"querystring", + "locationName":"nextToken" + }, + "startDate":{ + "shape":"Timestamp", + "documentation":"

      The start date of the interval which you want to retrieve metrics from.

      ", + "location":"querystring", + "locationName":"startDate" + } + } + }, + "ListFindingsMetricsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListFindingsMetricsResponse":{ + "type":"structure", + "members":{ + "findingsMetrics":{ + "shape":"FindingsMetricList", + "documentation":"

      A list of AccountFindingsMetric objects retrieved from the specified time interval.

      " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

      A pagination token. You can use this in future calls to ListFindingsMetrics to continue listing results after the current page.

      " + } + } + }, + "ListScansRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListScansRequestMaxResultsInteger", + "documentation":"

      The maximum number of results to return in the response. Use this parameter when paginating results. If additional results exist beyond the number you specify, the nextToken element is returned in the response. Use nextToken in a subsequent request to retrieve additional results.

      ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

      A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page.

      ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListScansRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListScansResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

      A pagination token. You can use this in future calls to ListScans to continue listing results after the current page.

      " + }, + "summaries":{ + "shape":"ScanSummaries", + "documentation":"

      A list of ScanSummary objects with information about all scans in an account.

      " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"ScanNameArn", + "documentation":"

      The ARN of the ScanName object. You can retrieve this ARN by calling ListScans or GetScan.

      ", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

      An array of key-value pairs used to tag an existing scan. A tag is a custom attribute label with two parts:

      • A tag key. For example, CostCenter, Environment, or Secret. Tag keys are case sensitive.

      • An optional tag value field. For example, 111122223333, Production, or a team name. Omitting the tag value is the same as using an empty string. Tag values are case sensitive.

      " + } + } + }, + "Long":{ + "type":"long", + "box":true + }, + "MetricsSummary":{ + "type":"structure", + "members":{ + "categoriesWithMostFindings":{ + "shape":"CategoriesWithMostFindings", + "documentation":"

      A list of CategoryWithFindingNum objects for the top 5 finding categories with the most open findings in an account.

      " + }, + "date":{ + "shape":"Timestamp", + "documentation":"

      The date from which the metrics summary information was retrieved.

      " + }, + "openFindings":{ + "shape":"FindingMetricsValuePerSeverity", + "documentation":"

      The number of open findings of each severity in an account.

      " + }, + "scansWithMostOpenCriticalFindings":{ + "shape":"ScansWithMostOpenCriticalFindings", + "documentation":"

      A list of ScanNameWithFindingNum objects for the top 3 scans with the most number of open critical findings in an account.

      " + }, + "scansWithMostOpenFindings":{ + "shape":"ScansWithMostOpenFindings", + "documentation":"

      A list of ScanNameWithFindingNum objects for the top 3 scans with the most number of open findings in an account.

      " + } + }, + "documentation":"

      Information about summary metrics in an account.

      " + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^[\\S]+$" + }, + "Recommendation":{ + "type":"structure", + "members":{ + "text":{ + "shape":"String", + "documentation":"

      The recommended course of action to remediate the finding.

      " + }, + "url":{ + "shape":"String", + "documentation":"

      The URL address to the recommendation for remediating the finding.

      " + } + }, + "documentation":"

      Information about the recommended course of action to remediate a finding.

      " + }, + "ReferenceUrls":{ + "type":"list", + "member":{"shape":"String"} + }, + "RelatedVulnerabilities":{ + "type":"list", + "member":{"shape":"String"} + }, + "Remediation":{ + "type":"structure", + "members":{ + "recommendation":{ + "shape":"Recommendation", + "documentation":"

      An object that contains information about the recommended course of action to remediate a finding.

      " + }, + "suggestedFixes":{ + "shape":"SuggestedFixes", + "documentation":"

      A list of SuggestedFix objects. Each object contains information about a suggested code fix to remediate the finding.

      " + } + }, + "documentation":"

      Information about how to remediate a finding.

      " + }, + "RequestHeaderMap":{ + "type":"map", + "key":{"shape":"HeaderKey"}, + "value":{"shape":"HeaderValue"}, + "sensitive":true + }, + "Resource":{ + "type":"structure", + "members":{ + "id":{ + "shape":"String", + "documentation":"

      The identifier for the resource.

      " + }, + "subResourceId":{ + "shape":"String", + "documentation":"

      The identifier for a section of the resource, such as an AWS Lambda layer.

      " + } + }, + "documentation":"

      Information about a resource, such as an Amazon S3 bucket or AWS Lambda function, that contains a finding.

      " + }, + "ResourceId":{ + "type":"structure", + "members":{ + "codeArtifactId":{ + "shape":"Uuid", + "documentation":"

      The identifier for the code file uploaded to the resource where a finding was detected.

      " + } + }, + "documentation":"

      The identifier for a resource object that contains resources where a finding was detected.

      ", + "union":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "errorCode", + "message", + "resourceId", + "resourceType" + ], + "members":{ + "errorCode":{ + "shape":"String", + "documentation":"

      The identifier for the error.

      " + }, + "message":{ + "shape":"String", + "documentation":"

      Description of the error.

      " + }, + "resourceId":{ + "shape":"String", + "documentation":"

      The identifier for the resource that was not found.

      " + }, + "resourceType":{ + "shape":"String", + "documentation":"

      The type of resource that was not found.

      " + } + }, + "documentation":"

      The resource specified in the request was not found.

      ", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "S3Url":{ + "type":"string", + "min":1, + "sensitive":true + }, + "ScanName":{ + "type":"string", + "max":140, + "min":1, + "pattern":"^[a-zA-Z0-9-_$:.]*$" + }, + "ScanNameArn":{ + "type":"string", + "max":300, + "min":1, + "pattern":"^arn:aws:codeguru-security:[\\S]+:[\\d]{12}:scans\\/[a-zA-Z0-9-_$:.]*$" + }, + "ScanNameWithFindingNum":{ + "type":"structure", + "members":{ + "findingNumber":{ + "shape":"Integer", + "documentation":"

      The number of open findings generated by a scan.

      " + }, + "scanName":{ + "shape":"String", + "documentation":"

      The name of the scan.

      " + } + }, + "documentation":"

      Information about a scan with open findings.

      " + }, + "ScanState":{ + "type":"string", + "enum":[ + "InProgress", + "Successful", + "Failed" + ] + }, + "ScanSummaries":{ + "type":"list", + "member":{"shape":"ScanSummary"} + }, + "ScanSummary":{ + "type":"structure", + "required":[ + "createdAt", + "runId", + "scanName", + "scanState" + ], + "members":{ + "createdAt":{ + "shape":"Timestamp", + "documentation":"

      The time when the scan was created.

      " + }, + "runId":{ + "shape":"Uuid", + "documentation":"

      The identifier for the scan run.

      " + }, + "scanName":{ + "shape":"ScanName", + "documentation":"

      The name of the scan.

      " + }, + "scanNameArn":{ + "shape":"ScanNameArn", + "documentation":"

      The ARN for the scan name.

      " + }, + "scanState":{ + "shape":"ScanState", + "documentation":"

      The state of the scan. A scan can be InProgress, Successful, or Failed.

      " + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

      The time the scan was last updated. A scan is updated when it is re-run.

      " + } + }, + "documentation":"

      Information about a scan.

      " + }, + "ScanType":{ + "type":"string", + "enum":[ + "Standard", + "Express" + ] + }, + "ScansWithMostOpenCriticalFindings":{ + "type":"list", + "member":{"shape":"ScanNameWithFindingNum"}, + "max":3, + "min":0 + }, + "ScansWithMostOpenFindings":{ + "type":"list", + "member":{"shape":"ScanNameWithFindingNum"}, + "max":3, + "min":0 + }, + "Severity":{ + "type":"string", + "enum":[ + "Critical", + "High", + "Medium", + "Low", + "Info" + ] + }, + "Status":{ + "type":"string", + "enum":[ + "Closed", + "Open", + "All" + ] + }, + "String":{"type":"string"}, + "SuggestedFix":{ + "type":"structure", + "members":{ + "code":{ + "shape":"String", + "documentation":"

      The suggested code to add to your file.

      " + }, + "description":{ + "shape":"String", + "documentation":"

      A description of the suggested code fix and why it is being suggested.

      " + } + }, + "documentation":"

      Information about the suggested code fix to remediate a finding.

      " + }, + "SuggestedFixes":{ + "type":"list", + "member":{"shape":"SuggestedFix"} + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"ScanNameArn", + "documentation":"

      The ARN of the ScanName object. You can retrieve this ARN by calling ListScans or GetScan.

      ", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

      An array of key-value pairs used to tag an existing scan. A tag is a custom attribute label with two parts:

      • A tag key. For example, CostCenter, Environment, or Secret. Tag keys are case sensitive.

      • An optional tag value field. For example, 111122223333, Production, or a team name. Omitting the tag value is the same as using an empty string. Tag values are case sensitive.

      " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "ThrottlingException":{ + "type":"structure", + "required":[ + "errorCode", + "message" + ], + "members":{ + "errorCode":{ + "shape":"String", + "documentation":"

      The identifier for the error.

      " + }, + "message":{ + "shape":"String", + "documentation":"

      Description of the error.

      " + }, + "quotaCode":{ + "shape":"String", + "documentation":"

      The identifier for the originating quota.

      " + }, + "serviceCode":{ + "shape":"String", + "documentation":"

      The identifier for the originating service.

      " + } + }, + "documentation":"

      The request was denied due to request throttling.

      ", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "Timestamp":{"type":"timestamp"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"ScanNameArn", + "documentation":"

      The ARN of the ScanName object. You can retrieve this ARN by calling ListScans or GetScan.

      ", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

      A list of keys for each tag you want to remove from a scan.

      ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateAccountConfigurationRequest":{ + "type":"structure", + "required":["encryptionConfig"], + "members":{ + "encryptionConfig":{ + "shape":"EncryptionConfig", + "documentation":"

      The KMS key ARN you want to use for encryption. Defaults to service-side encryption if missing.

      " + } + } + }, + "UpdateAccountConfigurationResponse":{ + "type":"structure", + "required":["encryptionConfig"], + "members":{ + "encryptionConfig":{ + "shape":"EncryptionConfig", + "documentation":"

      An EncryptionConfig object that contains the KMS key ARN to use for encryption.

      " + } + } + }, + "Uuid":{ + "type":"string", + "pattern":"^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" + }, + "ValidationException":{ + "type":"structure", + "required":[ + "errorCode", + "message", + "reason" + ], + "members":{ + "errorCode":{ + "shape":"String", + "documentation":"

      The identifier for the error.

      " + }, + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

      The field that caused the error, if applicable.

      " + }, + "message":{ + "shape":"String", + "documentation":"

      Description of the error.

      " + }, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

      The reason the request failed validation.

      " + } + }, + "documentation":"

      The input fails to satisfy the specified constraints.

      ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "message", + "name" + ], + "members":{ + "message":{ + "shape":"String", + "documentation":"

      Describes the exception.

      " + }, + "name":{ + "shape":"String", + "documentation":"

      The name of the exception.

      " + } + }, + "documentation":"

      Information about a validation exception.

      " + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "unknownOperation", + "cannotParse", + "fieldValidationFailed", + "other", + "lambdaCodeShaMisMatch" + ] + }, + "Vulnerability":{ + "type":"structure", + "members":{ + "filePath":{ + "shape":"FilePath", + "documentation":"

      An object that describes the location of the detected security vulnerability in your code.

      " + }, + "id":{ + "shape":"String", + "documentation":"

      The identifier for the vulnerability.

      " + }, + "itemCount":{ + "shape":"Integer", + "documentation":"

      The number of times the vulnerability appears in your code.

      " + }, + "referenceUrls":{ + "shape":"ReferenceUrls", + "documentation":"

      One or more URL addresses that contain details about a vulnerability.

      " + }, + "relatedVulnerabilities":{ + "shape":"RelatedVulnerabilities", + "documentation":"

      One or more vulnerabilities that are related to the vulnerability being described.

      " + } + }, + "documentation":"

      Information about a security vulnerability that Amazon CodeGuru Security detected.

      " + } + }, + "documentation":"

      This section provides documentation for the Amazon CodeGuru Security API operations. CodeGuru Security is a service that uses program analysis and machine learning to detect security policy violations and vulnerabilities, and recommends ways to address these security risks.

      By proactively detecting and providing recommendations for addressing security risks, CodeGuru Security improves the overall security of your application code. For more information about CodeGuru Security, see the Amazon CodeGuru Security User Guide.

      " +} diff --git a/services/codepipeline/pom.xml b/services/codepipeline/pom.xml index 23bcd9ef1112..bfebd0b8e32b 100644 --- a/services/codepipeline/pom.xml +++ b/services/codepipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT codepipeline AWS Java SDK :: Services :: AWS CodePipeline diff --git a/services/codepipeline/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/codepipeline/src/main/resources/codegen-resources/endpoint-rule-set.json index 2a14c1314828..7e6b2d6bcf15 100644 --- a/services/codepipeline/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/codepipeline/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - 
"url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://codepipeline-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + 
"conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://codepipeline-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://codepipeline-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://codepipeline.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - 
"rules": [ + }, { "conditions": [], "type": "tree", @@ -231,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://codepipeline-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://codepipeline.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -240,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://codepipeline.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://codepipeline.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/codepipeline/src/main/resources/codegen-resources/endpoint-tests.json b/services/codepipeline/src/main/resources/codegen-resources/endpoint-tests.json index 00a04f5b18d7..ef8ca781916a 100644 --- a/services/codepipeline/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/codepipeline/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,42 +1,42 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://codepipeline-fips.ap-south-1.api.aws" + "url": "https://codepipeline.ap-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": true + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline-fips.ap-south-1.amazonaws.com" + "url": "https://codepipeline.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": true + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline.ap-south-1.api.aws" + "url": "https://codepipeline.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": false + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -47,1218 +47,547 @@ } }, "params": { - "UseDualStack": false, "Region": "ap-south-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline-fips.eu-south-1.api.aws" + "url": "https://codepipeline.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": true + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region 
ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline-fips.eu-south-1.amazonaws.com" + "url": "https://codepipeline.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": true + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline.eu-south-1.api.aws" + "url": "https://codepipeline.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": false + "Region": "ca-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline.eu-south-1.amazonaws.com" + "url": "https://codepipeline-fips.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": false + "Region": "ca-central-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline-fips.us-gov-east-1.api.aws" + "url": "https://codepipeline.eu-central-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": true + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", 
"expect": { "endpoint": { - "url": "https://codepipeline-fips.us-gov-east-1.amazonaws.com" + "url": "https://codepipeline.eu-north-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": true + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline.us-gov-east-1.api.aws" + "url": "https://codepipeline.eu-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": false + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline.us-gov-east-1.amazonaws.com" + "url": "https://codepipeline.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": false + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline-fips.ca-central-1.api.aws" + "url": "https://codepipeline.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": true + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://codepipeline-fips.ca-central-1.amazonaws.com" + "url": "https://codepipeline.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": true + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline.ca-central-1.api.aws" + "url": "https://codepipeline.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": false + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline.ca-central-1.amazonaws.com" + "url": "https://codepipeline.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline-fips.eu-central-1.api.aws" + "url": "https://codepipeline-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline-fips.eu-central-1.amazonaws.com" + "url": 
"https://codepipeline.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": true + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline.eu-central-1.api.aws" + "url": "https://codepipeline-fips.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": false + "Region": "us-east-2", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline.eu-central-1.amazonaws.com" + "url": "https://codepipeline.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": false + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline-fips.us-west-1.api.aws" + "url": "https://codepipeline-fips.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, "Region": "us-west-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline-fips.us-west-1.amazonaws.com" + "url": "https://codepipeline.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": 
true + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline.us-west-1.api.aws" + "url": "https://codepipeline-fips.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": false + "Region": "us-west-2", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codepipeline.us-west-1.amazonaws.com" + "url": "https://codepipeline-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codepipeline-fips.us-west-2.api.aws" + "url": "https://codepipeline.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline-fips.us-west-2.amazonaws.com" + "url": "https://codepipeline.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS 
disabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline.us-west-2.api.aws" + "url": "https://codepipeline.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codepipeline.us-west-2.amazonaws.com" + "url": "https://codepipeline-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline-fips.af-south-1.api.aws" + "url": "https://codepipeline-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codepipeline-fips.af-south-1.amazonaws.com" + "url": "https://codepipeline.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For 
region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline.af-south-1.api.aws" + "url": "https://codepipeline.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": false + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline.af-south-1.amazonaws.com" + "url": "https://codepipeline-fips.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": false + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codepipeline-fips.eu-north-1.api.aws" + "url": "https://codepipeline-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline-fips.eu-north-1.amazonaws.com" + "url": "https://codepipeline-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", 
"expect": { "endpoint": { - "url": "https://codepipeline.eu-north-1.api.aws" + "url": "https://codepipeline.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline.eu-north-1.amazonaws.com" + "url": "https://codepipeline.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://codepipeline-fips.eu-west-3.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline-fips.eu-west-3.amazonaws.com" + "url": "https://codepipeline-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": 
"https://codepipeline.eu-west-3.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline.eu-west-3.amazonaws.com" + "url": "https://codepipeline.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://codepipeline-fips.eu-west-2.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline-fips.eu-west-2.amazonaws.com" + "url": "https://codepipeline-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For 
region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and 
DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and 
DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://codepipeline-fips.ap-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.ap-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://codepipeline.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.us-gov-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.us-gov-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.us-gov-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.ap-southeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.ap-southeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - 
"url": "https://codepipeline.ap-southeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.ap-southeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.ap-southeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.ap-southeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.ap-southeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.ap-southeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.us-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS 
enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://codepipeline-fips.us-east-1.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codepipeline.us-east-1.api.aws" + "url": "https://codepipeline.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://codepipeline.us-east-1.amazonaws.com" + "url": "https://example.com" } }, "params": { - "UseDualStack": false, "Region": "us-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.us-east-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.us-east-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - 
"url": "https://codepipeline.us-east-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.us-east-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": true - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline-fips.cn-northwest-1.amazonaws.com.cn" - } - }, - "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": true - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.cn-northwest-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": false - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codepipeline.cn-northwest-1.amazonaws.com.cn" - } - }, - "params": { + "UseFIPS": false, "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", "UseFIPS": 
false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1268,9 +597,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1280,11 +609,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/codepipeline/src/main/resources/codegen-resources/service-2.json b/services/codepipeline/src/main/resources/codegen-resources/service-2.json index 9f696d57dcba..47c7455521ac 100644 --- a/services/codepipeline/src/main/resources/codegen-resources/service-2.json +++ b/services/codepipeline/src/main/resources/codegen-resources/service-2.json @@ -59,7 +59,7 @@ {"shape":"InvalidTagsException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Creates a new custom action that can be used in all pipelines associated with the AWS account. Only used for custom actions.

      " + "documentation":"

      Creates a new custom action that can be used in all pipelines associated with the Amazon Web Services account. Only used for custom actions.

      " }, "CreatePipeline":{ "name":"CreatePipeline", @@ -121,7 +121,7 @@ {"shape":"ValidationException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Deletes a previously created webhook by name. Deleting the webhook stops AWS CodePipeline from starting a pipeline every time an external event occurs. The API returns successfully when trying to delete a webhook that is already deleted. If a deleted webhook is re-created by calling PutWebhook with the same name, it will have a different URL.

      " + "documentation":"

      Deletes a previously created webhook by name. Deleting the webhook stops CodePipeline from starting a pipeline every time an external event occurs. The API returns successfully when trying to delete a webhook that is already deleted. If a deleted webhook is re-created by calling PutWebhook with the same name, it will have a different URL.

      " }, "DeregisterWebhookWithThirdParty":{ "name":"DeregisterWebhookWithThirdParty", @@ -191,7 +191,7 @@ {"shape":"ValidationException"}, {"shape":"JobNotFoundException"} ], - "documentation":"

      Returns information about a job. Used for custom actions only.

      When this API is called, AWS CodePipeline returns temporary credentials for the S3 bucket used to store artifacts for the pipeline, if the action requires access to that S3 bucket for input or output artifacts. This API also returns any secret values defined for the action.

      " + "documentation":"

      Returns information about a job. Used for custom actions only.

      When this API is called, CodePipeline returns temporary credentials for the S3 bucket used to store artifacts for the pipeline, if the action requires access to that S3 bucket for input or output artifacts. This API also returns any secret values defined for the action.

      " }, "GetPipeline":{ "name":"GetPipeline", @@ -251,7 +251,7 @@ {"shape":"InvalidClientTokenException"}, {"shape":"InvalidJobException"} ], - "documentation":"

      Requests the details of a job for a third party action. Used for partner actions only.

      When this API is called, AWS CodePipeline returns temporary credentials for the S3 bucket used to store artifacts for the pipeline, if the action requires access to that S3 bucket for input or output artifacts. This API also returns any secret values defined for the action.

      " + "documentation":"

      Requests the details of a job for a third party action. Used for partner actions only.

      When this API is called, CodePipeline returns temporary credentials for the S3 bucket used to store artifacts for the pipeline, if the action requires access to that S3 bucket for input or output artifacts. This API also returns any secret values defined for the action.

      " }, "ListActionExecutions":{ "name":"ListActionExecutions", @@ -281,7 +281,7 @@ {"shape":"ValidationException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

      Gets a summary of all AWS CodePipeline action types associated with your account.

      " + "documentation":"

      Gets a summary of all CodePipeline action types associated with your account.

      " }, "ListPipelineExecutions":{ "name":"ListPipelineExecutions", @@ -340,7 +340,7 @@ {"shape":"ValidationException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

      Gets a listing of all the webhooks in this AWS Region for this account. The output lists all webhooks and includes the webhook URL and ARN and the configuration for each webhook.

      " + "documentation":"

      Gets a listing of all the webhooks in this Amazon Web Services Region for this account. The output lists all webhooks and includes the webhook URL and ARN and the configuration for each webhook.

      " }, "PollForJobs":{ "name":"PollForJobs", @@ -354,7 +354,7 @@ {"shape":"ValidationException"}, {"shape":"ActionTypeNotFoundException"} ], - "documentation":"

      Returns information about any jobs for AWS CodePipeline to act on. PollForJobs is valid only for action types with \"Custom\" in the owner field. If the action type contains \"AWS\" or \"ThirdParty\" in the owner field, the PollForJobs action returns an error.

      When this API is called, AWS CodePipeline returns temporary credentials for the S3 bucket used to store artifacts for the pipeline, if the action requires access to that S3 bucket for input or output artifacts. This API also returns any secret values defined for the action.

      " + "documentation":"

      Returns information about any jobs for CodePipeline to act on. PollForJobs is valid only for action types with \"Custom\" in the owner field. If the action type contains AWS or ThirdParty in the owner field, the PollForJobs action returns an error.

      When this API is called, CodePipeline returns temporary credentials for the S3 bucket used to store artifacts for the pipeline, if the action requires access to that S3 bucket for input or output artifacts. This API also returns any secret values defined for the action.

      " }, "PollForThirdPartyJobs":{ "name":"PollForThirdPartyJobs", @@ -368,7 +368,7 @@ {"shape":"ActionTypeNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

      Determines whether there are any third party jobs for a job worker to act on. Used for partner actions only.

      When this API is called, AWS CodePipeline returns temporary credentials for the S3 bucket used to store artifacts for the pipeline, if the action requires access to that S3 bucket for input or output artifacts.

      " + "documentation":"

      Determines whether there are any third party jobs for a job worker to act on. Used for partner actions only.

      When this API is called, CodePipeline returns temporary credentials for the S3 bucket used to store artifacts for the pipeline, if the action requires access to that S3 bucket for input or output artifacts.

      " }, "PutActionRevision":{ "name":"PutActionRevision", @@ -384,7 +384,7 @@ {"shape":"ActionNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

      Provides information to AWS CodePipeline about new revisions to a source.

      " + "documentation":"

      Provides information to CodePipeline about new revisions to a source.

      " }, "PutApprovalResult":{ "name":"PutApprovalResult", @@ -402,7 +402,7 @@ {"shape":"ActionNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

      Provides the response to a manual approval request to AWS CodePipeline. Valid responses include Approved and Rejected.

      " + "documentation":"

      Provides the response to a manual approval request to CodePipeline. Valid responses include Approved and Rejected.

      " }, "PutJobFailureResult":{ "name":"PutJobFailureResult", @@ -580,7 +580,7 @@ {"shape":"InvalidTagsException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

      Removes tags from an AWS resource.

      " + "documentation":"

      Removes tags from an Amazon Web Services resource.

      " }, "UpdateActionType":{ "name":"UpdateActionType", @@ -642,7 +642,7 @@ "documentation":"

      The token for the session.

      " } }, - "documentation":"

      Represents an AWS session credentials object. These credentials are temporary credentials that are issued by AWS Secure Token Service (STS). They can be used to access input and output artifacts in the S3 bucket used to store artifact for the pipeline in AWS CodePipeline.

      ", + "documentation":"

      Represents an Amazon Web Services session credentials object. These credentials are temporary credentials that are issued by Amazon Web Services Secure Token Service (STS). They can be used to access input and output artifacts in the S3 bucket used to store artifact for the pipeline in CodePipeline.

      ", "sensitive":true }, "AccessKeyId":{ @@ -666,7 +666,7 @@ }, "nonce":{ "shape":"Nonce", - "documentation":"

      A system-generated random number that AWS CodePipeline uses to ensure that the job is being worked on by only one job worker. Get this number from the response of the PollForJobs request that returned this job.

      " + "documentation":"

      A system-generated random number that CodePipeline uses to ensure that the job is being worked on by only one job worker. Get this number from the response of the PollForJobs request that returned this job.

      " } }, "documentation":"

      Represents the input of an AcknowledgeJob action.

      " @@ -695,7 +695,7 @@ }, "nonce":{ "shape":"Nonce", - "documentation":"

      A system-generated random number that AWS CodePipeline uses to ensure that the job is being worked on by only one job worker. Get this number from the response to a GetThirdPartyJobDetails request.

      " + "documentation":"

      A system-generated random number that CodePipeline uses to ensure that the job is being worked on by only one job worker. Get this number from the response to a GetThirdPartyJobDetails request.

      " }, "clientToken":{ "shape":"ClientToken", @@ -844,7 +844,7 @@ }, "configuration":{ "shape":"ActionConfigurationMap", - "documentation":"

      The action's configuration. These are key-value pairs that specify input values for an action. For more information, see Action Structure Requirements in CodePipeline. For the list of configuration properties for the AWS CloudFormation action type in CodePipeline, see Configuration Properties Reference in the AWS CloudFormation User Guide. For template snippets with examples, see Using Parameter Override Functions with CodePipeline Pipelines in the AWS CloudFormation User Guide.

      The values can be represented in either JSON or YAML format. For example, the JSON configuration item format is as follows:

      JSON:

      \"Configuration\" : { Key : Value },

      " + "documentation":"

      The action's configuration. These are key-value pairs that specify input values for an action. For more information, see Action Structure Requirements in CodePipeline. For the list of configuration properties for the CloudFormation action type in CodePipeline, see Configuration Properties Reference in the CloudFormation User Guide. For template snippets with examples, see Using Parameter Override Functions with CodePipeline Pipelines in the CloudFormation User Guide.

      The values can be represented in either JSON or YAML format. For example, the JSON configuration item format is as follows:

      JSON:

      \"Configuration\" : { Key : Value },

      " }, "outputArtifacts":{ "shape":"OutputArtifactList", @@ -860,7 +860,7 @@ }, "region":{ "shape":"AWSRegionName", - "documentation":"

      The action declaration's AWS Region, such as us-east-1.

      " + "documentation":"

      The action declaration's Amazon Web Services Region, such as us-east-1.

      " }, "namespace":{ "shape":"ActionNamespace", @@ -902,7 +902,7 @@ }, "externalExecutionUrl":{ "shape":"Url", - "documentation":"

      The URL of a resource external to AWS that is used when running the action (for example, an external repository URL).

      " + "documentation":"

      The URL of a resource external to Amazon Web Services that is used when running the action (for example, an external repository URL).

      " }, "percentComplete":{ "shape":"Percentage", @@ -910,7 +910,7 @@ }, "errorDetails":{ "shape":"ErrorDetails", - "documentation":"

      The details of an error returned by a URL external to AWS.

      " + "documentation":"

      The details of an error returned by a URL external to Amazon Web Services.

      " } }, "documentation":"

      Represents information about the run of an action.

      " @@ -994,7 +994,7 @@ }, "region":{ "shape":"AWSRegionName", - "documentation":"

      The AWS Region for the action, such as us-east-1.

      " + "documentation":"

      The Amazon Web Services Region for the action, such as us-east-1.

      " }, "inputArtifacts":{ "shape":"ArtifactDetailList", @@ -1266,7 +1266,7 @@ }, "policyStatementsTemplate":{ "shape":"PolicyStatementsTemplate", - "documentation":"

      The policy statement that specifies the permissions in the CodePipeline customer’s account that are needed to successfully run an action.

      To grant permission to another account, specify the account ID as the Principal, a domain-style identifier defined by the service, for example codepipeline.amazonaws.com.

      The size of the passed JSON policy document cannot exceed 2048 characters.

      " + "documentation":"

      The policy statement that specifies the permissions in the CodePipeline customer account that are needed to successfully run an action.

      To grant permission to another account, specify the account ID as the Principal, a domain-style identifier defined by the service, for example codepipeline.amazonaws.com.

      The size of the passed JSON policy document cannot exceed 2048 characters.

      " }, "jobTimeout":{ "shape":"JobTimeout", @@ -1294,7 +1294,7 @@ }, "provider":{ "shape":"ActionProvider", - "documentation":"

      The provider of the service being called by the action. Valid providers are determined by the action category. For example, an action in the Deploy category type might have a provider of AWS CodeDeploy, which would be specified as CodeDeploy. For more information, see Valid Action Types and Providers in CodePipeline.

      " + "documentation":"

      The provider of the service being called by the action. Valid providers are determined by the action category. For example, an action in the Deploy category type might have a provider of CodeDeploy, which would be specified as CodeDeploy. For more information, see Valid Action Types and Providers in CodePipeline.

      " }, "version":{ "shape":"Version", @@ -1352,7 +1352,7 @@ "members":{ "allowedAccounts":{ "shape":"AllowedAccounts", - "documentation":"

      A list of AWS account IDs with access to use the action type in their pipelines.

      " + "documentation":"

      A list of Amazon Web Services account IDs with access to use the action type in their pipelines.

      " } }, "documentation":"

      Details identifying the users with permissions to use the action type.

      " @@ -1407,15 +1407,15 @@ }, "entityUrlTemplate":{ "shape":"UrlTemplate", - "documentation":"

      The URL returned to the AWS CodePipeline console that provides a deep link to the resources of the external system, such as the configuration page for an AWS CodeDeploy deployment group. This link is provided as part of the action display in the pipeline.

      " + "documentation":"

      The URL returned to the CodePipeline console that provides a deep link to the resources of the external system, such as the configuration page for a CodeDeploy deployment group. This link is provided as part of the action display in the pipeline.

      " }, "executionUrlTemplate":{ "shape":"UrlTemplate", - "documentation":"

      The URL returned to the AWS CodePipeline console that contains a link to the top-level landing page for the external system, such as the console page for AWS CodeDeploy. This link is shown on the pipeline view page in the AWS CodePipeline console and provides a link to the execution entity of the external action.

      " + "documentation":"

      The URL returned to the CodePipeline console that contains a link to the top-level landing page for the external system, such as the console page for CodeDeploy. This link is shown on the pipeline view page in the CodePipeline console and provides a link to the execution entity of the external action.

      " }, "revisionUrlTemplate":{ "shape":"UrlTemplate", - "documentation":"

      The URL returned to the AWS CodePipeline console that contains a link to the page where customers can update or change the configuration of the external action.

      " + "documentation":"

      The URL returned to the CodePipeline console that contains a link to the page where customers can update or change the configuration of the external action.

      " } }, "documentation":"

      Returns information about the settings for an action type.

      " @@ -1509,7 +1509,7 @@ "documentation":"

      The location of an artifact.

      " } }, - "documentation":"

      Represents information about an artifact that is worked on by actions in the pipeline.

      " + "documentation":"

      Artifacts are the files that are worked on by actions in the pipeline. See the action configuration for each action for details about artifact parameters. For example, the S3 source action artifact is a file name (or file path), and the files are generally provided as a ZIP file. Example artifact name: SampleApp_Windows.zip

      " }, "ArtifactDetail":{ "type":"structure", @@ -1592,7 +1592,7 @@ }, "revisionSummary":{ "shape":"RevisionSummary", - "documentation":"

      Summary information about the most recent revision of the artifact. For GitHub and AWS CodeCommit repositories, the commit message. For Amazon S3 buckets or actions, the user-provided content of a codepipeline-artifact-revision-summary key specified in the object metadata.

      " + "documentation":"

      Summary information about the most recent revision of the artifact. For GitHub and CodeCommit repositories, the commit message. For Amazon S3 buckets or actions, the user-provided content of a codepipeline-artifact-revision-summary key specified in the object metadata.

      " }, "created":{ "shape":"Timestamp", @@ -1600,7 +1600,7 @@ }, "revisionUrl":{ "shape":"Url", - "documentation":"

      The commit ID for the artifact revision. For artifacts stored in GitHub or AWS CodeCommit repositories, the commit ID is linked to a commit details page.

      " + "documentation":"

      The commit ID for the artifact revision. For artifacts stored in GitHub or CodeCommit repositories, the commit ID is linked to a commit details page.

      " } }, "documentation":"

      Represents revision details of an artifact.

      " @@ -1622,11 +1622,11 @@ }, "location":{ "shape":"ArtifactStoreLocation", - "documentation":"

      The S3 bucket used for storing the artifacts for a pipeline. You can specify the name of an S3 bucket but not a folder in the bucket. A folder to contain the pipeline artifacts is created for you based on the name of the pipeline. You can use any S3 bucket in the same AWS Region as the pipeline to store your pipeline artifacts.

      " + "documentation":"

      The S3 bucket used for storing the artifacts for a pipeline. You can specify the name of an S3 bucket but not a folder in the bucket. A folder to contain the pipeline artifacts is created for you based on the name of the pipeline. You can use any S3 bucket in the same Amazon Web Services Region as the pipeline to store your pipeline artifacts.

      " }, "encryptionKey":{ "shape":"EncryptionKey", - "documentation":"

      The encryption key used to encrypt the data in the artifact store, such as an AWS Key Management Service (AWS KMS) key. If this is undefined, the default key for Amazon S3 is used.

      " + "documentation":"

      The encryption key used to encrypt the data in the artifact store, such as an Amazon Web Services Key Management Service key. If this is undefined, the default key for Amazon S3 is used.

      " } }, "documentation":"

      The S3 bucket where artifacts for the pipeline are stored.

      You must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores.

      " @@ -1727,7 +1727,7 @@ }, "provider":{ "shape":"ActionProvider", - "documentation":"

      The provider of the service used in the custom action, such as AWS CodeDeploy.

      " + "documentation":"

      The provider of the service used in the custom action, such as CodeDeploy.

      " }, "version":{ "shape":"Version", @@ -1840,7 +1840,7 @@ }, "provider":{ "shape":"ActionProvider", - "documentation":"

      The provider of the service used in the custom action, such as AWS CodeDeploy.

      " + "documentation":"

      The provider of the service used in the custom action, such as CodeDeploy.

      " }, "version":{ "shape":"Version", @@ -1969,14 +1969,14 @@ "members":{ "id":{ "shape":"EncryptionKeyId", - "documentation":"

      The ID used to identify the key. For an AWS KMS key, you can use the key ID, the key ARN, or the alias ARN.

      Aliases are recognized only in the account that created the customer master key (CMK). For cross-account actions, you can only use the key ID or key ARN to identify the key.

      " + "documentation":"

      The ID used to identify the key. For an Amazon Web Services KMS key, you can use the key ID, the key ARN, or the alias ARN.

      Aliases are recognized only in the account that created the KMS key. For cross-account actions, you can only use the key ID or key ARN to identify the key. Cross-account actions involve using the role from the other account (AccountB), so specifying the key ID will use the key from the other account (AccountB).

      " }, "type":{ "shape":"EncryptionKeyType", - "documentation":"

      The type of encryption key, such as an AWS Key Management Service (AWS KMS) key. When creating or updating a pipeline, the value must be set to 'KMS'.

      " + "documentation":"

      The type of encryption key, such as an Amazon Web Services KMS key. When creating or updating a pipeline, the value must be set to 'KMS'.

      " } }, - "documentation":"

      Represents information about the key used to encrypt data in the artifact store, such as an AWS Key Management Service (AWS KMS) key.

      " + "documentation":"

      Represents information about the key used to encrypt data in the artifact store, such as an Amazon Web Services Key Management Service (Key Management Service) key.

      " }, "EncryptionKeyId":{ "type":"string", @@ -1999,7 +1999,7 @@ "documentation":"

      The text of the error message.

      " } }, - "documentation":"

      Represents information about an error in AWS CodePipeline.

      " + "documentation":"

      Represents information about an error in CodePipeline.

      " }, "ExecutionDetails":{ "type":"structure", @@ -2010,7 +2010,7 @@ }, "externalExecutionId":{ "shape":"ExecutionId", - "documentation":"

      The system-generated unique ID of this action used to identify this job worker in any external systems, such as AWS CodeDeploy.

      " + "documentation":"

      The system-generated unique ID of this action used to identify this job worker in any external systems, such as CodeDeploy.

      " }, "percentComplete":{ "shape":"Percentage", @@ -2190,7 +2190,7 @@ "members":{ "name":{ "shape":"PipelineName", - "documentation":"

      The name of the pipeline for which you want to get information. Pipeline names must be unique under an AWS user account.

      " + "documentation":"

      The name of the pipeline for which you want to get information. Pipeline names must be unique in an Amazon Web Services account.

      " }, "version":{ "shape":"PipelineVersion", @@ -2284,7 +2284,7 @@ "members":{ "name":{ "shape":"ArtifactName", - "documentation":"

      The name of the artifact to be worked on (for example, \"My App\").

      The input artifact of an action must exactly match the output artifact declared in a preceding action, but the input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions.

      " + "documentation":"

      The name of the artifact to be worked on (for example, \"My App\").

      Artifacts are the files that are worked on by actions in the pipeline. See the action configuration for each action for details about artifact parameters. For example, the S3 source action input artifact is a file name (or file path), and the files are generally provided as a ZIP file. Example artifact name: SampleApp_Windows.zip

      The input artifact of an action must exactly match the output artifact declared in a preceding action, but the input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions.

      " } }, "documentation":"

      Represents information about an artifact to be worked on, such as a test or build artifact.

      " @@ -2406,11 +2406,11 @@ }, "nonce":{ "shape":"Nonce", - "documentation":"

      A system-generated random number that AWS CodePipeline uses to ensure that the job is being worked on by only one job worker. Use this number in an AcknowledgeJob request.

      " + "documentation":"

      A system-generated random number that CodePipeline uses to ensure that the job is being worked on by only one job worker. Use this number in an AcknowledgeJob request.

      " }, "accountId":{ "shape":"AccountId", - "documentation":"

      The ID of the AWS account to use when performing the job.

      " + "documentation":"

      The ID of the Amazon Web Services account to use when performing the job.

      " } }, "documentation":"

      Represents information about a job.

      " @@ -2440,15 +2440,15 @@ }, "artifactCredentials":{ "shape":"AWSSessionCredentials", - "documentation":"

      Represents an AWS session credentials object. These credentials are temporary credentials that are issued by AWS Secure Token Service (STS). They can be used to access input and output artifacts in the S3 bucket used to store artifacts for the pipeline in AWS CodePipeline.

      " + "documentation":"

      Represents an Amazon Web Services session credentials object. These credentials are temporary credentials that are issued by Amazon Web Services Security Token Service (STS). They can be used to access input and output artifacts in the S3 bucket used to store artifacts for the pipeline in CodePipeline.

      " }, "continuationToken":{ "shape":"ContinuationToken", - "documentation":"

      A system-generated token, such as a AWS CodeDeploy deployment ID, required by a job to continue the job asynchronously.

      " + "documentation":"

      A system-generated token, such as a deployment ID, required by a job to continue the job asynchronously.

      " }, "encryptionKey":{ "shape":"EncryptionKey", - "documentation":"

      Represents information about the key used to encrypt data in the artifact store, such as an AWS Key Management Service (AWS KMS) key.

      " + "documentation":"

      Represents information about the key used to encrypt data in the artifact store, such as a KMS key.

      " } }, "documentation":"

      Represents other information about a job required for a job worker to complete the job.

      " @@ -2466,7 +2466,7 @@ }, "accountId":{ "shape":"AccountId", - "documentation":"

      The AWS account ID associated with the job.

      " + "documentation":"

      The Amazon Web Services account ID associated with the job.

      " } }, "documentation":"

      Represents information about the details of a job.

      " @@ -2546,7 +2546,7 @@ "type":"structure", "members":{ }, - "documentation":"

      The number of pipelines associated with the AWS account has exceeded the limit allowed for the account.

      ", + "documentation":"

      The number of pipelines associated with the Amazon Web Services account has exceeded the limit allowed for the account.

      ", "exception":true }, "ListActionExecutionsInput":{ @@ -2831,7 +2831,7 @@ "type":"structure", "members":{ }, - "documentation":"

      The stage has failed in a later run of the pipeline and the pipelineExecutionId associated with the request is out of date.

      ", + "documentation":"

      The stage has failed in a later run of the pipeline and the pipelineExecutionId associated with the request is out of date.

      ", "exception":true }, "OutputArtifact":{ @@ -2916,7 +2916,7 @@ }, "roleArn":{ "shape":"RoleArn", - "documentation":"

      The Amazon Resource Name (ARN) for AWS CodePipeline to use to either perform actions with no actionRoleArn, or to use to assume roles for actions with an actionRoleArn.

      " + "documentation":"

      The Amazon Resource Name (ARN) for CodePipeline to use to either perform actions with no actionRoleArn, or to use to assume roles for actions with an actionRoleArn.

      " }, "artifactStore":{ "shape":"ArtifactStore", @@ -2924,7 +2924,7 @@ }, "artifactStores":{ "shape":"ArtifactStoreMap", - "documentation":"

      A mapping of artifactStore objects and their corresponding AWS Regions. There must be an artifact store for the pipeline Region and for each cross-region action in the pipeline.

      You must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores.

      " + "documentation":"

      A mapping of artifactStore objects and their corresponding Amazon Web Services Regions. There must be an artifact store for the pipeline Region and for each cross-region action in the pipeline.

      You must include either artifactStore or artifactStores in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use artifactStores.

      " }, "stages":{ "shape":"PipelineStageDeclarationList", @@ -3055,6 +3055,10 @@ "updated":{ "shape":"Timestamp", "documentation":"

      The date and time the pipeline was last updated, in timestamp format.

      " + }, + "pollingDisabledAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time that polling for source changes (periodic checks) was stopped for the pipeline, in timestamp format. You can migrate (update) a polling pipeline to use event-based change detection. For example, for a pipeline with a CodeCommit source, we recommend you migrate (update) your pipeline to use CloudWatch Events. To learn more, see Migrate polling pipelines to use event-based change detection in the CodePipeline User Guide.

      " } }, "documentation":"

      Information about a pipeline.

      " @@ -3309,7 +3313,7 @@ }, "continuationToken":{ "shape":"ContinuationToken", - "documentation":"

      A token generated by a job worker, such as an AWS CodeDeploy deployment ID, that a successful job provides to identify a custom action in progress. Future jobs use this token to identify the running instance of the action. It can be reused to return more information about the progress of the custom action. When the action is complete, no continuation token should be supplied.

      " + "documentation":"

      A token generated by a job worker, such as a CodeDeploy deployment ID, that a successful job provides to identify a custom action in progress. Future jobs use this token to identify the running instance of the action. It can be reused to return more information about the progress of the custom action. When the action is complete, no continuation token should be supplied.

      " }, "executionDetails":{ "shape":"ExecutionDetails", @@ -3366,7 +3370,7 @@ }, "continuationToken":{ "shape":"ContinuationToken", - "documentation":"

      A token generated by a job worker, such as an AWS CodeDeploy deployment ID, that a successful job provides to identify a partner action in progress. Future jobs use this token to identify the running instance of the action. It can be reused to return more information about the progress of the partner action. When the action is complete, no continuation token should be supplied.

      " + "documentation":"

      A token generated by a job worker, such as a CodeDeploy deployment ID, that a successful job provides to identify a partner action in progress. Future jobs use this token to identify the running instance of the action. It can be reused to return more information about the progress of the partner action. When the action is complete, no continuation token should be supplied.

      " }, "executionDetails":{ "shape":"ExecutionDetails", @@ -3572,11 +3576,11 @@ }, "revisionSummary":{ "shape":"RevisionSummary", - "documentation":"

      Summary information about the most recent revision of the artifact. For GitHub and AWS CodeCommit repositories, the commit message. For Amazon S3 buckets or actions, the user-provided content of a codepipeline-artifact-revision-summary key specified in the object metadata.

      " + "documentation":"

      Summary information about the most recent revision of the artifact. For GitHub and CodeCommit repositories, the commit message. For Amazon S3 buckets or actions, the user-provided content of a codepipeline-artifact-revision-summary key specified in the object metadata.

      " }, "revisionUrl":{ "shape":"Url", - "documentation":"

      The commit ID for the artifact revision. For artifacts stored in GitHub or AWS CodeCommit repositories, the commit ID is linked to a commit details page.

      " + "documentation":"

      The commit ID for the artifact revision. For artifacts stored in GitHub or CodeCommit repositories, the commit ID is linked to a commit details page.

      " } }, "documentation":"

      Information about the version (or revision) of a source artifact that initiated a pipeline execution.

      " @@ -3854,10 +3858,10 @@ }, "jobId":{ "shape":"JobId", - "documentation":"

      The identifier used to identify the job in AWS CodePipeline.

      " + "documentation":"

      The identifier used to identify the job in CodePipeline.

      " } }, - "documentation":"

      A response to a PollForThirdPartyJobs request returned by AWS CodePipeline when there is a job to be worked on by a partner action.

      " + "documentation":"

      A response to a PollForThirdPartyJobs request returned by CodePipeline when there is a job to be worked on by a partner action.

      " }, "ThirdPartyJobData":{ "type":"structure", @@ -3884,15 +3888,15 @@ }, "artifactCredentials":{ "shape":"AWSSessionCredentials", - "documentation":"

      Represents an AWS session credentials object. These credentials are temporary credentials that are issued by AWS Secure Token Service (STS). They can be used to access input and output artifacts in the S3 bucket used to store artifact for the pipeline in AWS CodePipeline.

      " + "documentation":"

      Represents an Amazon Web Services session credentials object. These credentials are temporary credentials that are issued by Amazon Web Services Security Token Service (STS). They can be used to access input and output artifacts in the S3 bucket used to store artifacts for the pipeline in CodePipeline.

      " }, "continuationToken":{ "shape":"ContinuationToken", - "documentation":"

      A system-generated token, such as a AWS CodeDeploy deployment ID, that a job requires to continue the job asynchronously.

      " + "documentation":"

      A system-generated token, such as a CodeDeploy deployment ID, that a job requires to continue the job asynchronously.

      " }, "encryptionKey":{ "shape":"EncryptionKey", - "documentation":"

      The encryption key used to encrypt and decrypt data in the artifact store for the pipeline, such as an AWS Key Management Service (AWS KMS) key. This is optional and might not be present.

      " + "documentation":"

      The encryption key used to encrypt and decrypt data in the artifact store for the pipeline, such as an Amazon Web Services Key Management Service (Amazon Web Services KMS) key. This is optional and might not be present.

      " } }, "documentation":"

      Represents information about the job data for a partner action.

      " @@ -3902,7 +3906,7 @@ "members":{ "id":{ "shape":"ThirdPartyJobId", - "documentation":"

      The identifier used to identify the job details in AWS CodePipeline.

      " + "documentation":"

      The identifier used to identify the job details in CodePipeline.

      " }, "data":{ "shape":"ThirdPartyJobData", @@ -3910,7 +3914,7 @@ }, "nonce":{ "shape":"Nonce", - "documentation":"

      A system-generated random number that AWS CodePipeline uses to ensure that the job is being worked on by only one job worker. Use this number in an AcknowledgeThirdPartyJob request.

      " + "documentation":"

      A system-generated random number that CodePipeline uses to ensure that the job is being worked on by only one job worker. Use this number in an AcknowledgeThirdPartyJob request.

      " } }, "documentation":"

      The details of a job sent in response to a GetThirdPartyJobDetails request.

      " @@ -4131,7 +4135,7 @@ }, "matchEquals":{ "shape":"MatchEquals", - "documentation":"

      The value selected by the JsonPath expression must match what is supplied in the MatchEquals field. Otherwise, the request is ignored. Properties from the target action configuration can be included as placeholders in this value by surrounding the action configuration key with curly brackets. For example, if the value supplied here is \"refs/heads/{Branch}\" and the target action has an action configuration property called \"Branch\" with a value of \"master\", the MatchEquals value is evaluated as \"refs/heads/master\". For a list of action configuration properties for built-in action types, see Pipeline Structure Reference Action Requirements.

      " + "documentation":"

      The value selected by the JsonPath expression must match what is supplied in the MatchEquals field. Otherwise, the request is ignored. Properties from the target action configuration can be included as placeholders in this value by surrounding the action configuration key with curly brackets. For example, if the value supplied here is \"refs/heads/{Branch}\" and the target action has an action configuration property called \"Branch\" with a value of \"main\", the MatchEquals value is evaluated as \"refs/heads/main\". For a list of action configuration properties for built-in action types, see Pipeline Structure Reference Action Requirements.

      " } }, "documentation":"

      The event criteria that specify when a webhook notification is sent to your URL.

      " @@ -4165,5 +4169,5 @@ "min":1 } }, - "documentation":"AWS CodePipeline

      Overview

      This is the AWS CodePipeline API Reference. This guide provides descriptions of the actions and data types for AWS CodePipeline. Some functionality for your pipeline can only be configured through the API. For more information, see the AWS CodePipeline User Guide.

      You can use the AWS CodePipeline API to work with pipelines, stages, actions, and transitions.

      Pipelines are models of automated release processes. Each pipeline is uniquely named, and consists of stages, actions, and transitions.

      You can work with pipelines by calling:

      • CreatePipeline, which creates a uniquely named pipeline.

      • DeletePipeline, which deletes the specified pipeline.

      • GetPipeline, which returns information about the pipeline structure and pipeline metadata, including the pipeline Amazon Resource Name (ARN).

      • GetPipelineExecution, which returns information about a specific execution of a pipeline.

      • GetPipelineState, which returns information about the current state of the stages and actions of a pipeline.

      • ListActionExecutions, which returns action-level details for past executions. The details include full stage and action-level details, including individual action duration, status, any errors that occurred during the execution, and input and output artifact location details.

      • ListPipelines, which gets a summary of all of the pipelines associated with your account.

      • ListPipelineExecutions, which gets a summary of the most recent executions for a pipeline.

      • StartPipelineExecution, which runs the most recent revision of an artifact through the pipeline.

      • StopPipelineExecution, which stops the specified pipeline execution from continuing through the pipeline.

      • UpdatePipeline, which updates a pipeline with edits or changes to the structure of the pipeline.

      Pipelines include stages. Each stage contains one or more actions that must complete before the next stage begins. A stage results in success or failure. If a stage fails, the pipeline stops at that stage and remains stopped until either a new version of an artifact appears in the source location, or a user takes action to rerun the most recent artifact through the pipeline. You can call GetPipelineState, which displays the status of a pipeline, including the status of stages in the pipeline, or GetPipeline, which returns the entire structure of the pipeline, including the stages of that pipeline. For more information about the structure of stages and actions, see AWS CodePipeline Pipeline Structure Reference.

      Pipeline stages include actions that are categorized into categories such as source or build actions performed in a stage of a pipeline. For example, you can use a source action to import artifacts into a pipeline from a source such as Amazon S3. Like stages, you do not work with actions directly in most cases, but you do define and interact with actions when working with pipeline operations such as CreatePipeline and GetPipelineState. Valid action categories are:

      • Source

      • Build

      • Test

      • Deploy

      • Approval

      • Invoke

      Pipelines also include transitions, which allow the transition of artifacts from one stage to the next in a pipeline after the actions in one stage complete.

      You can work with transitions by calling:

      Using the API to integrate with AWS CodePipeline

      For third-party integrators or developers who want to create their own integrations with AWS CodePipeline, the expected sequence varies from the standard API user. To integrate with AWS CodePipeline, developers need to work with the following items:

      Jobs, which are instances of an action. For example, a job for a source action might import a revision of an artifact from a source.

      You can work with jobs by calling:

      Third party jobs, which are instances of an action created by a partner action and integrated into AWS CodePipeline. Partner actions are created by members of the AWS Partner Network.

      You can work with third party jobs by calling:

      " + "documentation":"CodePipeline

      Overview

      This is the CodePipeline API Reference. This guide provides descriptions of the actions and data types for CodePipeline. Some functionality for your pipeline can only be configured through the API. For more information, see the CodePipeline User Guide.

      You can use the CodePipeline API to work with pipelines, stages, actions, and transitions.

      Pipelines are models of automated release processes. Each pipeline is uniquely named, and consists of stages, actions, and transitions.

      You can work with pipelines by calling:

      • CreatePipeline, which creates a uniquely named pipeline.

      • DeletePipeline, which deletes the specified pipeline.

      • GetPipeline, which returns information about the pipeline structure and pipeline metadata, including the pipeline Amazon Resource Name (ARN).

      • GetPipelineExecution, which returns information about a specific execution of a pipeline.

      • GetPipelineState, which returns information about the current state of the stages and actions of a pipeline.

      • ListActionExecutions, which returns action-level details for past executions. The details include full stage and action-level details, including individual action duration, status, any errors that occurred during the execution, and input and output artifact location details.

      • ListPipelines, which gets a summary of all of the pipelines associated with your account.

      • ListPipelineExecutions, which gets a summary of the most recent executions for a pipeline.

      • StartPipelineExecution, which runs the most recent revision of an artifact through the pipeline.

      • StopPipelineExecution, which stops the specified pipeline execution from continuing through the pipeline.

      • UpdatePipeline, which updates a pipeline with edits or changes to the structure of the pipeline.

      Pipelines include stages. Each stage contains one or more actions that must complete before the next stage begins. A stage results in success or failure. If a stage fails, the pipeline stops at that stage and remains stopped until either a new version of an artifact appears in the source location, or a user takes action to rerun the most recent artifact through the pipeline. You can call GetPipelineState, which displays the status of a pipeline, including the status of stages in the pipeline, or GetPipeline, which returns the entire structure of the pipeline, including the stages of that pipeline. For more information about the structure of stages and actions, see CodePipeline Pipeline Structure Reference.

      Pipeline stages include actions that are categorized into categories such as source or build actions performed in a stage of a pipeline. For example, you can use a source action to import artifacts into a pipeline from a source such as Amazon S3. Like stages, you do not work with actions directly in most cases, but you do define and interact with actions when working with pipeline operations such as CreatePipeline and GetPipelineState. Valid action categories are:

      • Source

      • Build

      • Test

      • Deploy

      • Approval

      • Invoke

      Pipelines also include transitions, which allow the transition of artifacts from one stage to the next in a pipeline after the actions in one stage complete.

      You can work with transitions by calling:

      Using the API to integrate with CodePipeline

      For third-party integrators or developers who want to create their own integrations with CodePipeline, the expected sequence varies from the standard API user. To integrate with CodePipeline, developers need to work with the following items:

      Jobs, which are instances of an action. For example, a job for a source action might import a revision of an artifact from a source.

      You can work with jobs by calling:

      Third party jobs, which are instances of an action created by a partner action and integrated into CodePipeline. Partner actions are created by members of the Amazon Web Services Partner Network.

      You can work with third party jobs by calling:

      " } diff --git a/services/codestar/pom.xml b/services/codestar/pom.xml index 9a13049d554a..6021d35f9477 100644 --- a/services/codestar/pom.xml +++ b/services/codestar/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT codestar AWS Java SDK :: Services :: AWS CodeStar diff --git a/services/codestarconnections/pom.xml b/services/codestarconnections/pom.xml index e0921ff3850f..3140afd1ac21 100644 --- a/services/codestarconnections/pom.xml +++ b/services/codestarconnections/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT codestarconnections AWS Java SDK :: Services :: CodeStar connections diff --git a/services/codestarnotifications/pom.xml b/services/codestarnotifications/pom.xml index 488d4bda8136..81778b9b0ad9 100644 --- a/services/codestarnotifications/pom.xml +++ b/services/codestarnotifications/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT codestarnotifications AWS Java SDK :: Services :: Codestar Notifications diff --git a/services/cognitoidentity/pom.xml b/services/cognitoidentity/pom.xml index 36fa464dc02d..26701255304c 100644 --- a/services/cognitoidentity/pom.xml +++ b/services/cognitoidentity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT cognitoidentity AWS Java SDK :: Services :: Amazon Cognito Identity diff --git a/services/cognitoidentityprovider/pom.xml b/services/cognitoidentityprovider/pom.xml index ec0a1233adee..4ffe2aa27ab3 100644 --- a/services/cognitoidentityprovider/pom.xml +++ b/services/cognitoidentityprovider/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT cognitoidentityprovider AWS Java SDK :: Services :: Amazon Cognito Identity Provider Service diff --git a/services/cognitosync/pom.xml b/services/cognitosync/pom.xml index aca98877eea9..ea9ad3a93cb8 100644 --- a/services/cognitosync/pom.xml +++ 
b/services/cognitosync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT cognitosync AWS Java SDK :: Services :: Amazon Cognito Sync diff --git a/services/comprehend/pom.xml b/services/comprehend/pom.xml index 59f282600ed5..e577e8f250f5 100644 --- a/services/comprehend/pom.xml +++ b/services/comprehend/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 comprehend diff --git a/services/comprehendmedical/pom.xml b/services/comprehendmedical/pom.xml index c20a28affe6b..70b08d8f6fd1 100644 --- a/services/comprehendmedical/pom.xml +++ b/services/comprehendmedical/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT comprehendmedical AWS Java SDK :: Services :: ComprehendMedical diff --git a/services/comprehendmedical/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/comprehendmedical/src/main/resources/codegen-resources/endpoint-rule-set.json index 2e5664d7c60c..f7fd1b5f06ee 100644 --- a/services/comprehendmedical/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/comprehendmedical/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom 
endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://comprehendmedical-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://comprehendmedical-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://comprehendmedical-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + 
true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://comprehendmedical.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -231,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://comprehendmedical-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://comprehendmedical.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -240,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://comprehendmedical.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://comprehendmedical.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git 
a/services/comprehendmedical/src/main/resources/codegen-resources/endpoint-tests.json b/services/comprehendmedical/src/main/resources/codegen-resources/endpoint-tests.json index df371f360387..cf5da3e37949 100644 --- a/services/comprehendmedical/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/comprehendmedical/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,224 +1,224 @@ { "testCases": [ { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://comprehendmedical-fips.ca-central-1.api.aws" + "url": "https://comprehendmedical.ap-southeast-2.amazonaws.com" } }, "params": { - "Region": "ca-central-1", - "UseFIPS": true, - "UseDualStack": true + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://comprehendmedical-fips.ca-central-1.amazonaws.com" + "url": "https://comprehendmedical.ca-central-1.amazonaws.com" } }, "params": { "Region": "ca-central-1", - "UseFIPS": true, + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://comprehendmedical.ca-central-1.api.aws" + "url": "https://comprehendmedical.eu-west-1.amazonaws.com" } }, "params": { - "Region": "ca-central-1", + "Region": "eu-west-1", "UseFIPS": false, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack 
disabled", "expect": { "endpoint": { - "url": "https://comprehendmedical.ca-central-1.amazonaws.com" + "url": "https://comprehendmedical.eu-west-2.amazonaws.com" } }, "params": { - "Region": "ca-central-1", + "Region": "eu-west-2", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://comprehendmedical-fips.us-west-2.api.aws" + "url": "https://comprehendmedical.us-east-1.amazonaws.com" } }, "params": { - "Region": "us-west-2", - "UseFIPS": true, - "UseDualStack": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://comprehendmedical-fips.us-west-2.amazonaws.com" + "url": "https://comprehendmedical-fips.us-east-1.amazonaws.com" } }, "params": { - "Region": "us-west-2", + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://comprehendmedical.us-west-2.api.aws" + "url": "https://comprehendmedical.us-east-2.amazonaws.com" } }, "params": { - "Region": "us-west-2", + "Region": "us-east-2", "UseFIPS": false, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://comprehendmedical.us-west-2.amazonaws.com" + "url": "https://comprehendmedical-fips.us-east-2.amazonaws.com" } }, "params": { - "Region": 
"us-west-2", - "UseFIPS": false, + "Region": "us-east-2", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://comprehendmedical-fips.eu-west-2.api.aws" + "url": "https://comprehendmedical.us-west-2.amazonaws.com" } }, "params": { - "Region": "eu-west-2", - "UseFIPS": true, - "UseDualStack": true + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://comprehendmedical-fips.eu-west-2.amazonaws.com" + "url": "https://comprehendmedical-fips.us-west-2.amazonaws.com" } }, "params": { - "Region": "eu-west-2", + "Region": "us-west-2", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://comprehendmedical.eu-west-2.api.aws" + "url": "https://comprehendmedical-fips.us-east-1.api.aws" } }, "params": { - "Region": "eu-west-2", - "UseFIPS": false, + "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://comprehendmedical.eu-west-2.amazonaws.com" + "url": "https://comprehendmedical.us-east-1.api.aws" } }, "params": { - "Region": "eu-west-2", + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", + 
"documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://comprehendmedical-fips.eu-west-1.api.aws" + "url": "https://comprehendmedical-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "eu-west-1", + "Region": "cn-north-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://comprehendmedical-fips.eu-west-1.amazonaws.com" + "url": "https://comprehendmedical-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "Region": "eu-west-1", + "Region": "cn-north-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://comprehendmedical.eu-west-1.api.aws" + "url": "https://comprehendmedical.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "eu-west-1", + "Region": "cn-north-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://comprehendmedical.eu-west-1.amazonaws.com" + "url": "https://comprehendmedical.cn-north-1.amazonaws.com.cn" } }, "params": { - "Region": "eu-west-1", + "Region": "cn-north-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://comprehendmedical-fips.us-gov-west-1.api.aws" + "url": 
"https://comprehendmedical.us-gov-west-1.amazonaws.com" } }, "params": { "Region": "us-gov-west-1", - "UseFIPS": true, - "UseDualStack": true + "UseFIPS": false, + "UseDualStack": false } }, { @@ -235,189 +235,155 @@ } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://comprehendmedical.us-gov-west-1.api.aws" - } - }, - "params": { - "Region": "us-gov-west-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://comprehendmedical.us-gov-west-1.amazonaws.com" + "url": "https://comprehendmedical-fips.us-gov-east-1.api.aws" } }, "params": { - "Region": "us-gov-west-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://comprehendmedical-fips.ap-southeast-2.api.aws" - } - }, - "params": { - "Region": "ap-southeast-2", + "Region": "us-gov-east-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://comprehendmedical-fips.ap-southeast-2.amazonaws.com" + "url": "https://comprehendmedical-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "Region": "ap-southeast-2", + "Region": "us-gov-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://comprehendmedical.ap-southeast-2.api.aws" + "url": 
"https://comprehendmedical.us-gov-east-1.api.aws" } }, "params": { - "Region": "ap-southeast-2", + "Region": "us-gov-east-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://comprehendmedical.ap-southeast-2.amazonaws.com" + "url": "https://comprehendmedical.us-gov-east-1.amazonaws.com" } }, "params": { - "Region": "ap-southeast-2", + "Region": "us-gov-east-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://comprehendmedical-fips.us-east-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-east-1", + "Region": "us-iso-east-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://comprehendmedical-fips.us-east-1.amazonaws.com" + "url": "https://comprehendmedical-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "Region": "us-east-1", + "Region": "us-iso-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://comprehendmedical.us-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-east-1", + "Region": "us-iso-east-1", "UseFIPS": false, "UseDualStack": true } }, 
{ - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://comprehendmedical.us-east-1.amazonaws.com" + "url": "https://comprehendmedical.us-iso-east-1.c2s.ic.gov" } }, "params": { - "Region": "us-east-1", + "Region": "us-iso-east-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://comprehendmedical-fips.us-east-2.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-east-2", + "Region": "us-isob-east-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://comprehendmedical-fips.us-east-2.amazonaws.com" + "url": "https://comprehendmedical-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "Region": "us-east-2", + "Region": "us-isob-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://comprehendmedical.us-east-2.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-east-2", + "Region": "us-isob-east-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", 
"expect": { "endpoint": { - "url": "https://comprehendmedical.us-east-2.amazonaws.com" + "url": "https://comprehendmedical.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "Region": "us-east-2", + "Region": "us-isob-east-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -430,6 +396,19 @@ "Endpoint": "https://example.com" } }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, { "documentation": "For custom endpoint with fips enabled and dualstack disabled", "expect": { @@ -453,6 +432,12 @@ "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/comprehendmedical/src/main/resources/codegen-resources/service-2.json b/services/comprehendmedical/src/main/resources/codegen-resources/service-2.json index d6ad65016602..8ed7cac3ccd0 100644 --- a/services/comprehendmedical/src/main/resources/codegen-resources/service-2.json +++ b/services/comprehendmedical/src/main/resources/codegen-resources/service-2.json @@ -1067,7 +1067,8 @@ "type":"string", "enum":[ "OVERLAP", - "SYSTEM_ORGAN_SITE" + "SYSTEM_ORGAN_SITE", + "QUALITY" ] }, "ICD10CMTrait":{ @@ -1473,7 +1474,9 @@ "TEST_UNIT", "DIRECTION", "SYSTEM_ORGAN_SITE", - "AMOUNT" + "AMOUNT", + "USAGE", + "QUALITY" ] }, "ResourceNotFoundException":{ @@ -1641,7 +1644,10 @@ }, "RxNormTraitName":{ "type":"string", - "enum":["NEGATION"] + "enum":[ + "NEGATION", + "PAST_HISTORY" + ] }, "S3Bucket":{ "type":"string", @@ 
-1834,7 +1840,8 @@ "TEST_VALUE", "TEST_UNITS", "DIRECTION", - "SYSTEM_ORGAN_SITE" + "SYSTEM_ORGAN_SITE", + "TEST_UNIT" ] }, "SNOMEDCTTrait":{ diff --git a/services/computeoptimizer/pom.xml b/services/computeoptimizer/pom.xml index 08851a50e7eb..e8a88d8b56ae 100644 --- a/services/computeoptimizer/pom.xml +++ b/services/computeoptimizer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT computeoptimizer AWS Java SDK :: Services :: Compute Optimizer diff --git a/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json b/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json index 67cc6ce1b96a..53a841b3d833 100644 --- a/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json +++ b/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json @@ -1499,7 +1499,9 @@ "RecommendationOptionsMigrationEffort", "EffectiveRecommendationPreferencesExternalMetricsSource", "InstanceState", - "Tags" + "Tags", + "ExternalMetricStatusCode", + "ExternalMetricStatusReason" ] }, "ExportableInstanceFields":{ @@ -1581,6 +1583,36 @@ "type":"list", "member":{"shape":"ExportableVolumeField"} }, + "ExternalMetricStatus":{ + "type":"structure", + "members":{ + "statusCode":{ + "shape":"ExternalMetricStatusCode", + "documentation":"

      The status code for Compute Optimizer's integration with an external metrics provider.

      " + }, + "statusReason":{ + "shape":"ExternalMetricStatusReason", + "documentation":"

      The reason for Compute Optimizer's integration status with your external metric provider.

      " + } + }, + "documentation":"

      Describes Compute Optimizer's integration status with your chosen external metric provider. For example, Datadog.

      " + }, + "ExternalMetricStatusCode":{ + "type":"string", + "enum":[ + "NO_EXTERNAL_METRIC_SET", + "INTEGRATION_SUCCESS", + "DATADOG_INTEGRATION_ERROR", + "DYNATRACE_INTEGRATION_ERROR", + "NEWRELIC_INTEGRATION_ERROR", + "INSTANA_INTEGRATION_ERROR", + "INSUFFICIENT_DATADOG_METRICS", + "INSUFFICIENT_DYNATRACE_METRICS", + "INSUFFICIENT_NEWRELIC_METRICS", + "INSUFFICIENT_INSTANA_METRICS" + ] + }, + "ExternalMetricStatusReason":{"type":"string"}, "ExternalMetricsPreference":{ "type":"structure", "members":{ @@ -2247,6 +2279,10 @@ "tags":{ "shape":"Tags", "documentation":"

      A list of tags assigned to your Amazon EC2 instance recommendations.

      " + }, + "externalMetricStatus":{ + "shape":"ExternalMetricStatus", + "documentation":"

      An object that describes Compute Optimizer's integration status with your external metrics provider.

      " } }, "documentation":"

      Describes an Amazon EC2 instance recommendation.

      " diff --git a/services/config/pom.xml b/services/config/pom.xml index a13e8dd1b7d2..44d9d2d07acb 100644 --- a/services/config/pom.xml +++ b/services/config/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT config AWS Java SDK :: Services :: AWS Config diff --git a/services/config/src/main/resources/codegen-resources/service-2.json b/services/config/src/main/resources/codegen-resources/service-2.json index 9a8b14290b64..5b5ffb3fda3b 100644 --- a/services/config/src/main/resources/codegen-resources/service-2.json +++ b/services/config/src/main/resources/codegen-resources/service-2.json @@ -403,7 +403,7 @@ "errors":[ {"shape":"NoSuchConfigurationRecorderException"} ], - "documentation":"

      Returns the current status of the specified configuration recorder as well as the status of the last recording event for the recorder. If a configuration recorder is not specified, this action returns the status of all configuration recorders associated with the account.

      Currently, you can specify only one configuration recorder per region in your account. For a detailed status of recording events over time, add your Config events to Amazon CloudWatch metrics and use CloudWatch metrics.

      " + "documentation":"

      Returns the current status of the specified configuration recorder as well as the status of the last recording event for the recorder. If a configuration recorder is not specified, this action returns the status of all configuration recorders associated with the account.

      You can specify only one configuration recorder for each Amazon Web Services Region for each account. For a detailed status of recording events over time, add your Config events to Amazon CloudWatch metrics and use CloudWatch metrics.

      " }, "DescribeConfigurationRecorders":{ "name":"DescribeConfigurationRecorders", @@ -416,7 +416,7 @@ "errors":[ {"shape":"NoSuchConfigurationRecorderException"} ], - "documentation":"

      Returns the details for the specified configuration recorders. If the configuration recorder is not specified, this action returns the details for all configuration recorders associated with the account.

      Currently, you can specify only one configuration recorder per region in your account.

      " + "documentation":"

      Returns the details for the specified configuration recorders. If the configuration recorder is not specified, this action returns the details for all configuration recorders associated with the account.

      You can specify only one configuration recorder for each Amazon Web Services Region for each account.

      " }, "DescribeConformancePackCompliance":{ "name":"DescribeConformancePackCompliance", @@ -1058,7 +1058,7 @@ {"shape":"InvalidRoleException"}, {"shape":"InvalidRecordingGroupException"} ], - "documentation":"

      Creates a new configuration recorder to record the selected resource configurations.

      You can use this action to change the role roleARN or the recordingGroup of an existing recorder. To change the role, call the action on the existing configuration recorder and specify a role.

      Currently, you can specify only one configuration recorder per region in your account.

      If ConfigurationRecorder does not have the recordingGroup parameter specified, the default is to record all supported resource types.

      " + "documentation":"

      Creates a new configuration recorder to record configuration changes for specified resource types.

      You can also use this action to change the roleARN or the recordingGroup of an existing recorder. For more information, see Managing the Configuration Recorder in the Config Developer Guide.

      You can specify only one configuration recorder for each Amazon Web Services Region for each account.

      If the configuration recorder does not have the recordingGroup field specified, the default is to record all supported resource types.

      " }, "PutConformancePack":{ "name":"PutConformancePack", @@ -1075,7 +1075,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"MaxNumberOfConformancePacksExceededException"} ], - "documentation":"

      Creates or updates a conformance pack. A conformance pack is a collection of Config rules that can be easily deployed in an account and a region and across an organization. For information on how many conformance packs you can have per account, see Service Limits in the Config Developer Guide.

      This API creates a service-linked role AWSServiceRoleForConfigConforms in your account. The service-linked role is created only when the role does not exist in your account.

      You must specify only one of the follow parameters: TemplateS3Uri, TemplateBody or TemplateSSMDocumentDetails.

      " + "documentation":"

      Creates or updates a conformance pack. A conformance pack is a collection of Config rules that can be easily deployed in an account and a region and across an organization. For information on how many conformance packs you can have per account, see Service Limits in the Config Developer Guide.

      This API creates a service-linked role AWSServiceRoleForConfigConforms in your account. The service-linked role is created only when the role does not exist in your account.

      You must specify only one of the following parameters: TemplateS3Uri, TemplateBody or TemplateSSMDocumentDetails.

      " }, "PutDeliveryChannel":{ "name":"PutDeliveryChannel", @@ -1163,7 +1163,7 @@ {"shape":"OrganizationAllFeaturesNotEnabledException"}, {"shape":"NoAvailableOrganizationException"} ], - "documentation":"

      Deploys conformance packs across member accounts in an Amazon Web Services Organization. For information on how many organization conformance packs and how many Config rules you can have per account, see Service Limits in the Config Developer Guide.

      Only a management account and a delegated administrator can call this API. When calling this API with a delegated administrator, you must ensure Organizations ListDelegatedAdministrator permissions are added. An organization can have up to 3 delegated administrators.

      This API enables organization service access for config-multiaccountsetup.amazonaws.com through the EnableAWSServiceAccess action and creates a service-linked role AWSServiceRoleForConfigMultiAccountSetup in the management or delegated administrator account of your organization. The service-linked role is created only when the role does not exist in the caller account. To use this API with delegated administrator, register a delegated administrator by calling Amazon Web Services Organization register-delegate-admin for config-multiaccountsetup.amazonaws.com.

      Prerequisite: Ensure you call EnableAllFeatures API to enable all features in an organization.

      You must specify either the TemplateS3Uri or the TemplateBody parameter, but not both. If you provide both Config uses the TemplateS3Uri parameter and ignores the TemplateBody parameter.

      Config sets the state of a conformance pack to CREATE_IN_PROGRESS and UPDATE_IN_PROGRESS until the conformance pack is created or updated. You cannot update a conformance pack while it is in this state.

      " + "documentation":"

      Deploys conformance packs across member accounts in an Amazon Web Services Organization. For information on how many organization conformance packs and how many Config rules you can have per account, see Service Limits in the Config Developer Guide.

      Only a management account and a delegated administrator can call this API. When calling this API with a delegated administrator, you must ensure Organizations ListDelegatedAdministrator permissions are added. An organization can have up to 3 delegated administrators.

      This API enables organization service access for config-multiaccountsetup.amazonaws.com through the EnableAWSServiceAccess action and creates a service-linked role AWSServiceRoleForConfigMultiAccountSetup in the management or delegated administrator account of your organization. The service-linked role is created only when the role does not exist in the caller account. To use this API with delegated administrator, register a delegated administrator by calling Amazon Web Services Organization register-delegate-admin for config-multiaccountsetup.amazonaws.com.

      Prerequisite: Ensure you call EnableAllFeatures API to enable all features in an organization.

      You must specify either the TemplateS3Uri or the TemplateBody parameter, but not both. If you provide both Config uses the TemplateS3Uri parameter and ignores the TemplateBody parameter.

      Config sets the state of a conformance pack to CREATE_IN_PROGRESS and UPDATE_IN_PROGRESS until the conformance pack is created or updated. You cannot update a conformance pack while it is in this state.

      " }, "PutRemediationConfigurations":{ "name":"PutRemediationConfigurations", @@ -1191,7 +1191,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InsufficientPermissionsException"} ], - "documentation":"

      A remediation exception is when a specified resource is no longer considered for auto-remediation. This API adds a new exception or updates an existing exception for a specified resource with a specified Config rule.

      Config generates a remediation exception when a problem occurs running a remediation action for a specified resource. Remediation exceptions blocks auto-remediation until the exception is cleared.

      When placing an exception on an Amazon Web Services resource, it is recommended that remediation is set as manual remediation until the given Config rule for the specified resource evaluates the resource as NON_COMPLIANT. Once the resource has been evaluated as NON_COMPLIANT, you can add remediation exceptions and change the remediation type back from Manual to Auto if you want to use auto-remediation. Otherwise, using auto-remediation before a NON_COMPLIANT evaluation result can delete resources before the exception is applied.

      Placing an exception can only be performed on resources that are NON_COMPLIANT. If you use this API for COMPLIANT resources or resources that are NOT_APPLICABLE, a remediation exception will not be generated. For more information on the conditions that initiate the possible Config evaluation results, see Concepts | Config Rules in the Config Developer Guide.

      " + "documentation":"

      A remediation exception is when a specified resource is no longer considered for auto-remediation. This API adds a new exception or updates an existing exception for a specified resource with a specified Config rule.

      Config generates a remediation exception when a problem occurs running a remediation action for a specified resource. Remediation exceptions block auto-remediation until the exception is cleared.

      When placing an exception on an Amazon Web Services resource, it is recommended that remediation is set as manual remediation until the given Config rule for the specified resource evaluates the resource as NON_COMPLIANT. Once the resource has been evaluated as NON_COMPLIANT, you can add remediation exceptions and change the remediation type back from Manual to Auto if you want to use auto-remediation. Otherwise, using auto-remediation before a NON_COMPLIANT evaluation result can delete resources before the exception is applied.

      Placing an exception can only be performed on resources that are NON_COMPLIANT. If you use this API for COMPLIANT resources or resources that are NOT_APPLICABLE, a remediation exception will not be generated. For more information on the conditions that initiate the possible Config evaluation results, see Concepts | Config Rules in the Config Developer Guide.

      " }, "PutResourceConfig":{ "name":"PutResourceConfig", @@ -1251,7 +1251,7 @@ {"shape":"InvalidLimitException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

      Accepts a structured query language (SQL) SELECT command and an aggregator to query configuration state of Amazon Web Services resources across multiple accounts and regions, performs the corresponding search, and returns resource configurations matching the properties.

      For more information about query components, see the Query Components section in the Config Developer Guide.

      If you run an aggregation query (i.e., using GROUP BY or using aggregate functions such as COUNT; e.g., SELECT resourceId, COUNT(*) WHERE resourceType = 'AWS::IAM::Role' GROUP BY resourceId) and do not specify the MaxResults or the Limit query parameters, the default page size is set to 500.

      If you run a non-aggregation query (i.e., not using GROUP BY or aggregate function; e.g., SELECT * WHERE resourceType = 'AWS::IAM::Role') and do not specify the MaxResults or the Limit query parameters, the default page size is set to 25.

      " + "documentation":"

      Accepts a structured query language (SQL) SELECT command and an aggregator to query configuration state of Amazon Web Services resources across multiple accounts and regions, performs the corresponding search, and returns resource configurations matching the properties.

      For more information about query components, see the Query Components section in the Config Developer Guide.

      If you run an aggregation query (i.e., using GROUP BY or using aggregate functions such as COUNT; e.g., SELECT resourceId, COUNT(*) WHERE resourceType = 'AWS::IAM::Role' GROUP BY resourceId) and do not specify the MaxResults or the Limit query parameters, the default page size is set to 500.

      If you run a non-aggregation query (i.e., not using GROUP BY or aggregate function; e.g., SELECT * WHERE resourceType = 'AWS::IAM::Role') and do not specify the MaxResults or the Limit query parameters, the default page size is set to 25.

      " }, "SelectResourceConfig":{ "name":"SelectResourceConfig", @@ -2430,18 +2430,18 @@ "members":{ "name":{ "shape":"RecorderName", - "documentation":"

      The name of the recorder. By default, Config automatically assigns the name \"default\" when creating the configuration recorder. You cannot change the assigned name.

      " + "documentation":"

      The name of the configuration recorder. Config automatically assigns the name of \"default\" when creating the configuration recorder.

      You cannot change the name of the configuration recorder after it has been created. To change the configuration recorder name, you must delete it and create a new configuration recorder with a new name.

      " }, "roleARN":{ "shape":"String", - "documentation":"

      Amazon Resource Name (ARN) of the IAM role used to describe the Amazon Web Services resources associated with the account.

      While the API model does not require this field, the server will reject a request without a defined roleARN for the configuration recorder.

      " + "documentation":"

      Amazon Resource Name (ARN) of the IAM role assumed by Config and used by the configuration recorder.

      While the API model does not require this field, the server will reject a request without a defined roleARN for the configuration recorder.

      Pre-existing Config role

      If you have used an Amazon Web Services service that uses Config, such as Security Hub or Control Tower, and an Config role has already been created, make sure that the IAM role that you use when setting up Config keeps the same minimum permissions as the already created Config role. You must do this so that the other Amazon Web Services service continues to run as expected.

      For example, if Control Tower has an IAM role that allows Config to read Amazon Simple Storage Service (Amazon S3) objects, make sure that the same permissions are granted within the IAM role you use when setting up Config. Otherwise, it may interfere with how Control Tower operates. For more information about IAM roles for Config, see Identity and Access Management for Config in the Config Developer Guide.

      " }, "recordingGroup":{ "shape":"RecordingGroup", - "documentation":"

      Specifies the types of Amazon Web Services resources for which Config records configuration changes.

      " + "documentation":"

      Specifies which resource types Config records for configuration changes.

      High Number of Config Evaluations

      You may notice increased activity in your account during your initial month recording with Config when compared to subsequent months. During the initial bootstrapping process, Config runs evaluations on all the resources in your account that you have selected for Config to record.

      If you are running ephemeral workloads, you may see increased activity from Config as it records configuration changes associated with creating and deleting these temporary resources. An ephemeral workload is a temporary use of computing resources that are loaded and run when needed. Examples include Amazon Elastic Compute Cloud (Amazon EC2) Spot Instances, Amazon EMR jobs, and Auto Scaling. If you want to avoid the increased activity from running ephemeral workloads, you can run these types of workloads in a separate account with Config turned off to avoid increased configuration recording and rule evaluations.

      " } }, - "documentation":"

      An object that represents the recording of configuration changes of an Amazon Web Services resource.

      " + "documentation":"

      Records configuration changes to specified resource types. For more information about the configuration recorder, see Managing the Configuration Recorder in the Config Developer Guide.

      " }, "ConfigurationRecorderList":{ "type":"list", @@ -3413,7 +3413,7 @@ "documentation":"

      The mode of an evaluation. The valid values are Detective or Proactive.

      " } }, - "documentation":"

      Returns a filtered list of Detective or Proactive Config rules. By default, if the filter is not defined, this API returns an unfiltered list. For more information on Detective or Proactive Config rules, see Evaluation Mode in the Config Developer Guide.

      " + "documentation":"

      Returns a filtered list of Detective or Proactive Config rules. By default, if the filter is not defined, this API returns an unfiltered list. For more information on Detective or Proactive Config rules, see Evaluation Mode in the Config Developer Guide.

      " }, "DescribeConfigRulesRequest":{ "type":"structure", @@ -3428,7 +3428,7 @@ }, "Filters":{ "shape":"DescribeConfigRulesFilters", - "documentation":"

      Returns a list of Detective or Proactive Config rules. By default, this API returns an unfiltered list. For more information on Detective or Proactive Config rules, see Evaluation Mode in the Config Developer Guide.

      " + "documentation":"

      Returns a list of Detective or Proactive Config rules. By default, this API returns an unfiltered list. For more information on Detective or Proactive Config rules, see Evaluation Mode in the Config Developer Guide.

      " } }, "documentation":"

      " @@ -4155,6 +4155,16 @@ "max":1000, "min":0 }, + "ExclusionByResourceTypes":{ + "type":"structure", + "members":{ + "resourceTypes":{ + "shape":"ResourceTypeList", + "documentation":"

      A comma-separated list of resource types to exclude from recording by the configuration recorder.

      " + } + }, + "documentation":"

      Specifies whether the configuration recorder excludes resource types from being recorded. Use the resourceTypes field to enter a comma-separated list of resource types to exclude as exemptions.

      " + }, "ExecutionControls":{ "type":"structure", "members":{ @@ -4976,7 +4986,7 @@ "type":"structure", "members":{ }, - "documentation":"

      You have provided a configuration recorder name that is not valid.

      ", + "documentation":"

      You have provided a name for the configuration recorder that is not valid.

      ", "exception":true }, "InvalidDeliveryChannelNameException":{ @@ -5018,7 +5028,7 @@ "type":"structure", "members":{ }, - "documentation":"

      Config throws an exception if the recording group does not contain a valid list of resource types. Values that are not valid might also be incorrectly formatted.

      ", + "documentation":"

      Indicates one of the following errors:

      • You have provided a combination of parameter values that is not valid. For example:

      • Every parameter is either null, false, or empty.

      • You have reached the limit of the number of resource types you can provide for the recording group.

      • You have provided resource types or a recording strategy that are not valid.

      ", "exception":true }, "InvalidResultTokenException":{ @@ -5032,7 +5042,7 @@ "type":"structure", "members":{ }, - "documentation":"

      You have provided a null or empty role ARN.

      ", + "documentation":"

      You have provided a null or empty Amazon Resource Name (ARN) for the IAM role assumed by Config and used by the configuration recorder.

      ", "exception":true }, "InvalidS3KeyPrefixException":{ @@ -5323,14 +5333,14 @@ "type":"structure", "members":{ }, - "documentation":"

      You have reached the limit of the number of recorders you can create.

      ", + "documentation":"

      You have reached the limit of the number of configuration recorders you can create.

      ", "exception":true }, "MaxNumberOfConformancePacksExceededException":{ "type":"structure", "members":{ }, - "documentation":"

      You have reached the limit of the number of conformance packs you can create in an account. For more information, see Service Limits in the Config Developer Guide.

      ", + "documentation":"

      You have reached the limit of the number of conformance packs you can create in an account. For more information, see Service Limits in the Config Developer Guide.

      ", "exception":true }, "MaxNumberOfDeliveryChannelsExceededException":{ @@ -5344,14 +5354,14 @@ "type":"structure", "members":{ }, - "documentation":"

      You have reached the limit of the number of organization Config rules you can create. For more information, see see Service Limits in the Config Developer Guide.

      ", + "documentation":"

      You have reached the limit of the number of organization Config rules you can create. For more information, see Service Limits in the Config Developer Guide.

      ", "exception":true }, "MaxNumberOfOrganizationConformancePacksExceededException":{ "type":"structure", "members":{ }, - "documentation":"

      You have reached the limit of the number of organization conformance packs you can create in an account. For more information, see Service Limits in the Config Developer Guide.

      ", + "documentation":"

      You have reached the limit of the number of organization conformance packs you can create in an account. For more information, see Service Limits in the Config Developer Guide.

      ", "exception":true }, "MaxNumberOfRetentionConfigurationsExceededException":{ @@ -5925,7 +5935,7 @@ "documentation":"

      A list of accounts that you can enable debug logging for your organization Config Custom Policy rule. List is null when debug logging is enabled for all accounts.

      " } }, - "documentation":"

      An object that specifies metadata for your organization Config Custom Policy rule including the runtime system in use, which accounts have debug logging enabled, and other custom rule metadata such as resource type, resource ID of Amazon Web Services resource, and organization trigger types that trigger Config to evaluate Amazon Web Services resources against a rule.

      " + "documentation":"

      Metadata for your organization Config Custom Policy rule including the runtime system in use, which accounts have debug logging enabled, and other custom rule metadata such as resource type, resource ID of Amazon Web Services resource, and organization trigger types that trigger Config to evaluate Amazon Web Services resources against a rule.

      " }, "OrganizationCustomRuleMetadata":{ "type":"structure", @@ -5971,7 +5981,7 @@ "documentation":"

      The optional part of a key-value pair that makes up a tag. A value acts as a descriptor within a tag category (key).

      " } }, - "documentation":"

      An object that specifies organization custom rule metadata such as resource type, resource ID of Amazon Web Services resource, Lambda function ARN, and organization trigger types that trigger Config to evaluate your Amazon Web Services resources against a rule. It also provides the frequency with which you want Config to run evaluations for the rule if the trigger type is periodic.

      " + "documentation":"

      Organization custom rule metadata such as resource type, resource ID of Amazon Web Services resource, Lambda function ARN, and organization trigger types that trigger Config to evaluate your Amazon Web Services resources against a rule. It also provides the frequency with which you want Config to run evaluations for the rule if the trigger type is periodic.

      " }, "OrganizationManagedRuleMetadata":{ "type":"structure", @@ -6010,7 +6020,7 @@ "documentation":"

      The optional part of a key-value pair that makes up a tag. A value acts as a descriptor within a tag category (key).

      " } }, - "documentation":"

      An object that specifies organization managed rule metadata such as resource type and ID of Amazon Web Services resource along with the rule identifier. It also provides the frequency with which you want Config to run evaluations for the rule if the trigger type is periodic.

      " + "documentation":"

      Organization managed rule metadata such as resource type and ID of Amazon Web Services resource along with the rule identifier. It also provides the frequency with which you want Config to run evaluations for the rule if the trigger type is periodic.

      " }, "OrganizationResourceDetailedStatus":{ "type":"string", @@ -6214,7 +6224,7 @@ "members":{ "ConfigurationRecorder":{ "shape":"ConfigurationRecorder", - "documentation":"

      The configuration recorder object that records each configuration change made to the resources.

      " + "documentation":"

      An object for the configuration recorder to record configuration changes for specified resource types.

      " } }, "documentation":"

      The input for the PutConfigurationRecorder action.

      " @@ -6590,18 +6600,44 @@ "members":{ "allSupported":{ "shape":"AllSupported", - "documentation":"

      Specifies whether Config records configuration changes for every supported type of regional resource.

      If you set this option to true, when Config adds support for a new type of regional resource, it starts recording resources of that type automatically.

      If you set this option to true, you cannot enumerate a list of resourceTypes.

      " + "documentation":"

      Specifies whether Config records configuration changes for all supported regional resource types.

      If you set this field to true, when Config adds support for a new type of regional resource, Config starts recording resources of that type automatically.

      If you set this field to true, you cannot enumerate specific resource types to record in the resourceTypes field of RecordingGroup, or to exclude in the resourceTypes field of ExclusionByResourceTypes.

      " }, "includeGlobalResourceTypes":{ "shape":"IncludeGlobalResourceTypes", - "documentation":"

      Specifies whether Config includes all supported types of global resources (for example, IAM resources) with the resources that it records.

      Before you can set this option to true, you must set the allSupported option to true.

      If you set this option to true, when Config adds support for a new type of global resource, it starts recording resources of that type automatically.

      The configuration details for any global resource are the same in all regions. To prevent duplicate configuration items, you should consider customizing Config in only one region to record global resources.

      " + "documentation":"

      Specifies whether Config records configuration changes for all supported global resources.

      Before you set this field to true, set the allSupported field of RecordingGroup to true. Optionally, you can set the useOnly field of RecordingStrategy to ALL_SUPPORTED_RESOURCE_TYPES.

      If you set this field to true, when Config adds support for a new type of global resource in the Region where you set up the configuration recorder, Config starts recording resources of that type automatically.

      If you set this field to false but list global resource types in the resourceTypes field of RecordingGroup, Config will still record configuration changes for those specified resource types regardless of whether you set the includeGlobalResourceTypes field to false.

      If you do not want to record configuration changes to global resource types, make sure to not list them in the resourceTypes field in addition to setting the includeGlobalResourceTypes field to false.

      " }, "resourceTypes":{ "shape":"ResourceTypeList", - "documentation":"

      A comma-separated list that specifies the types of Amazon Web Services resources for which Config records configuration changes (for example, AWS::EC2::Instance or AWS::CloudTrail::Trail).

      To record all configuration changes, you must set the allSupported option to true.

      If you set the AllSupported option to false and populate the ResourceTypes option with values, when Config adds support for a new type of resource, it will not record resources of that type unless you manually add that type to your recording group.

      For a list of valid resourceTypes values, see the resourceType Value column in Supported Amazon Web Services resource Types.

      " + "documentation":"

      A comma-separated list that specifies which resource types Config records.

      Optionally, you can set the useOnly field of RecordingStrategy to INCLUSION_BY_RESOURCE_TYPES.

      To record all configuration changes, set the allSupported field of RecordingGroup to true, and either omit this field or don't specify any resource types in this field. If you set the allSupported field to false and specify values for resourceTypes, when Config adds support for a new type of resource, it will not record resources of that type unless you manually add that type to your recording group.

      For a list of valid resourceTypes values, see the Resource Type Value column in Supported Amazon Web Services Resource Types in the Config developer guide.

      Region Availability

      Before specifying a resource type for Config to track, check Resource Coverage by Region Availability to see if the resource type is supported in the Amazon Web Services Region where you set up Config. If a resource type is supported by Config in at least one Region, you can enable the recording of that resource type in all Regions supported by Config, even if the specified resource type is not supported in the Amazon Web Services Region where you set up Config.

      " + }, + "exclusionByResourceTypes":{ + "shape":"ExclusionByResourceTypes", + "documentation":"

      An object that specifies how Config excludes resource types from being recorded by the configuration recorder.

      To use this option, you must set the useOnly field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES.

      " + }, + "recordingStrategy":{ + "shape":"RecordingStrategy", + "documentation":"

      An object that specifies the recording strategy for the configuration recorder.

      • If you set the useOnly field of RecordingStrategy to ALL_SUPPORTED_RESOURCE_TYPES, Config records configuration changes for all supported regional resource types. You also must set the allSupported field of RecordingGroup to true. When Config adds support for a new type of regional resource, Config automatically starts recording resources of that type.

      • If you set the useOnly field of RecordingStrategy to INCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for only the resource types you specify in the resourceTypes field of RecordingGroup.

      • If you set the useOnly field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for all supported resource types except the resource types that you specify as exemptions to exclude from being recorded in the resourceTypes field of ExclusionByResourceTypes.

      The recordingStrategy field is optional when you set the allSupported field of RecordingGroup to true.

      The recordingStrategy field is optional when you list resource types in the resourceTypes field of RecordingGroup.

      The recordingStrategy field is required if you list resource types to exclude from recording in the resourceTypes field of ExclusionByResourceTypes.

      If you choose EXCLUSION_BY_RESOURCE_TYPES for the recording strategy, the exclusionByResourceTypes field will override other properties in the request.

      For example, even if you set includeGlobalResourceTypes to false, global resource types will still be automatically recorded in this option unless those resource types are specifically listed as exemptions in the resourceTypes field of exclusionByResourceTypes.

      By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES recording strategy, when Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types, Config starts recording resources of that type automatically.

      " } }, - "documentation":"

      Specifies which Amazon Web Services resource types Config records for configuration changes. In the recording group, you specify whether you want to record all supported resource types or only specific types of resources.

      By default, Config records the configuration changes for all supported types of regional resources that Config discovers in the region in which it is running. Regional resources are tied to a region and can be used only in that region. Examples of regional resources are EC2 instances and EBS volumes.

      You can also have Config record supported types of global resources. Global resources are not tied to a specific region and can be used in all regions. The global resource types that Config supports include IAM users, groups, roles, and customer managed policies.

      Global resource types onboarded to Config recording after February 2022 will only be recorded in the service's home region for the commercial partition and Amazon Web Services GovCloud (US) West for the GovCloud partition. You can view the Configuration Items for these new global resource types only in their home region and Amazon Web Services GovCloud (US) West.

      Supported global resource types onboarded before February 2022 such as AWS::IAM::Group, AWS::IAM::Policy, AWS::IAM::Role, AWS::IAM::User remain unchanged, and they will continue to deliver Configuration Items in all supported regions in Config. The change will only affect new global resource types onboarded after February 2022.

      To record global resource types onboarded after February 2022, enable All Supported Resource Types in the home region of the global resource type you want to record.

      If you don't want Config to record all resources, you can specify which types of resources it will record with the resourceTypes parameter.

      For a list of supported resource types, see Supported Resource Types.

      For more information and a table of the Home Regions for Global Resource Types Onboarded after February 2022, see Selecting Which Resources Config Records.

      " + "documentation":"

      Specifies which resource types Config records for configuration changes. In the recording group, you specify whether you want to record all supported resource types or to include or exclude specific types of resources.

      By default, Config records configuration changes for all supported types of Regional resources that Config discovers in the Amazon Web Services Region in which it is running. Regional resources are tied to a Region and can be used only in that Region. Examples of Regional resources are Amazon EC2 instances and Amazon EBS volumes.

      You can also have Config record supported types of global resources. Global resources are not tied to a specific Region and can be used in all Regions. The global resource types that Config supports include IAM users, groups, roles, and customer managed policies.

      Global resource types onboarded to Config recording after February 2022 will be recorded only in the service's home Region for the commercial partition and Amazon Web Services GovCloud (US-West) for the Amazon Web Services GovCloud (US) partition. You can view the Configuration Items for these new global resource types only in their home Region and Amazon Web Services GovCloud (US-West).

      If you don't want Config to record all resources, you can specify which types of resources Config records with the resourceTypes parameter.

      For a list of supported resource types, see Supported Resource Types in the Config developer guide.

      For more information and a table of the Home Regions for Global Resource Types Onboarded after February 2022, see Selecting Which Resources Config Records in the Config developer guide.

      " + }, + "RecordingStrategy":{ + "type":"structure", + "members":{ + "useOnly":{ + "shape":"RecordingStrategyType", + "documentation":"

      The recording strategy for the configuration recorder.

      • If you set this option to ALL_SUPPORTED_RESOURCE_TYPES, Config records configuration changes for all supported regional resource types. You also must set the allSupported field of RecordingGroup to true.

        When Config adds support for a new type of regional resource, Config automatically starts recording resources of that type. For a list of supported resource types, see Supported Resource Types in the Config developer guide.

      • If you set this option to INCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for only the resource types that you specify in the resourceTypes field of RecordingGroup.

      • If you set this option to EXCLUSION_BY_RESOURCE_TYPES, Config records configuration changes for all supported resource types, except the resource types that you specify as exemptions to exclude from being recorded in the resourceTypes field of ExclusionByResourceTypes.

      The recordingStrategy field is optional when you set the allSupported field of RecordingGroup to true.

      The recordingStrategy field is optional when you list resource types in the resourceTypes field of RecordingGroup.

      The recordingStrategy field is required if you list resource types to exclude from recording in the resourceTypes field of ExclusionByResourceTypes.

      If you choose EXCLUSION_BY_RESOURCE_TYPES for the recording strategy, the exclusionByResourceTypes field will override other properties in the request.

      For example, even if you set includeGlobalResourceTypes to false, global resource types will still be automatically recorded in this option unless those resource types are specifically listed as exemptions in the resourceTypes field of exclusionByResourceTypes.

      By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES recording strategy, when Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types, Config starts recording resources of that type automatically.

      " + } + }, + "documentation":"

      Specifies the recording strategy of the configuration recorder.

      " + }, + "RecordingStrategyType":{ + "type":"string", + "enum":[ + "ALL_SUPPORTED_RESOURCE_TYPES", + "INCLUSION_BY_RESOURCE_TYPES", + "EXCLUSION_BY_RESOURCE_TYPES" + ] }, "ReevaluateConfigRuleNames":{ "type":"list", @@ -7420,7 +7456,28 @@ "AWS::Redshift::ScheduledAction", "AWS::Route53Resolver::FirewallRuleGroupAssociation", "AWS::SageMaker::AppImageConfig", - "AWS::SageMaker::Image" + "AWS::SageMaker::Image", + "AWS::ECS::TaskSet", + "AWS::Cassandra::Keyspace", + "AWS::Signer::SigningProfile", + "AWS::Amplify::App", + "AWS::AppMesh::VirtualNode", + "AWS::AppMesh::VirtualService", + "AWS::AppRunner::VpcConnector", + "AWS::AppStream::Application", + "AWS::CodeArtifact::Repository", + "AWS::EC2::PrefixList", + "AWS::EC2::SpotFleet", + "AWS::Evidently::Project", + "AWS::Forecast::Dataset", + "AWS::IAM::SAMLProvider", + "AWS::IAM::ServerCertificate", + "AWS::Pinpoint::Campaign", + "AWS::Pinpoint::InAppTemplate", + "AWS::SageMaker::Domain", + "AWS::Transfer::Agreement", + "AWS::Transfer::Connector", + "AWS::KinesisFirehose::DeliveryStream" ] }, "ResourceTypeList":{ @@ -8048,7 +8105,7 @@ "type":"structure", "members":{ }, - "documentation":"

      You have reached the limit of the number of tags you can use. For more information, see Service Limits in the Config Developer Guide.

      ", + "documentation":"

      You have reached the limit of the number of tags you can use. For more information, see Service Limits in the Config Developer Guide.

      ", "exception":true }, "UnprocessedResourceIdentifierList":{ diff --git a/services/connect/pom.xml b/services/connect/pom.xml index 57a984615f03..d7d03c5339e5 100644 --- a/services/connect/pom.xml +++ b/services/connect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT connect AWS Java SDK :: Services :: Connect diff --git a/services/connect/src/main/resources/codegen-resources/paginators-1.json b/services/connect/src/main/resources/codegen-resources/paginators-1.json index 230d2e1b39b2..e6c58c5cc27a 100644 --- a/services/connect/src/main/resources/codegen-resources/paginators-1.json +++ b/services/connect/src/main/resources/codegen-resources/paginators-1.json @@ -228,6 +228,24 @@ "output_token": "NextToken", "result_key": "AvailableNumbersList" }, + "SearchHoursOfOperations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "non_aggregate_keys": [ + "ApproximateTotalCount" + ], + "output_token": "NextToken", + "result_key": "HoursOfOperations" + }, + "SearchPrompts": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "non_aggregate_keys": [ + "ApproximateTotalCount" + ], + "output_token": "NextToken", + "result_key": "Prompts" + }, "SearchQueues": { "input_token": "NextToken", "limit_key": "MaxResults", @@ -237,6 +255,15 @@ "output_token": "NextToken", "result_key": "Queues" }, + "SearchQuickConnects": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "non_aggregate_keys": [ + "ApproximateTotalCount" + ], + "output_token": "NextToken", + "result_key": "QuickConnects" + }, "SearchRoutingProfiles": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/services/connect/src/main/resources/codegen-resources/service-2.json b/services/connect/src/main/resources/codegen-resources/service-2.json index e8da6845a9c0..4a94f31edd0e 100644 --- a/services/connect/src/main/resources/codegen-resources/service-2.json +++ 
b/services/connect/src/main/resources/codegen-resources/service-2.json @@ -373,6 +373,24 @@ ], "documentation":"

      Adds a new participant into an on-going chat contact. For more information, see Customize chat flow experiences by integrating custom participants.

      " }, + "CreatePrompt":{ + "name":"CreatePrompt", + "http":{ + "method":"PUT", + "requestUri":"/prompts/{InstanceId}" + }, + "input":{"shape":"CreatePromptRequest"}, + "output":{"shape":"CreatePromptResponse"}, + "errors":[ + {"shape":"DuplicateResourceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"LimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

      Creates a prompt. For more information about prompts, such as supported file types and maximum length, see Create prompts in the Amazon Connect Administrator's Guide.

      " + }, "CreateQueue":{ "name":"CreateQueue", "http":{ @@ -611,7 +629,8 @@ {"shape":"InternalServiceException"}, {"shape":"ResourceConflictException"} ], - "documentation":"

      Deletes a contact evaluation in the specified Amazon Connect instance.

      " + "documentation":"

      Deletes a contact evaluation in the specified Amazon Connect instance.

      ", + "idempotent":true }, "DeleteContactFlow":{ "name":"DeleteContactFlow", @@ -663,7 +682,8 @@ {"shape":"InternalServiceException"}, {"shape":"ResourceConflictException"} ], - "documentation":"

      Deletes an evaluation form in the specified Amazon Connect instance.

      • If the version property is provided, only the specified version of the evaluation form is deleted.

      • If no version is provided, then the full form (all versions) is deleted.

      " + "documentation":"

      Deletes an evaluation form in the specified Amazon Connect instance.

      • If the version property is provided, only the specified version of the evaluation form is deleted.

      • If no version is provided, then the full form (all versions) is deleted.

      ", + "idempotent":true }, "DeleteHoursOfOperation":{ "name":"DeleteHoursOfOperation", @@ -710,6 +730,22 @@ ], "documentation":"

      Deletes an Amazon Web Services resource association from an Amazon Connect instance. The association must not have any use cases associated with it.

      " }, + "DeletePrompt":{ + "name":"DeletePrompt", + "http":{ + "method":"DELETE", + "requestUri":"/prompts/{InstanceId}/{PromptId}" + }, + "input":{"shape":"DeletePromptRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

      Deletes a prompt.

      " + }, "DeleteQuickConnect":{ "name":"DeleteQuickConnect", "http":{ @@ -1045,6 +1081,23 @@ ], "documentation":"

      Gets details and status of a phone number that’s claimed to your Amazon Connect instance or traffic distribution group.

      If the number is claimed to a traffic distribution group, and you are calling in the Amazon Web Services Region where the traffic distribution group was created, you can use either a phone number ARN or UUID value for the PhoneNumberId URI request parameter. However, if the number is claimed to a traffic distribution group and you are calling this API in the alternate Amazon Web Services Region associated with the traffic distribution group, you must provide a full phone number ARN. If a UUID is provided in this scenario, you will receive a ResourceNotFoundException.

      " }, + "DescribePrompt":{ + "name":"DescribePrompt", + "http":{ + "method":"GET", + "requestUri":"/prompts/{InstanceId}/{PromptId}" + }, + "input":{"shape":"DescribePromptRequest"}, + "output":{"shape":"DescribePromptResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

      Describes the prompt.

      " + }, "DescribeQueue":{ "name":"DescribeQueue", "http":{ @@ -1475,7 +1528,24 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

      Gets metric data from the specified Amazon Connect instance.

      GetMetricDataV2 offers more features than GetMetricData, the previous version of this API. It has new metrics, offers filtering at a metric level, and offers the ability to filter and group data by channels, queues, routing profiles, agents, and agent hierarchy levels. It can retrieve historical data for the last 35 days, in 24-hour intervals.

      For a description of the historical metrics that are supported by GetMetricDataV2 and GetMetricData, see Historical metrics definitions in the Amazon Connect Administrator's Guide.

      This API is not available in the Amazon Web Services GovCloud (US) Regions.

      " + "documentation":"

      Gets metric data from the specified Amazon Connect instance.

      GetMetricDataV2 offers more features than GetMetricData, the previous version of this API. It has new metrics, offers filtering at a metric level, and offers the ability to filter and group data by channels, queues, routing profiles, agents, and agent hierarchy levels. It can retrieve historical data for the last 35 days, in 24-hour intervals.

      For a description of the historical metrics that are supported by GetMetricDataV2 and GetMetricData, see Historical metrics definitions in the Amazon Connect Administrator's Guide.

      " + }, + "GetPromptFile":{ + "name":"GetPromptFile", + "http":{ + "method":"GET", + "requestUri":"/prompts/{InstanceId}/{PromptId}/file" + }, + "input":{"shape":"GetPromptFileRequest"}, + "output":{"shape":"GetPromptFileResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

      Gets the prompt file.

      " }, "GetTaskTemplate":{ "name":"GetTaskTemplate", @@ -2184,7 +2254,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

      When a contact is being recorded, and the recording has been suspended using SuspendContactRecording, this API resumes recording the call.

      Only voice recordings are supported at this time.

      " + "documentation":"

      When a contact is being recorded, and the recording has been suspended using SuspendContactRecording, this API resumes recording the call or screen.

      Voice and screen recordings are supported.

      " }, "SearchAvailablePhoneNumbers":{ "name":"SearchAvailablePhoneNumbers", @@ -2202,6 +2272,40 @@ ], "documentation":"

      Searches for available phone numbers that you can claim to your Amazon Connect instance or traffic distribution group. If the provided TargetArn is a traffic distribution group, you can call this API in both Amazon Web Services Regions associated with the traffic distribution group.

      " }, + "SearchHoursOfOperations":{ + "name":"SearchHoursOfOperations", + "http":{ + "method":"POST", + "requestUri":"/search-hours-of-operations" + }, + "input":{"shape":"SearchHoursOfOperationsRequest"}, + "output":{"shape":"SearchHoursOfOperationsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

      Searches the hours of operation in an Amazon Connect instance, with optional filtering.

      " + }, + "SearchPrompts":{ + "name":"SearchPrompts", + "http":{ + "method":"POST", + "requestUri":"/search-prompts" + }, + "input":{"shape":"SearchPromptsRequest"}, + "output":{"shape":"SearchPromptsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

      Searches prompts in an Amazon Connect instance, with optional filtering.

      " + }, "SearchQueues":{ "name":"SearchQueues", "http":{ @@ -2219,6 +2323,23 @@ ], "documentation":"

      This API is in preview release for Amazon Connect and is subject to change.

      Searches queues in an Amazon Connect instance, with optional filtering.

      " }, + "SearchQuickConnects":{ + "name":"SearchQuickConnects", + "http":{ + "method":"POST", + "requestUri":"/search-quick-connects" + }, + "input":{"shape":"SearchQuickConnectsRequest"}, + "output":{"shape":"SearchQuickConnectsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

      Searches quick connects in an Amazon Connect instance, with optional filtering.

      " + }, "SearchRoutingProfiles":{ "name":"SearchRoutingProfiles", "http":{ @@ -2470,7 +2591,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

      When a contact is being recorded, this API suspends recording the call. For example, you might suspend the call recording while collecting sensitive information, such as a credit card number. Then use ResumeContactRecording to restart recording.

      The period of time that the recording is suspended is filled with silence in the final recording.

      Only voice recordings are supported at this time.

      " + "documentation":"

      When a contact is being recorded, this API suspends recording the call or screen. For example, you might suspend the call or screen recording while collecting sensitive information, such as a credit card number. Then use ResumeContactRecording to restart recording.

      The period of time that the recording is suspended is filled with silence in the final recording.

      Voice and screen recordings are supported.

      " }, "TagResource":{ "name":"TagResource", @@ -2805,6 +2926,23 @@ ], "documentation":"

      Updates your claimed phone number from its current Amazon Connect instance or traffic distribution group to another Amazon Connect instance or traffic distribution group in the same Amazon Web Services Region.

      After using this API, you must verify that the phone number is attached to the correct flow in the target instance or traffic distribution group. You need to do this because the API switches only the phone number to a new instance or traffic distribution group. It doesn't migrate the flow configuration of the phone number, too.

      You can call DescribePhoneNumber API to verify the status of a previous UpdatePhoneNumber operation.

      " }, + "UpdatePrompt":{ + "name":"UpdatePrompt", + "http":{ + "method":"POST", + "requestUri":"/prompts/{InstanceId}/{PromptId}" + }, + "input":{"shape":"UpdatePromptRequest"}, + "output":{"shape":"UpdatePromptResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

      Updates a prompt.

      " + }, "UpdateQueueHoursOfOperation":{ "name":"UpdateQueueHoursOfOperation", "http":{ @@ -4062,7 +4200,7 @@ }, "InitiationTimestamp":{ "shape":"timestamp", - "documentation":"

      The date and time this contact was initiated, in UTC time. For INBOUND, this is when the contact arrived. For OUTBOUND, this is when the agent began dialing. For CALLBACK, this is when the callback contact was created. For TRANSFER and QUEUE_TRANSFER, this is when the transfer was initiated. For API, this is when the request arrived.

      " + "documentation":"

      The date and time this contact was initiated, in UTC time. For INBOUND, this is when the contact arrived. For OUTBOUND, this is when the agent began dialing. For CALLBACK, this is when the callback contact was created. For TRANSFER and QUEUE_TRANSFER, this is when the transfer was initiated. For API, this is when the request arrived. For EXTERNAL_OUTBOUND, this is when the agent started dialing the external participant. For MONITOR, this is when the supervisor started listening to a contact.

      " }, "DisconnectTimestamp":{ "shape":"timestamp", @@ -4325,7 +4463,8 @@ "CALLBACK", "API", "DISCONNECT", - "MONITOR" + "MONITOR", + "EXTERNAL_OUTBOUND" ] }, "ContactNotFoundException":{ @@ -4781,6 +4920,51 @@ } } }, + "CreatePromptRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Name", + "S3Uri" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

      The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

      ", + "location":"uri", + "locationName":"InstanceId" + }, + "Name":{ + "shape":"CommonNameLength127", + "documentation":"

      The name of the prompt.

      " + }, + "Description":{ + "shape":"PromptDescription", + "documentation":"

      The description of the prompt.

      " + }, + "S3Uri":{ + "shape":"S3Uri", + "documentation":"

      The URI for the S3 bucket where the prompt is stored.

      " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The tags used to organize, track, or control access for this resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

      " + } + } + }, + "CreatePromptResponse":{ + "type":"structure", + "members":{ + "PromptARN":{ + "shape":"ARN", + "documentation":"

      The Amazon Resource Name (ARN) of the prompt.

      " + }, + "PromptId":{ + "shape":"PromptId", + "documentation":"

      A unique identifier for the prompt.

      " + } + } + }, "CreateQueueRequest":{ "type":"structure", "required":[ @@ -5754,6 +5938,27 @@ } } }, + "DeletePromptRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "PromptId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

      The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

      ", + "location":"uri", + "locationName":"InstanceId" + }, + "PromptId":{ + "shape":"PromptId", + "documentation":"

      A unique identifier for the prompt.

      ", + "location":"uri", + "locationName":"PromptId" + } + } + }, "DeleteQuickConnectRequest":{ "type":"structure", "required":[ @@ -6307,6 +6512,36 @@ } } }, + "DescribePromptRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "PromptId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

      The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

      ", + "location":"uri", + "locationName":"InstanceId" + }, + "PromptId":{ + "shape":"PromptId", + "documentation":"

      A unique identifier for the prompt.

      ", + "location":"uri", + "locationName":"PromptId" + } + } + }, + "DescribePromptResponse":{ + "type":"structure", + "members":{ + "Prompt":{ + "shape":"Prompt", + "documentation":"

      Information about the prompt.

      " + } + } + }, "DescribeQueueRequest":{ "type":"structure", "required":[ @@ -7233,7 +7468,7 @@ "type":"list", "member":{"shape":"EvaluationFormItem"}, "max":100, - "min":0 + "min":1 }, "EvaluationFormNumericQuestionAutomation":{ "type":"structure", @@ -7417,7 +7652,8 @@ "type":"structure", "required":[ "Title", - "RefId" + "RefId", + "Items" ], "members":{ "Title":{ @@ -7610,7 +7846,7 @@ "EvaluationFormTitle":{ "type":"string", "max":128, - "min":0 + "min":1 }, "EvaluationFormVersionIsLocked":{"type":"boolean"}, "EvaluationFormVersionStatus":{ @@ -8185,6 +8421,36 @@ } } }, + "GetPromptFileRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "PromptId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

      The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

      ", + "location":"uri", + "locationName":"InstanceId" + }, + "PromptId":{ + "shape":"PromptId", + "documentation":"

      A unique identifier for the prompt.

      ", + "location":"uri", + "locationName":"PromptId" + } + } + }, + "GetPromptFileResponse":{ + "type":"structure", + "members":{ + "PromptPresignedUrl":{ + "shape":"PromptPresignedUrl", + "documentation":"

      A generated URL to the prompt that can be given to an unauthorized user so they can access the prompt in S3.

      " + } + } + }, "GetTaskTemplateRequest":{ "type":"structure", "required":[ @@ -8732,7 +8998,40 @@ "min":1 }, "HoursOfOperationId":{"type":"string"}, + "HoursOfOperationList":{ + "type":"list", + "member":{"shape":"HoursOfOperation"} + }, "HoursOfOperationName":{"type":"string"}, + "HoursOfOperationSearchConditionList":{ + "type":"list", + "member":{"shape":"HoursOfOperationSearchCriteria"} + }, + "HoursOfOperationSearchCriteria":{ + "type":"structure", + "members":{ + "OrConditions":{ + "shape":"HoursOfOperationSearchConditionList", + "documentation":"

      A list of conditions which would be applied together with an OR condition.

      " + }, + "AndConditions":{ + "shape":"HoursOfOperationSearchConditionList", + "documentation":"

      A list of conditions which would be applied together with an AND condition.

      " + }, + "StringCondition":{ + "shape":"StringCondition", + "documentation":"

      A leaf node condition which can be used to specify a string condition.

      The currently supported values for FieldName are name, description, timezone, and resourceID.

      " + } + }, + "documentation":"

      The search criteria to be used to return hours of operations.

      " + }, + "HoursOfOperationSearchFilter":{ + "type":"structure", + "members":{ + "TagFilter":{"shape":"ControlPlaneTagFilter"} + }, + "documentation":"

      Filters to be applied to search results.

      " + }, "HoursOfOperationSummary":{ "type":"structure", "members":{ @@ -8828,6 +9127,10 @@ "OutboundCallsEnabled":{ "shape":"OutboundCallsEnabled", "documentation":"

      Whether outbound calls are enabled.

      " + }, + "InstanceAccessUrl":{ + "shape":"Url", + "documentation":"

      This URL allows contact center users to access Amazon Connect admin website.

      " } }, "documentation":"

      The Amazon Connect instance.

      " @@ -8931,7 +9234,8 @@ "AGENT_EVENTS", "REAL_TIME_CONTACT_ANALYSIS_SEGMENTS", "ATTACHMENTS", - "CONTACT_EVALUATIONS" + "CONTACT_EVALUATIONS", + "SCREEN_RECORDINGS" ] }, "InstanceSummary":{ @@ -8972,6 +9276,10 @@ "OutboundCallsEnabled":{ "shape":"OutboundCallsEnabled", "documentation":"

      Whether outbound calls are enabled.

      " + }, + "InstanceAccessUrl":{ + "shape":"Url", + "documentation":"

      This URL allows contact center users to access Amazon Connect admin website.

      " } }, "documentation":"

      Information about the instance.

      " @@ -11586,16 +11894,85 @@ "max":50, "min":1 }, + "Prompt":{ + "type":"structure", + "members":{ + "PromptARN":{ + "shape":"ARN", + "documentation":"

      The Amazon Resource Name (ARN) of the prompt.

      " + }, + "PromptId":{ + "shape":"PromptId", + "documentation":"

      A unique identifier for the prompt.

      " + }, + "Name":{ + "shape":"CommonNameLength127", + "documentation":"

      The name of the prompt.

      " + }, + "Description":{ + "shape":"PromptDescription", + "documentation":"

      The description of the prompt.

      " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The tags used to organize, track, or control access for this resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

      " + } + }, + "documentation":"

      Information about a prompt.

      " + }, + "PromptDescription":{ + "type":"string", + "max":250, + "min":1 + }, "PromptId":{ "type":"string", "max":256, "min":1 }, + "PromptList":{ + "type":"list", + "member":{"shape":"Prompt"} + }, "PromptName":{ "type":"string", "max":256, "min":1 }, + "PromptPresignedUrl":{ + "type":"string", + "max":2000, + "min":1 + }, + "PromptSearchConditionList":{ + "type":"list", + "member":{"shape":"PromptSearchCriteria"} + }, + "PromptSearchCriteria":{ + "type":"structure", + "members":{ + "OrConditions":{ + "shape":"PromptSearchConditionList", + "documentation":"

      A list of conditions which would be applied together with an OR condition.

      " + }, + "AndConditions":{ + "shape":"PromptSearchConditionList", + "documentation":"

      A list of conditions which would be applied together with an AND condition.

      " + }, + "StringCondition":{ + "shape":"StringCondition", + "documentation":"

      A leaf node condition which can be used to specify a string condition.

      The currently supported values for FieldName are name, description, and resourceID.

      " + } + }, + "documentation":"

      The search criteria to be used to return prompts.

      " + }, + "PromptSearchFilter":{ + "type":"structure", + "members":{ + "TagFilter":{"shape":"ControlPlaneTagFilter"} + }, + "documentation":"

      Filters to be applied to search results.

      " + }, "PromptSummary":{ "type":"structure", "members":{ @@ -11817,7 +12194,10 @@ "shape":"QueueSearchConditionList", "documentation":"

      A list of conditions which would be applied together with an AND condition.

      " }, - "StringCondition":{"shape":"StringCondition"}, + "StringCondition":{ + "shape":"StringCondition", + "documentation":"

      A leaf node condition which can be used to specify a string condition.

      The currently supported values for FieldName are name, description, and resourceID.

      " + }, "QueueTypeCondition":{ "shape":"SearchableQueueType", "documentation":"

      The type of queue.

      " @@ -11951,6 +12331,39 @@ "max":127, "min":1 }, + "QuickConnectSearchConditionList":{ + "type":"list", + "member":{"shape":"QuickConnectSearchCriteria"} + }, + "QuickConnectSearchCriteria":{ + "type":"structure", + "members":{ + "OrConditions":{ + "shape":"QuickConnectSearchConditionList", + "documentation":"

      A list of conditions which would be applied together with an OR condition.

      " + }, + "AndConditions":{ + "shape":"QuickConnectSearchConditionList", + "documentation":"

      A list of conditions which would be applied together with an AND condition.

      " + }, + "StringCondition":{ + "shape":"StringCondition", + "documentation":"

      A leaf node condition which can be used to specify a string condition.

      The currently supported values for FieldName are name, description, and resourceID.

      " + } + }, + "documentation":"

      The search criteria to be used to return quick connects.

      " + }, + "QuickConnectSearchFilter":{ + "type":"structure", + "members":{ + "TagFilter":{"shape":"ControlPlaneTagFilter"} + }, + "documentation":"

      Filters to be applied to search results.

      " + }, + "QuickConnectSearchSummaryList":{ + "type":"list", + "member":{"shape":"QuickConnect"} + }, "QuickConnectSummary":{ "type":"structure", "members":{ @@ -12031,7 +12444,7 @@ "ReferenceId":{ "type":"string", "max":40, - "min":0 + "min":1 }, "ReferenceKey":{ "type":"string", @@ -12199,7 +12612,7 @@ }, "ResourceId":{ "type":"string", - "max":50, + "max":500, "min":1 }, "ResourceInUseException":{ @@ -12464,7 +12877,10 @@ "shape":"RoutingProfileSearchConditionList", "documentation":"

      A list of conditions which would be applied together with an AND condition.

      " }, - "StringCondition":{"shape":"StringCondition"} + "StringCondition":{ + "shape":"StringCondition", + "documentation":"

      A leaf node condition which can be used to specify a string condition.

      The currently supported values for FieldName are name, description, and resourceID.

      " + } }, "documentation":"

      The search criteria to be used to return routing profiles.

      The name and description fields support \"contains\" queries with a minimum of 2 characters and a maximum of 25 characters. Any queries with character lengths outside of this range will throw invalid results.

      " }, @@ -12704,6 +13120,12 @@ }, "documentation":"

      Information about the Amazon Simple Storage Service (Amazon S3) storage type.

      " }, + "S3Uri":{ + "type":"string", + "max":512, + "min":1, + "pattern":"s3://\\S+/.+" + }, "SearchAvailablePhoneNumbersRequest":{ "type":"structure", "required":[ @@ -12752,6 +13174,94 @@ } } }, + "SearchHoursOfOperationsRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

      The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

      " + }, + "NextToken":{ + "shape":"NextToken2500", + "documentation":"

      The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

      " + }, + "MaxResults":{ + "shape":"MaxResult100", + "documentation":"

      The maximum number of results to return per page.

      ", + "box":true + }, + "SearchFilter":{ + "shape":"HoursOfOperationSearchFilter", + "documentation":"

      Filters to be applied to search results.

      " + }, + "SearchCriteria":{ + "shape":"HoursOfOperationSearchCriteria", + "documentation":"

      The search criteria to be used to return hours of operations.

      " + } + } + }, + "SearchHoursOfOperationsResponse":{ + "type":"structure", + "members":{ + "HoursOfOperations":{ + "shape":"HoursOfOperationList", + "documentation":"

      Information about the hours of operations.

      " + }, + "NextToken":{ + "shape":"NextToken2500", + "documentation":"

      If there are additional results, this is the token for the next set of results.

      " + }, + "ApproximateTotalCount":{ + "shape":"ApproximateTotalCount", + "documentation":"

      The total number of hours of operations which matched your search query.

      " + } + } + }, + "SearchPromptsRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

      The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

      " + }, + "NextToken":{ + "shape":"NextToken2500", + "documentation":"

      The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

      " + }, + "MaxResults":{ + "shape":"MaxResult100", + "documentation":"

      The maximum number of results to return per page.

      ", + "box":true + }, + "SearchFilter":{ + "shape":"PromptSearchFilter", + "documentation":"

      Filters to be applied to search results.

      " + }, + "SearchCriteria":{ + "shape":"PromptSearchCriteria", + "documentation":"

      The search criteria to be used to return prompts.

      " + } + } + }, + "SearchPromptsResponse":{ + "type":"structure", + "members":{ + "Prompts":{ + "shape":"PromptList", + "documentation":"

      Information about the prompts.

      " + }, + "NextToken":{ + "shape":"NextToken2500", + "documentation":"

      If there are additional results, this is the token for the next set of results.

      " + }, + "ApproximateTotalCount":{ + "shape":"ApproximateTotalCount", + "documentation":"

      The total number of prompts which matched your search query.

      " + } + } + }, "SearchQueuesRequest":{ "type":"structure", "required":["InstanceId"], @@ -12796,6 +13306,50 @@ } } }, + "SearchQuickConnectsRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

      The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

      " + }, + "NextToken":{ + "shape":"NextToken2500", + "documentation":"

      The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

      " + }, + "MaxResults":{ + "shape":"MaxResult100", + "documentation":"

      The maximum number of results to return per page.

      ", + "box":true + }, + "SearchFilter":{ + "shape":"QuickConnectSearchFilter", + "documentation":"

      Filters to be applied to search results.

      " + }, + "SearchCriteria":{ + "shape":"QuickConnectSearchCriteria", + "documentation":"

      The search criteria to be used to return quick connects.

      " + } + } + }, + "SearchQuickConnectsResponse":{ + "type":"structure", + "members":{ + "QuickConnects":{ + "shape":"QuickConnectSearchSummaryList", + "documentation":"

      Information about the quick connects.

      " + }, + "NextToken":{ + "shape":"NextToken2500", + "documentation":"

      If there are additional results, this is the token for the next set of results.

      " + }, + "ApproximateTotalCount":{ + "shape":"ApproximateTotalCount", + "documentation":"

      The total number of quick connects which matched your search query.

      " + } + } + }, "SearchRoutingProfilesRequest":{ "type":"structure", "required":["InstanceId"], @@ -13684,7 +14238,7 @@ "documentation":"

      The type of comparison to be made when evaluating the string condition.

      " } }, - "documentation":"

      A leaf node condition which can be used to specify a string condition.

      The currently supported value for FieldName: name

      " + "documentation":"

      A leaf node condition which can be used to specify a string condition.

      " }, "StringReference":{ "type":"structure", @@ -14934,6 +15488,52 @@ } } }, + "UpdatePromptRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "PromptId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

      The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

      ", + "location":"uri", + "locationName":"InstanceId" + }, + "PromptId":{ + "shape":"PromptId", + "documentation":"

      A unique identifier for the prompt.

      ", + "location":"uri", + "locationName":"PromptId" + }, + "Name":{ + "shape":"CommonNameLength127", + "documentation":"

      The name of the prompt.

      " + }, + "Description":{ + "shape":"PromptDescription", + "documentation":"

      A description of the prompt.

      " + }, + "S3Uri":{ + "shape":"S3Uri", + "documentation":"

      The URI for the S3 bucket where the prompt is stored.

      " + } + } + }, + "UpdatePromptResponse":{ + "type":"structure", + "members":{ + "PromptARN":{ + "shape":"ARN", + "documentation":"

      The Amazon Resource Name (ARN) of the prompt.

      " + }, + "PromptId":{ + "shape":"PromptId", + "documentation":"

      A unique identifier for the prompt.

      " + } + } + }, "UpdateQueueHoursOfOperationRequest":{ "type":"structure", "required":[ @@ -15908,7 +16508,7 @@ }, "StringCondition":{ "shape":"StringCondition", - "documentation":"

      A leaf node condition which can be used to specify a string condition.

      " + "documentation":"

      A leaf node condition which can be used to specify a string condition.

      The currently supported values for FieldName are name, description, and resourceID.

      " }, "HierarchyGroupCondition":{ "shape":"HierarchyGroupCondition", diff --git a/services/connectcampaigns/pom.xml b/services/connectcampaigns/pom.xml index 776f65c1beab..8fd902691897 100644 --- a/services/connectcampaigns/pom.xml +++ b/services/connectcampaigns/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT connectcampaigns AWS Java SDK :: Services :: Connect Campaigns diff --git a/services/connectcases/pom.xml b/services/connectcases/pom.xml index ab8d54bb781d..04534a2290c3 100644 --- a/services/connectcases/pom.xml +++ b/services/connectcases/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT connectcases AWS Java SDK :: Services :: Connect Cases diff --git a/services/connectcases/src/main/resources/codegen-resources/service-2.json b/services/connectcases/src/main/resources/codegen-resources/service-2.json index 2869713e24ac..698901ad1307 100644 --- a/services/connectcases/src/main/resources/codegen-resources/service-2.json +++ b/services/connectcases/src/main/resources/codegen-resources/service-2.json @@ -1513,7 +1513,8 @@ "Number", "Boolean", "DateTime", - "SingleSelect" + "SingleSelect", + "Url" ] }, "FieldValue":{ diff --git a/services/connectcontactlens/pom.xml b/services/connectcontactlens/pom.xml index 0c8a5b3b1351..b197f9eb1465 100644 --- a/services/connectcontactlens/pom.xml +++ b/services/connectcontactlens/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT connectcontactlens AWS Java SDK :: Services :: Connect Contact Lens diff --git a/services/connectparticipant/pom.xml b/services/connectparticipant/pom.xml index 816fe1555061..6276d1b8c603 100644 --- a/services/connectparticipant/pom.xml +++ b/services/connectparticipant/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT connectparticipant AWS Java SDK :: Services :: ConnectParticipant diff --git 
a/services/controltower/pom.xml b/services/controltower/pom.xml index 30987df76882..adb862f7abb3 100644 --- a/services/controltower/pom.xml +++ b/services/controltower/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT controltower AWS Java SDK :: Services :: Control Tower diff --git a/services/costandusagereport/pom.xml b/services/costandusagereport/pom.xml index 9687b5bf72cb..c53919ea02a6 100644 --- a/services/costandusagereport/pom.xml +++ b/services/costandusagereport/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT costandusagereport AWS Java SDK :: Services :: AWS Cost and Usage Report diff --git a/services/costandusagereport/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/costandusagereport/src/main/resources/codegen-resources/endpoint-rule-set.json index d9ca8e7e6f4f..a917d0c9310d 100644 --- a/services/costandusagereport/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/costandusagereport/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": 
"error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://cur-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS 
and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://cur-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cur-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + 
"endpoint": { + "url": "https://cur.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -231,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://cur-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://cur.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -240,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cur.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://cur.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/costandusagereport/src/main/resources/codegen-resources/endpoint-tests.json b/services/costandusagereport/src/main/resources/codegen-resources/endpoint-tests.json index 32c35c42d837..b3f84098abd6 100644 --- a/services/costandusagereport/src/main/resources/codegen-resources/endpoint-tests.json +++ 
b/services/costandusagereport/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,5 +1,18 @@ { "testCases": [ + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cur.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { @@ -8,9 +21,9 @@ } }, "params": { - "UseDualStack": true, "Region": "us-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -21,9 +34,9 @@ } }, "params": { - "UseDualStack": false, "Region": "us-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -34,87 +47,248 @@ } }, "params": { - "UseDualStack": true, "Region": "us-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cur.us-east-1.amazonaws.com" + "url": "https://cur.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cur-fips.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://cur-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + 
"documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cur-fips.cn-northwest-1.amazonaws.com.cn" + "url": "https://cur-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cur.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://cur.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cur.cn-northwest-1.amazonaws.com.cn" + "url": "https://cur.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cur-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cur-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and 
DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cur.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cur.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cur-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cur.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + 
"documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cur-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cur.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -124,9 +298,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -136,11 +310,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid 
Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/costandusagereport/src/main/resources/codegen-resources/service-2.json b/services/costandusagereport/src/main/resources/codegen-resources/service-2.json index 5bea391e51cc..33a8b741beb6 100644 --- a/services/costandusagereport/src/main/resources/codegen-resources/service-2.json +++ b/services/costandusagereport/src/main/resources/codegen-resources/service-2.json @@ -79,6 +79,7 @@ "af-south-1", "ap-east-1", "ap-south-1", + "ap-south-2", "ap-southeast-1", "ap-southeast-2", "ap-southeast-3", @@ -87,6 +88,7 @@ "ap-northeast-3", "ca-central-1", "eu-central-1", + "eu-central-2", "eu-west-1", "eu-west-2", "eu-west-3", @@ -335,7 +337,10 @@ "SchemaElement":{ "type":"string", "documentation":"

      Whether or not AWS includes resource IDs in the report.

      ", - "enum":["RESOURCES"] + "enum":[ + "RESOURCES", + "SPLIT_COST_ALLOCATION_DATA" + ] }, "SchemaElementList":{ "type":"list", diff --git a/services/costexplorer/pom.xml b/services/costexplorer/pom.xml index 45d8e7b55270..5502d0592019 100644 --- a/services/costexplorer/pom.xml +++ b/services/costexplorer/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 costexplorer diff --git a/services/customerprofiles/pom.xml b/services/customerprofiles/pom.xml index 623d547e646e..f4b1fbdc70c9 100644 --- a/services/customerprofiles/pom.xml +++ b/services/customerprofiles/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT customerprofiles AWS Java SDK :: Services :: Customer Profiles diff --git a/services/customerprofiles/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/customerprofiles/src/main/resources/codegen-resources/endpoint-rule-set.json index 14c2b402904c..a70609892ae3 100644 --- a/services/customerprofiles/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/customerprofiles/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true 
] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,154 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + 
"supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://profile-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://profile-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://profile-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://profile-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ] - }, - { - "conditions": 
[], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://profile.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -286,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://profile.{Region}.{PartitionResult#dualStackDnsSuffix}", + "url": "https://profile.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -295,28 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://profile.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/customerprofiles/src/main/resources/codegen-resources/endpoint-tests.json 
b/services/customerprofiles/src/main/resources/codegen-resources/endpoint-tests.json index ac21c7726877..1997557a244c 100644 --- a/services/customerprofiles/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/customerprofiles/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "af-south-1", "UseFIPS": false, - "Region": "af-south-1" + "UseDualStack": false } }, { @@ -21,9 +21,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-1", "UseFIPS": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-2", "UseFIPS": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { @@ -47,9 +47,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-1", "UseFIPS": false, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ca-central-1", "UseFIPS": false, - "Region": "ca-central-1" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-central-1", "UseFIPS": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { @@ -99,9 +99,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-west-2", "UseFIPS": false, - "Region": "eu-west-2" + "UseDualStack": false } }, { @@ -112,9 +112,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -125,9 +125,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": false, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -138,9 +138,9 @@ } }, "params": { - "UseDualStack": true, 
+ "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -151,9 +151,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -164,9 +164,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -177,9 +177,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -190,9 +190,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -203,9 +203,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -216,9 +216,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -229,9 +229,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -242,9 +242,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -255,9 +255,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -268,9 +268,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + 
"UseDualStack": true } }, { @@ -281,9 +292,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -294,9 +316,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -307,9 +340,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -320,22 +364,35 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with 
region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { "UseFIPS": false, - "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -345,9 +402,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -357,11 +414,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/customerprofiles/src/main/resources/codegen-resources/paginators-1.json b/services/customerprofiles/src/main/resources/codegen-resources/paginators-1.json index 5677bd8e4a2d..58e94da63dd2 100644 --- a/services/customerprofiles/src/main/resources/codegen-resources/paginators-1.json +++ b/services/customerprofiles/src/main/resources/codegen-resources/paginators-1.json @@ -1,4 +1,10 @@ { "pagination": { + "ListEventStreams": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Items" + } } } diff --git a/services/customerprofiles/src/main/resources/codegen-resources/service-2.json b/services/customerprofiles/src/main/resources/codegen-resources/service-2.json index 8b424bbb8997..dcd2477861c9 100644 --- a/services/customerprofiles/src/main/resources/codegen-resources/service-2.json +++ b/services/customerprofiles/src/main/resources/codegen-resources/service-2.json @@ -30,6 +30,23 @@ ], "documentation":"

      Associates a new key value with a specific profile, such as a Contact Record ContactId.

      A profile object can have a single unique key and any number of additional keys that can be used to identify the profile that it belongs to.

      " }, + "CreateCalculatedAttributeDefinition":{ + "name":"CreateCalculatedAttributeDefinition", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/calculated-attributes/{CalculatedAttributeName}" + }, + "input":{"shape":"CreateCalculatedAttributeDefinitionRequest"}, + "output":{"shape":"CreateCalculatedAttributeDefinitionResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Creates a new calculated attribute definition. After creation, new object data ingested into Customer Profiles will be included in the calculated attribute, which can be retrieved for a profile using the GetCalculatedAttributeForProfile API. Defining a calculated attribute makes it available for all profiles within a domain. Each calculated attribute can only reference one ObjectType and at most, two fields from that ObjectType.

      " + }, "CreateDomain":{ "name":"CreateDomain", "http":{ @@ -47,6 +64,23 @@ ], "documentation":"

      Creates a domain, which is a container for all customer data, such as customer profile attributes, object types, profile keys, and encryption keys. You can create multiple domains, and each domain can have multiple third-party integrations.

      Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain.

      Use this API or UpdateDomain to enable identity resolution: set Matching to true.

      To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.

      " }, + "CreateEventStream":{ + "name":"CreateEventStream", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/event-streams/{EventStreamName}" + }, + "input":{"shape":"CreateEventStreamRequest"}, + "output":{"shape":"CreateEventStreamResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Creates an event stream, which is a subscription to real-time events, such as when profiles are created and updated through Amazon Connect Customer Profiles.

      Each event stream can be associated with only one Kinesis Data Stream destination in the same region and Amazon Web Services account as the customer profiles domain

      " + }, "CreateIntegrationWorkflow":{ "name":"CreateIntegrationWorkflow", "http":{ @@ -81,6 +115,23 @@ ], "documentation":"

      Creates a standard profile.

      A standard profile represents the following attributes for a customer profile in a domain.

      " }, + "DeleteCalculatedAttributeDefinition":{ + "name":"DeleteCalculatedAttributeDefinition", + "http":{ + "method":"DELETE", + "requestUri":"/domains/{DomainName}/calculated-attributes/{CalculatedAttributeName}" + }, + "input":{"shape":"DeleteCalculatedAttributeDefinitionRequest"}, + "output":{"shape":"DeleteCalculatedAttributeDefinitionResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Deletes an existing calculated attribute definition. Note that deleting a default calculated attribute is possible, however once deleted, you will be unable to undo that action and will need to recreate it on your own using the CreateCalculatedAttributeDefinition API if you want it back.

      " + }, "DeleteDomain":{ "name":"DeleteDomain", "http":{ @@ -98,6 +149,24 @@ ], "documentation":"

      Deletes a specific domain and all of its customer data, such as customer profile attributes and their related objects.

      " }, + "DeleteEventStream":{ + "name":"DeleteEventStream", + "http":{ + "method":"DELETE", + "requestUri":"/domains/{DomainName}/event-streams/{EventStreamName}" + }, + "input":{"shape":"DeleteEventStreamRequest"}, + "output":{"shape":"DeleteEventStreamResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Disables and deletes the specified event stream.

      ", + "idempotent":true + }, "DeleteIntegration":{ "name":"DeleteIntegration", "http":{ @@ -217,6 +286,40 @@ ], "documentation":"

      Tests the auto-merging settings of your Identity Resolution Job without merging your data. It randomly selects a sample of matching groups from the existing matching results, and applies the auto-merging settings that you provided. You can then view the number of profiles in the sample, the number of matches, and the number of profiles identified to be merged. This enables you to evaluate the accuracy of the attributes in your matching list.

      You can't view which profiles are matched and would be merged.

      We strongly recommend you use this API to do a dry run of the automerging process before running the Identity Resolution Job. Include at least two matching attributes. If your matching list includes too few attributes (such as only FirstName or only LastName), there may be a large number of matches. This increases the chances of erroneous merges.

      " }, + "GetCalculatedAttributeDefinition":{ + "name":"GetCalculatedAttributeDefinition", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}/calculated-attributes/{CalculatedAttributeName}" + }, + "input":{"shape":"GetCalculatedAttributeDefinitionRequest"}, + "output":{"shape":"GetCalculatedAttributeDefinitionResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Provides more information on a calculated attribute definition for Customer Profiles.

      " + }, + "GetCalculatedAttributeForProfile":{ + "name":"GetCalculatedAttributeForProfile", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}/profile/{ProfileId}/calculated-attributes/{CalculatedAttributeName}" + }, + "input":{"shape":"GetCalculatedAttributeForProfileRequest"}, + "output":{"shape":"GetCalculatedAttributeForProfileResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Retrieve a calculated attribute for a customer profile.

      " + }, "GetDomain":{ "name":"GetDomain", "http":{ @@ -234,6 +337,23 @@ ], "documentation":"

      Returns information about a specific domain.

      " }, + "GetEventStream":{ + "name":"GetEventStream", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}/event-streams/{EventStreamName}" + }, + "input":{"shape":"GetEventStreamRequest"}, + "output":{"shape":"GetEventStreamResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Returns information about the specified event stream in a specific domain.

      " + }, "GetIdentityResolutionJob":{ "name":"GetIdentityResolutionJob", "http":{ @@ -370,6 +490,40 @@ ], "documentation":"

      Lists all of the integrations associated to a specific URI in the AWS account.

      " }, + "ListCalculatedAttributeDefinitions":{ + "name":"ListCalculatedAttributeDefinitions", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}/calculated-attributes" + }, + "input":{"shape":"ListCalculatedAttributeDefinitionsRequest"}, + "output":{"shape":"ListCalculatedAttributeDefinitionsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Lists calculated attribute definitions for Customer Profiles.

      " + }, + "ListCalculatedAttributesForProfile":{ + "name":"ListCalculatedAttributesForProfile", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}/profile/{ProfileId}/calculated-attributes" + }, + "input":{"shape":"ListCalculatedAttributesForProfileRequest"}, + "output":{"shape":"ListCalculatedAttributesForProfileResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Retrieve a list of calculated attributes for a customer profile.

      " + }, "ListDomains":{ "name":"ListDomains", "http":{ @@ -387,6 +541,23 @@ ], "documentation":"

      Returns a list of all the domains for an AWS account that have been created.

      " }, + "ListEventStreams":{ + "name":"ListEventStreams", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}/event-streams" + }, + "input":{"shape":"ListEventStreamsRequest"}, + "output":{"shape":"ListEventStreamsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Returns a list of all the event streams in a specific domain.

      " + }, "ListIdentityResolutionJobs":{ "name":"ListIdentityResolutionJobs", "http":{ @@ -618,6 +789,23 @@ ], "documentation":"

      Removes one or more tags from the specified Amazon Connect Customer Profiles resource. In Connect Customer Profiles, domains, profile object types, and integrations can be tagged.

      " }, + "UpdateCalculatedAttributeDefinition":{ + "name":"UpdateCalculatedAttributeDefinition", + "http":{ + "method":"PUT", + "requestUri":"/domains/{DomainName}/calculated-attributes/{CalculatedAttributeName}" + }, + "input":{"shape":"UpdateCalculatedAttributeDefinitionRequest"}, + "output":{"shape":"UpdateCalculatedAttributeDefinitionResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Updates an existing calculated attribute definition. When updating the Conditions, note that increasing the date range of a calculated attribute will not trigger inclusion of historical data greater than the current date range.

      " + }, "UpdateDomain":{ "name":"UpdateDomain", "http":{ @@ -880,6 +1068,41 @@ }, "documentation":"

      Workflow step details for APPFLOW_INTEGRATION workflow.

      " }, + "AttributeDetails":{ + "type":"structure", + "required":[ + "Attributes", + "Expression" + ], + "members":{ + "Attributes":{ + "shape":"AttributeList", + "documentation":"

      A list of attribute items specified in the mathematical expression.

      " + }, + "Expression":{ + "shape":"string1To255", + "documentation":"

      Mathematical expression that is performed on attribute items provided in the attribute list. Each element in the expression should follow the structure of \\\"{ObjectTypeName.AttributeName}\\\".

      " + } + }, + "documentation":"

      Mathematical expression and a list of attribute items specified in that expression.

      " + }, + "AttributeItem":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"attributeName", + "documentation":"

      The name of an attribute defined in a profile object type.

      " + } + }, + "documentation":"

      The details of a single attribute item specified in the mathematical expression.

      " + }, + "AttributeList":{ + "type":"list", + "member":{"shape":"AttributeItem"}, + "max":2, + "min":1 + }, "AttributeSourceIdMap":{ "type":"map", "key":{"shape":"string1To255"}, @@ -955,6 +1178,32 @@ "max":512, "pattern":".*" }, + "CalculatedAttributeDefinitionsList":{ + "type":"list", + "member":{"shape":"ListCalculatedAttributeDefinitionItem"} + }, + "CalculatedAttributesForProfileList":{ + "type":"list", + "member":{"shape":"ListCalculatedAttributeForProfileItem"} + }, + "Conditions":{ + "type":"structure", + "members":{ + "Range":{ + "shape":"Range", + "documentation":"

      The relative time period over which data is included in the aggregation.

      " + }, + "ObjectCount":{ + "shape":"ObjectCount", + "documentation":"

      The number of profile objects used for the calculated attribute.

      " + }, + "Threshold":{ + "shape":"Threshold", + "documentation":"

      The threshold for the calculated attribute.

      " + } + }, + "documentation":"

      The conditions including range, object count, and threshold for the calculated attribute.

      " + }, "ConflictResolution":{ "type":"structure", "required":["ConflictResolvingModel"], @@ -1019,6 +1268,94 @@ }, "documentation":"

      The matching criteria to be used during the auto-merging process.

      " }, + "CreateCalculatedAttributeDefinitionRequest":{ + "type":"structure", + "required":[ + "DomainName", + "CalculatedAttributeName", + "AttributeDetails", + "Statistic" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

      The unique name of the domain.

      ", + "location":"uri", + "locationName":"DomainName" + }, + "CalculatedAttributeName":{ + "shape":"typeName", + "documentation":"

      The unique name of the calculated attribute.

      ", + "location":"uri", + "locationName":"CalculatedAttributeName" + }, + "DisplayName":{ + "shape":"displayName", + "documentation":"

      The display name of the calculated attribute.

      " + }, + "Description":{ + "shape":"text", + "documentation":"

      The description of the calculated attribute.

      " + }, + "AttributeDetails":{ + "shape":"AttributeDetails", + "documentation":"

      Mathematical expression and a list of attribute items specified in that expression.

      " + }, + "Conditions":{ + "shape":"Conditions", + "documentation":"

      The conditions including range, object count, and threshold for the calculated attribute.

      " + }, + "Statistic":{ + "shape":"Statistic", + "documentation":"

      The aggregation operation to perform for the calculated attribute.

      " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The tags used to organize, track, or control access for this resource.

      " + } + } + }, + "CreateCalculatedAttributeDefinitionResponse":{ + "type":"structure", + "members":{ + "CalculatedAttributeName":{ + "shape":"typeName", + "documentation":"

      The unique name of the calculated attribute.

      " + }, + "DisplayName":{ + "shape":"displayName", + "documentation":"

      The display name of the calculated attribute.

      " + }, + "Description":{ + "shape":"text", + "documentation":"

      The description of the calculated attribute.

      " + }, + "AttributeDetails":{ + "shape":"AttributeDetails", + "documentation":"

      Mathematical expression and a list of attribute items specified in that expression.

      " + }, + "Conditions":{ + "shape":"Conditions", + "documentation":"

      The conditions including range, object count, and threshold for the calculated attribute.

      " + }, + "Statistic":{ + "shape":"Statistic", + "documentation":"

      The aggregation operation to perform for the calculated attribute.

      " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

      The timestamp of when the calculated attribute definition was created.

      " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

      The timestamp of when the calculated attribute definition was most recently edited.

      " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The tags used to organize, track, or control access for this resource.

      " + } + } + }, "CreateDomainRequest":{ "type":"structure", "required":[ @@ -1097,6 +1434,50 @@ } } }, + "CreateEventStreamRequest":{ + "type":"structure", + "required":[ + "DomainName", + "Uri", + "EventStreamName" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

      The unique name of the domain.

      ", + "location":"uri", + "locationName":"DomainName" + }, + "Uri":{ + "shape":"string1To255", + "documentation":"

      The StreamARN of the destination to deliver profile events to. For example, arn:aws:kinesis:region:account-id:stream/stream-name

      " + }, + "EventStreamName":{ + "shape":"name", + "documentation":"

      The name of the event stream.

      ", + "location":"uri", + "locationName":"EventStreamName" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The tags used to organize, track, or control access for this resource.

      " + } + } + }, + "CreateEventStreamResponse":{ + "type":"structure", + "required":["EventStreamArn"], + "members":{ + "EventStreamArn":{ + "shape":"string1To255", + "documentation":"

      A unique identifier for the event stream.

      " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The tags used to organize, track, or control access for this resource.

      " + } + } + }, "CreateIntegrationWorkflowRequest":{ "type":"structure", "required":[ @@ -1279,6 +1660,32 @@ "max":256, "pattern":".*" }, + "DeleteCalculatedAttributeDefinitionRequest":{ + "type":"structure", + "required":[ + "DomainName", + "CalculatedAttributeName" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

      The unique name of the domain.

      ", + "location":"uri", + "locationName":"DomainName" + }, + "CalculatedAttributeName":{ + "shape":"typeName", + "documentation":"

      The unique name of the calculated attribute.

      ", + "location":"uri", + "locationName":"CalculatedAttributeName" + } + } + }, + "DeleteCalculatedAttributeDefinitionResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteDomainRequest":{ "type":"structure", "required":["DomainName"], @@ -1301,6 +1708,32 @@ } } }, + "DeleteEventStreamRequest":{ + "type":"structure", + "required":[ + "DomainName", + "EventStreamName" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

      The unique name of the domain.

      ", + "location":"uri", + "locationName":"DomainName" + }, + "EventStreamName":{ + "shape":"name", + "documentation":"

      The name of the event stream.

      ", + "location":"uri", + "locationName":"EventStreamName" + } + } + }, + "DeleteEventStreamResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteIntegrationRequest":{ "type":"structure", "required":[ @@ -1496,6 +1929,28 @@ "max":256, "pattern":".*" }, + "DestinationSummary":{ + "type":"structure", + "required":[ + "Uri", + "Status" + ], + "members":{ + "Uri":{ + "shape":"string1To255", + "documentation":"

      The StreamARN of the destination to deliver profile events to. For example, arn:aws:kinesis:region:account-id:stream/stream-name.

      " + }, + "Status":{ + "shape":"EventStreamDestinationStatus", + "documentation":"

      The status of enabling the Kinesis stream as a destination for export.

      " + }, + "UnhealthySince":{ + "shape":"timestamp", + "documentation":"

      The timestamp when the status last changed to UNHEALTHY.

      " + } + }, + "documentation":"

      Summary information about the Kinesis data stream.

      " + }, "DomainList":{ "type":"list", "member":{"shape":"ListDomainItem"} @@ -1528,6 +1983,90 @@ "max":1.0, "min":0.0 }, + "EventStreamDestinationDetails":{ + "type":"structure", + "required":[ + "Uri", + "Status" + ], + "members":{ + "Uri":{ + "shape":"string1To255", + "documentation":"

      The StreamARN of the destination to deliver profile events to. For example, arn:aws:kinesis:region:account-id:stream/stream-name.

      " + }, + "Status":{ + "shape":"EventStreamDestinationStatus", + "documentation":"

      The status of enabling the Kinesis stream as a destination for export.

      " + }, + "UnhealthySince":{ + "shape":"timestamp", + "documentation":"

      The timestamp when the status last changed to UNHEALTHY.

      " + }, + "Message":{ + "shape":"string1To1000", + "documentation":"

      The human-readable string that corresponds to the error or success while enabling the streaming destination.

      " + } + }, + "documentation":"

      Details of the destination being used for the EventStream.

      " + }, + "EventStreamDestinationStatus":{ + "type":"string", + "enum":[ + "HEALTHY", + "UNHEALTHY" + ] + }, + "EventStreamState":{ + "type":"string", + "enum":[ + "RUNNING", + "STOPPED" + ] + }, + "EventStreamSummary":{ + "type":"structure", + "required":[ + "DomainName", + "EventStreamName", + "EventStreamArn", + "State" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

      The unique name of the domain.

      " + }, + "EventStreamName":{ + "shape":"name", + "documentation":"

      The name of the event stream.

      " + }, + "EventStreamArn":{ + "shape":"string1To255", + "documentation":"

      A unique identifier for the event stream.

      " + }, + "State":{ + "shape":"EventStreamState", + "documentation":"

      The operational state of destination stream for export.

      " + }, + "StoppedSince":{ + "shape":"timestamp", + "documentation":"

      The timestamp when the State changed to STOPPED.

      " + }, + "DestinationSummary":{ + "shape":"DestinationSummary", + "documentation":"

      Summary information about the Kinesis data stream.

      " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The tags used to organize, track, or control access for this resource.

      " + } + }, + "documentation":"

      An instance of EventStream in a list of EventStreams.

      " + }, + "EventStreamSummaryList":{ + "type":"list", + "member":{"shape":"EventStreamSummary"} + }, "ExportingConfig":{ "type":"structure", "members":{ @@ -1777,57 +2316,229 @@ } } }, - "GetDomainRequest":{ + "GetCalculatedAttributeDefinitionRequest":{ + "type":"structure", + "required":[ + "DomainName", + "CalculatedAttributeName" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

      The unique name of the domain.

      ", + "location":"uri", + "locationName":"DomainName" + }, + "CalculatedAttributeName":{ + "shape":"typeName", + "documentation":"

      The unique name of the calculated attribute.

      ", + "location":"uri", + "locationName":"CalculatedAttributeName" + } + } + }, + "GetCalculatedAttributeDefinitionResponse":{ + "type":"structure", + "members":{ + "CalculatedAttributeName":{ + "shape":"typeName", + "documentation":"

      The unique name of the calculated attribute.

      " + }, + "DisplayName":{ + "shape":"displayName", + "documentation":"

      The display name of the calculated attribute.

      " + }, + "Description":{ + "shape":"text", + "documentation":"

      The description of the calculated attribute.

      " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

      The timestamp of when the calculated attribute definition was created.

      " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

      The timestamp of when the calculated attribute definition was most recently edited.

      " + }, + "Statistic":{ + "shape":"Statistic", + "documentation":"

      The aggregation operation to perform for the calculated attribute.

      " + }, + "Conditions":{ + "shape":"Conditions", + "documentation":"

      The conditions including range, object count, and threshold for the calculated attribute.

      " + }, + "AttributeDetails":{ + "shape":"AttributeDetails", + "documentation":"

      Mathematical expression and a list of attribute items specified in that expression.

      " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The tags used to organize, track, or control access for this resource.

      " + } + } + }, + "GetCalculatedAttributeForProfileRequest":{ + "type":"structure", + "required":[ + "DomainName", + "ProfileId", + "CalculatedAttributeName" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

      The unique name of the domain.

      ", + "location":"uri", + "locationName":"DomainName" + }, + "ProfileId":{ + "shape":"uuid", + "documentation":"

      The unique identifier of a customer profile.

      ", + "location":"uri", + "locationName":"ProfileId" + }, + "CalculatedAttributeName":{ + "shape":"typeName", + "documentation":"

      The unique name of the calculated attribute.

      ", + "location":"uri", + "locationName":"CalculatedAttributeName" + } + } + }, + "GetCalculatedAttributeForProfileResponse":{ + "type":"structure", + "members":{ + "CalculatedAttributeName":{ + "shape":"typeName", + "documentation":"

      The unique name of the calculated attribute.

      " + }, + "DisplayName":{ + "shape":"displayName", + "documentation":"

      The display name of the calculated attribute.

      " + }, + "IsDataPartial":{ + "shape":"string1To255", + "documentation":"

      Indicates whether the calculated attribute’s value is based on partial data. If data is partial, it is set to true.

      " + }, + "Value":{ + "shape":"string1To255", + "documentation":"

      The value of the calculated attribute.

      " + } + } + }, + "GetDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

      The unique name of the domain.

      ", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "GetDomainResponse":{ + "type":"structure", + "required":[ + "DomainName", + "CreatedAt", + "LastUpdatedAt" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

      The unique name of the domain.

      " + }, + "DefaultExpirationDays":{ + "shape":"expirationDaysInteger", + "documentation":"

      The default number of days until the data within the domain expires.

      " + }, + "DefaultEncryptionKey":{ + "shape":"encryptionKey", + "documentation":"

      The default encryption key, which is an AWS managed key, is used when no specific type of encryption key is specified. It is used to encrypt all data before it is placed in permanent or semi-permanent storage.

      " + }, + "DeadLetterQueueUrl":{ + "shape":"sqsQueueUrl", + "documentation":"

      The URL of the SQS dead letter queue, which is used for reporting errors associated with ingesting data from third party applications.

      " + }, + "Stats":{ + "shape":"DomainStats", + "documentation":"

      Usage-specific statistics about the domain.

      " + }, + "Matching":{ + "shape":"MatchingResponse", + "documentation":"

      The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.

      After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3.

      " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

      The timestamp of when the domain was created.

      " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

      The timestamp of when the domain was most recently edited.

      " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The tags used to organize, track, or control access for this resource.

      " + } + } + }, + "GetEventStreamRequest":{ "type":"structure", - "required":["DomainName"], + "required":[ + "DomainName", + "EventStreamName" + ], "members":{ "DomainName":{ "shape":"name", "documentation":"

      The unique name of the domain.

      ", "location":"uri", "locationName":"DomainName" + }, + "EventStreamName":{ + "shape":"name", + "documentation":"

      The name of the event stream provided during create operations.

      ", + "location":"uri", + "locationName":"EventStreamName" } } }, - "GetDomainResponse":{ + "GetEventStreamResponse":{ "type":"structure", "required":[ "DomainName", + "EventStreamArn", "CreatedAt", - "LastUpdatedAt" + "State", + "DestinationDetails" ], "members":{ "DomainName":{ "shape":"name", "documentation":"

      The unique name of the domain.

      " }, - "DefaultExpirationDays":{ - "shape":"expirationDaysInteger", - "documentation":"

      The default number of days until the data within the domain expires.

      " - }, - "DefaultEncryptionKey":{ - "shape":"encryptionKey", - "documentation":"

      The default encryption key, which is an AWS managed key, is used when no specific type of encryption key is specified. It is used to encrypt all data before it is placed in permanent or semi-permanent storage.

      " - }, - "DeadLetterQueueUrl":{ - "shape":"sqsQueueUrl", - "documentation":"

      The URL of the SQS dead letter queue, which is used for reporting errors associated with ingesting data from third party applications.

      " - }, - "Stats":{ - "shape":"DomainStats", - "documentation":"

      Usage-specific statistics about the domain.

      " - }, - "Matching":{ - "shape":"MatchingResponse", - "documentation":"

      The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.

      After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3.

      " + "EventStreamArn":{ + "shape":"string1To255", + "documentation":"

      A unique identifier for the event stream.

      " }, "CreatedAt":{ "shape":"timestamp", - "documentation":"

      The timestamp of when the domain was created.

      " + "documentation":"

      The timestamp of when the export was created.

      " }, - "LastUpdatedAt":{ + "State":{ + "shape":"EventStreamState", + "documentation":"

      The operational state of destination stream for export.

      " + }, + "StoppedSince":{ "shape":"timestamp", - "documentation":"

      The timestamp of when the domain was most recently edited.

      " + "documentation":"

      The timestamp when the State changed to STOPPED.

      " + }, + "DestinationDetails":{ + "shape":"EventStreamDestinationDetails", + "documentation":"

      Details regarding the Kinesis stream.

      " }, "Tags":{ "shape":"TagMap", @@ -2445,6 +3156,141 @@ } } }, + "ListCalculatedAttributeDefinitionItem":{ + "type":"structure", + "members":{ + "CalculatedAttributeName":{ + "shape":"typeName", + "documentation":"

      The unique name of the calculated attribute.

      " + }, + "DisplayName":{ + "shape":"displayName", + "documentation":"

      The display name of the calculated attribute.

      " + }, + "Description":{ + "shape":"text", + "documentation":"

      The description of the calculated attribute.

      " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

      The timestamp of when the calculated attribute definition was created.

      " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

      The timestamp of when the calculated attribute definition was most recently edited.

      " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The tags used to organize, track, or control access for this resource.

      " + } + }, + "documentation":"

      The details of a single calculated attribute definition.

      " + }, + "ListCalculatedAttributeDefinitionsRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

      The unique name of the domain.

      ", + "location":"uri", + "locationName":"DomainName" + }, + "NextToken":{ + "shape":"token", + "documentation":"

      The pagination token from the previous call to ListCalculatedAttributeDefinitions.

      ", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

      The maximum number of calculated attribute definitions returned per page.

      ", + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListCalculatedAttributeDefinitionsResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"CalculatedAttributeDefinitionsList", + "documentation":"

      The list of calculated attribute definitions.

      " + }, + "NextToken":{ + "shape":"token", + "documentation":"

      The pagination token from the previous call to ListCalculatedAttributeDefinitions.

      " + } + } + }, + "ListCalculatedAttributeForProfileItem":{ + "type":"structure", + "members":{ + "CalculatedAttributeName":{ + "shape":"typeName", + "documentation":"

      The unique name of the calculated attribute.

      " + }, + "DisplayName":{ + "shape":"displayName", + "documentation":"

      The display name of the calculated attribute.

      " + }, + "IsDataPartial":{ + "shape":"string1To255", + "documentation":"

      Indicates whether the calculated attribute’s value is based on partial data. If data is partial, it is set to true.

      " + }, + "Value":{ + "shape":"string1To255", + "documentation":"

      The value of the calculated attribute.

      " + } + }, + "documentation":"

      The details of a single calculated attribute for a profile.

      " + }, + "ListCalculatedAttributesForProfileRequest":{ + "type":"structure", + "required":[ + "DomainName", + "ProfileId" + ], + "members":{ + "NextToken":{ + "shape":"token", + "documentation":"

      The pagination token from the previous call to ListCalculatedAttributesForProfile.

      ", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

      The maximum number of calculated attributes returned per page.

      ", + "location":"querystring", + "locationName":"max-results" + }, + "DomainName":{ + "shape":"name", + "documentation":"

      The unique name of the domain.

      ", + "location":"uri", + "locationName":"DomainName" + }, + "ProfileId":{ + "shape":"uuid", + "documentation":"

      The unique identifier of a customer profile.

      ", + "location":"uri", + "locationName":"ProfileId" + } + } + }, + "ListCalculatedAttributesForProfileResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"CalculatedAttributesForProfileList", + "documentation":"

      The list of calculated attributes.

      " + }, + "NextToken":{ + "shape":"token", + "documentation":"

      The pagination token from the previous call to ListCalculatedAttributesForProfile.

      " + } + } + }, "ListDomainItem":{ "type":"structure", "required":[ @@ -2502,6 +3348,43 @@ } } }, + "ListEventStreamsRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

      The unique name of the domain.

      ", + "location":"uri", + "locationName":"DomainName" + }, + "NextToken":{ + "shape":"token", + "documentation":"

      Identifies the next page of results to return.

      ", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

      The maximum number of objects returned per page.

      ", + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListEventStreamsResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"EventStreamSummaryList", + "documentation":"

      Contains summary information about an EventStream.

      " + }, + "NextToken":{ + "shape":"token", + "documentation":"

      Identifies the next page of results to return.

      " + } + } + }, "ListIdentityResolutionJobsRequest":{ "type":"structure", "required":["DomainName"], @@ -2799,7 +3682,7 @@ }, "ObjectFilter":{ "shape":"ObjectFilter", - "documentation":"

      Applies a filter to the response to include profile objects with the specified index values. This filter is only supported for ObjectTypeName _asset, _case and _order.

      " + "documentation":"

      Applies a filter to the response to include profile objects with the specified index values.

      " } } }, @@ -3081,6 +3964,11 @@ "max":512, "pattern":"\\S+" }, + "ObjectCount":{ + "type":"integer", + "max":100, + "min":1 + }, "ObjectFilter":{ "type":"structure", "required":[ @@ -3090,14 +3978,14 @@ "members":{ "KeyName":{ "shape":"name", - "documentation":"

      A searchable identifier of a standard profile object. The predefined keys you can use to search for _asset include: _assetId, _assetName, _serialNumber. The predefined keys you can use to search for _case include: _caseId. The predefined keys you can use to search for _order include: _orderId.

      " + "documentation":"

      A searchable identifier of a profile object. The predefined keys you can use to search for _asset include: _assetId, _assetName, and _serialNumber. The predefined keys you can use to search for _case include: _caseId. The predefined keys you can use to search for _order include: _orderId.

      " }, "Values":{ "shape":"requestValueList", "documentation":"

      A list of key values.

      " } }, - "documentation":"

      The filter applied to ListProfileObjects response to include profile objects with the specified index values. This filter is only supported for ObjectTypeName _asset, _case and _order.

      " + "documentation":"

      The filter applied to ListProfileObjects response to include profile objects with the specified index values.

      " }, "ObjectTypeField":{ "type":"structure", @@ -3140,6 +4028,15 @@ "key":{"shape":"string1To255"}, "value":{"shape":"typeName"} }, + "Operator":{ + "type":"string", + "enum":[ + "EQUAL_TO", + "GREATER_THAN", + "LESS_THAN", + "NOT_EQUAL_TO" + ] + }, "OperatorPropertiesKeys":{ "type":"string", "enum":[ @@ -3532,6 +4429,24 @@ } } }, + "Range":{ + "type":"structure", + "required":[ + "Value", + "Unit" + ], + "members":{ + "Value":{ + "shape":"Value", + "documentation":"

      The amount of time of the specified unit.

      " + }, + "Unit":{ + "shape":"Unit", + "documentation":"

      The unit of time.

      " + } + }, + "documentation":"

      The relative time period over which data is included in the aggregation.

      " + }, "ResourceNotFoundException":{ "type":"structure", "members":{ @@ -3883,6 +4798,19 @@ "type":"list", "member":{"shape":"StandardIdentifier"} }, + "Statistic":{ + "type":"string", + "enum":[ + "FIRST_OCCURRENCE", + "LAST_OCCURRENCE", + "COUNT", + "SUM", + "MINIMUM", + "MAXIMUM", + "AVERAGE", + "MAX_OCCURRENCE" + ] + }, "Status":{ "type":"string", "enum":[ @@ -3998,6 +4926,24 @@ "type":"list", "member":{"shape":"Task"} }, + "Threshold":{ + "type":"structure", + "required":[ + "Value", + "Operator" + ], + "members":{ + "Value":{ + "shape":"string1To255", + "documentation":"

      The value of the threshold.

      " + }, + "Operator":{ + "shape":"Operator", + "documentation":"

      The operator of the threshold.

      " + } + }, + "documentation":"

      The threshold for the calculated attribute.

      " + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -4045,6 +4991,10 @@ "OnDemand" ] }, + "Unit":{ + "type":"string", + "enum":["DAYS"] + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -4122,6 +5072,80 @@ "key":{"shape":"string1To255"}, "value":{"shape":"string0To255"} }, + "UpdateCalculatedAttributeDefinitionRequest":{ + "type":"structure", + "required":[ + "DomainName", + "CalculatedAttributeName" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

      The unique name of the domain.

      ", + "location":"uri", + "locationName":"DomainName" + }, + "CalculatedAttributeName":{ + "shape":"typeName", + "documentation":"

      The unique name of the calculated attribute.

      ", + "location":"uri", + "locationName":"CalculatedAttributeName" + }, + "DisplayName":{ + "shape":"displayName", + "documentation":"

      The display name of the calculated attribute.

      " + }, + "Description":{ + "shape":"text", + "documentation":"

      The description of the calculated attribute.

      " + }, + "Conditions":{ + "shape":"Conditions", + "documentation":"

      The conditions including range, object count, and threshold for the calculated attribute.

      " + } + } + }, + "UpdateCalculatedAttributeDefinitionResponse":{ + "type":"structure", + "members":{ + "CalculatedAttributeName":{ + "shape":"typeName", + "documentation":"

      The unique name of the calculated attribute.

      " + }, + "DisplayName":{ + "shape":"displayName", + "documentation":"

      The display name of the calculated attribute.

      " + }, + "Description":{ + "shape":"text", + "documentation":"

      The description of the calculated attribute.

      " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

      The timestamp of when the calculated attribute definition was created.

      " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

      The timestamp of when the calculated attribute definition was most recently edited.

      " + }, + "Statistic":{ + "shape":"Statistic", + "documentation":"

      The aggregation operation to perform for the calculated attribute.

      " + }, + "Conditions":{ + "shape":"Conditions", + "documentation":"

      The conditions including range, object count, and threshold for the calculated attribute.

      " + }, + "AttributeDetails":{ + "shape":"AttributeDetails", + "documentation":"

      The mathematical expression and a list of attribute items specified in that expression.

      " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The tags used to organize, track, or control access for this resource.

      " + } + } + }, "UpdateDomainRequest":{ "type":"structure", "required":["DomainName"], @@ -4317,6 +5341,11 @@ } } }, + "Value":{ + "type":"integer", + "max":366, + "min":1 + }, "WorkflowAttributes":{ "type":"structure", "members":{ @@ -4395,7 +5424,19 @@ "max":4, "min":1 }, + "attributeName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9_.-]+$" + }, "boolean":{"type":"boolean"}, + "displayName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z_][a-zA-Z_0-9-\\s]*$" + }, "encryptionKey":{ "type":"string", "max":255, diff --git a/services/databasemigration/pom.xml b/services/databasemigration/pom.xml index 3da8df42ed9e..0a69910ecc69 100644 --- a/services/databasemigration/pom.xml +++ b/services/databasemigration/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT databasemigration AWS Java SDK :: Services :: AWS Database Migration Service diff --git a/services/databrew/pom.xml b/services/databrew/pom.xml index 3ee9a40fd8d3..b3f8db7af050 100644 --- a/services/databrew/pom.xml +++ b/services/databrew/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT databrew AWS Java SDK :: Services :: Data Brew diff --git a/services/dataexchange/pom.xml b/services/dataexchange/pom.xml index e87fc3d967b1..a9f02e7404a7 100644 --- a/services/dataexchange/pom.xml +++ b/services/dataexchange/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT dataexchange AWS Java SDK :: Services :: DataExchange diff --git a/services/dataexchange/src/main/resources/codegen-resources/customization.config b/services/dataexchange/src/main/resources/codegen-resources/customization.config index 02e46d9f036d..78ff91600302 100644 --- a/services/dataexchange/src/main/resources/codegen-resources/customization.config +++ b/services/dataexchange/src/main/resources/codegen-resources/customization.config @@ -3,5 +3,6 @@ "SendApiAsset": { 
"exclude": true } - } + }, + "generateEndpointClientTests": true } diff --git a/services/datapipeline/pom.xml b/services/datapipeline/pom.xml index bab8ec848104..890bbc838edb 100644 --- a/services/datapipeline/pom.xml +++ b/services/datapipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT datapipeline AWS Java SDK :: Services :: AWS Data Pipeline diff --git a/services/datasync/pom.xml b/services/datasync/pom.xml index 995415f52322..b6bea7a77e30 100644 --- a/services/datasync/pom.xml +++ b/services/datasync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT datasync AWS Java SDK :: Services :: DataSync diff --git a/services/datasync/src/main/resources/codegen-resources/customization.config b/services/datasync/src/main/resources/codegen-resources/customization.config index e3772b849608..296e08b3695c 100644 --- a/services/datasync/src/main/resources/codegen-resources/customization.config +++ b/services/datasync/src/main/resources/codegen-resources/customization.config @@ -4,5 +4,6 @@ "listLocations", "listTaskExecutions", "listTasks" - ] + ], + "generateEndpointClientTests": true } diff --git a/services/dax/pom.xml b/services/dax/pom.xml index 20f1a82962b9..ad37b364c14b 100644 --- a/services/dax/pom.xml +++ b/services/dax/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT dax AWS Java SDK :: Services :: Amazon DynamoDB Accelerator (DAX) diff --git a/services/detective/pom.xml b/services/detective/pom.xml index 73ddf4048721..77cb7fa7eeac 100644 --- a/services/detective/pom.xml +++ b/services/detective/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT detective AWS Java SDK :: Services :: Detective diff --git a/services/devicefarm/pom.xml b/services/devicefarm/pom.xml index daa00f205aad..fa08ebec4e56 100644 --- a/services/devicefarm/pom.xml +++ b/services/devicefarm/pom.xml @@ -21,7 
+21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT devicefarm AWS Java SDK :: Services :: AWS Device Farm diff --git a/services/devopsguru/pom.xml b/services/devopsguru/pom.xml index 8d2e0474695f..3cbdcfbe4e5c 100644 --- a/services/devopsguru/pom.xml +++ b/services/devopsguru/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT devopsguru AWS Java SDK :: Services :: Dev Ops Guru diff --git a/services/devopsguru/src/main/resources/codegen-resources/endpoint-tests.json b/services/devopsguru/src/main/resources/codegen-resources/endpoint-tests.json index c03e296a37ec..22829574c14d 100644 --- a/services/devopsguru/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/devopsguru/src/main/resources/codegen-resources/endpoint-tests.json @@ -9,8 +9,8 @@ }, "params": { "Region": "ap-northeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -22,8 +22,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -35,8 +35,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -48,8 +48,8 @@ }, "params": { "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,8 +61,8 @@ }, "params": { "Region": "eu-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -74,8 +74,8 @@ }, "params": { "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -87,8 +87,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -100,8 +100,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": 
true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -113,8 +113,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -126,8 +126,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -139,8 +139,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -152,8 +152,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -165,8 +165,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -178,8 +178,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -191,8 +191,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -204,8 +204,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -217,8 +217,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -230,8 +230,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -243,8 +243,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -256,8 +256,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -269,8 +269,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": 
false, + "UseDualStack": true } }, { @@ -282,8 +282,19 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -295,8 +306,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -308,8 +330,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -321,8 +354,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -334,8 +378,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ 
-347,8 +391,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -360,8 +404,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -372,8 +416,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -384,10 +428,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/devopsguru/src/main/resources/codegen-resources/service-2.json b/services/devopsguru/src/main/resources/codegen-resources/service-2.json index fdb10983689e..26438299a702 100644 --- a/services/devopsguru/src/main/resources/codegen-resources/service-2.json +++ b/services/devopsguru/src/main/resources/codegen-resources/service-2.json @@ -30,7 +30,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

      Adds a notification channel to DevOps Guru. A notification channel is used to notify you about important DevOps Guru events, such as when an insight is generated.

      If you use an Amazon SNS topic in another account, you must attach a policy to it that grants DevOps Guru permission to it notifications. DevOps Guru adds the required policy on your behalf to send notifications using Amazon SNS in your account. DevOps Guru only supports standard SNS topics. For more information, see Permissions for cross account Amazon SNS topics.

      If you use an Amazon SNS topic in another account, you must attach a policy to it that grants DevOps Guru permission to it notifications. DevOps Guru adds the required policy on your behalf to send notifications using Amazon SNS in your account. For more information, see Permissions for cross account Amazon SNS topics.

      If you use an Amazon SNS topic that is encrypted by an Amazon Web Services Key Management Service customer-managed key (CMK), then you must add permissions to the CMK. For more information, see Permissions for Amazon Web Services KMS–encrypted Amazon SNS topics.

      " + "documentation":"

      Adds a notification channel to DevOps Guru. A notification channel is used to notify you about important DevOps Guru events, such as when an insight is generated.

      If you use an Amazon SNS topic in another account, you must attach a policy to it that grants DevOps Guru permission to send it notifications. DevOps Guru adds the required policy on your behalf to send notifications using Amazon SNS in your account. DevOps Guru only supports standard SNS topics. For more information, see Permissions for Amazon SNS topics.

      If you use an Amazon SNS topic that is encrypted by an Amazon Web Services Key Management Service customer-managed key (CMK), then you must add permissions to the CMK. For more information, see Permissions for Amazon Web Services KMS–encrypted Amazon SNS topics.

      " }, "DeleteInsight":{ "name":"DeleteInsight", @@ -1759,6 +1759,48 @@ "exception":true, "fault":true }, + "KMSKeyId":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^.*$" + }, + "KMSServerSideEncryptionIntegration":{ + "type":"structure", + "members":{ + "KMSKeyId":{ + "shape":"KMSKeyId", + "documentation":"

      Describes the specified KMS key.

      To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". If you specify a predefined Amazon Web Services alias (an Amazon Web Services alias with no key ID), Amazon Web Services KMS associates the alias with an Amazon Web Services managed key and returns its KeyId and Arn in the response. To specify a KMS key in a different Amazon Web Services account, you must use the key ARN or alias ARN.

      For example:

      Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

      Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

      Alias name: alias/ExampleAlias

      Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias

      " + }, + "OptInStatus":{ + "shape":"OptInStatus", + "documentation":"

      Specifies if DevOps Guru is enabled for customer managed keys.

      " + }, + "Type":{ + "shape":"ServerSideEncryptionType", + "documentation":"

      The type of KMS key used. Customer managed keys are the KMS keys that you create. Amazon Web Services owned keys are keys that are owned and managed by DevOps Guru.

      " + } + }, + "documentation":"

      Information about the KMS encryption used with DevOps Guru.

      " + }, + "KMSServerSideEncryptionIntegrationConfig":{ + "type":"structure", + "members":{ + "KMSKeyId":{ + "shape":"KMSKeyId", + "documentation":"

      Describes the specified KMS key.

      To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". If you specify a predefined Amazon Web Services alias (an Amazon Web Services alias with no key ID), Amazon Web Services KMS associates the alias with an Amazon Web Services managed key and returns its KeyId and Arn in the response. To specify a KMS key in a different Amazon Web Services account, you must use the key ARN or alias ARN.

      For example:

      Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

      Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

      Alias name: alias/ExampleAlias

      Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias

      " + }, + "OptInStatus":{ + "shape":"OptInStatus", + "documentation":"

      Specifies if DevOps Guru is enabled for KMS integration.

      " + }, + "Type":{ + "shape":"ServerSideEncryptionType", + "documentation":"

      The type of KMS key used. Customer managed keys are the KMS keys that you create. Amazon Web Services owned keys are keys that are owned and managed by DevOps Guru.

      " + } + }, + "documentation":"

      Information about whether DevOps Guru is configured to encrypt server-side data using KMS.

      " + }, "ListAnomaliesForInsightFilters":{ "type":"structure", "members":{ @@ -2372,7 +2414,7 @@ "documentation":"

      A NotificationChannelConfig object that contains information about configured notification channels.

      " } }, - "documentation":"

      Information about a notification channel. A notification channel is used to notify you when DevOps Guru creates an insight. The one supported notification channel is Amazon Simple Notification Service (Amazon SNS).

      If you use an Amazon SNS topic in another account, you must attach a policy to it that grants DevOps Guru permission to it notifications. DevOps Guru adds the required policy on your behalf to send notifications using Amazon SNS in your account. DevOps Guru only supports standard SNS topics. For more information, see Permissions for cross account Amazon SNS topics.

      If you use an Amazon SNS topic in another account, you must attach a policy to it that grants DevOps Guru permission to it notifications. DevOps Guru adds the required policy on your behalf to send notifications using Amazon SNS in your account. For more information, see Permissions for cross account Amazon SNS topics.

      If you use an Amazon SNS topic that is encrypted by an Amazon Web Services Key Management Service customer-managed key (CMK), then you must add permissions to the CMK. For more information, see Permissions for Amazon Web Services KMS–encrypted Amazon SNS topics.

      " + "documentation":"

      Information about a notification channel. A notification channel is used to notify you when DevOps Guru creates an insight. The one supported notification channel is Amazon Simple Notification Service (Amazon SNS).

      If you use an Amazon SNS topic in another account, you must attach a policy to it that grants DevOps Guru permission to send it notifications. DevOps Guru adds the required policy on your behalf to send notifications using Amazon SNS in your account. DevOps Guru only supports standard SNS topics. For more information, see Permissions for Amazon SNS topics.

      If you use an Amazon SNS topic that is encrypted by an Amazon Web Services Key Management Service customer-managed key (CMK), then you must add permissions to the CMK. For more information, see Permissions for Amazon Web Services KMS–encrypted Amazon SNS topics.

      " }, "NotificationChannelConfig":{ "type":"structure", @@ -2380,7 +2422,7 @@ "members":{ "Sns":{ "shape":"SnsChannelConfig", - "documentation":"

      Information about a notification channel configured in DevOps Guru to send notifications when insights are created.

      If you use an Amazon SNS topic in another account, you must attach a policy to it that grants DevOps Guru permission to it notifications. DevOps Guru adds the required policy on your behalf to send notifications using Amazon SNS in your account. DevOps Guru only supports standard SNS topics. For more information, see Permissions for cross account Amazon SNS topics.

      If you use an Amazon SNS topic in another account, you must attach a policy to it that grants DevOps Guru permission to it notifications. DevOps Guru adds the required policy on your behalf to send notifications using Amazon SNS in your account. For more information, see Permissions for cross account Amazon SNS topics.

      If you use an Amazon SNS topic that is encrypted by an Amazon Web Services Key Management Service customer-managed key (CMK), then you must add permissions to the CMK. For more information, see Permissions for Amazon Web Services KMS–encrypted Amazon SNS topics.

      " + "documentation":"

      Information about a notification channel configured in DevOps Guru to send notifications when insights are created.

      If you use an Amazon SNS topic in another account, you must attach a policy to it that grants DevOps Guru permission to send it notifications. DevOps Guru adds the required policy on your behalf to send notifications using Amazon SNS in your account. DevOps Guru only supports standard SNS topics. For more information, see Permissions for Amazon SNS topics.

      If you use an Amazon SNS topic that is encrypted by an Amazon Web Services Key Management Service customer-managed key (CMK), then you must add permissions to the CMK. For more information, see Permissions for Amazon Web Services KMS–encrypted Amazon SNS topics.

      " }, "Filters":{ "shape":"NotificationFilterConfig", @@ -3552,6 +3594,13 @@ } } }, + "ServerSideEncryptionType":{ + "type":"string", + "enum":[ + "CUSTOMER_MANAGED_KEY", + "AWS_OWNED_KMS_KEY" + ] + }, "ServiceCollection":{ "type":"structure", "members":{ @@ -3608,6 +3657,10 @@ "LogsAnomalyDetection":{ "shape":"LogsAnomalyDetectionIntegration", "documentation":"

      Information about whether DevOps Guru is configured to perform log anomaly detection on Amazon CloudWatch log groups.

      " + }, + "KMSServerSideEncryption":{ + "shape":"KMSServerSideEncryptionIntegration", + "documentation":"

      Information about whether DevOps Guru is configured to encrypt server-side data using KMS.

      " } }, "documentation":"

      Information about the integration of DevOps Guru with another Amazon Web Services service, such as Amazon Web Services Systems Manager.

      " @@ -3693,7 +3746,7 @@ "documentation":"

      The Amazon Resource Name (ARN) of an Amazon Simple Notification Service topic.

      " } }, - "documentation":"

      Contains the Amazon Resource Name (ARN) of an Amazon Simple Notification Service topic.

      If you use an Amazon SNS topic in another account, you must attach a policy to it that grants DevOps Guru permission to it notifications. DevOps Guru adds the required policy on your behalf to send notifications using Amazon SNS in your account. DevOps Guru only supports standard SNS topics. For more information, see Permissions for cross account Amazon SNS topics.

      If you use an Amazon SNS topic in another account, you must attach a policy to it that grants DevOps Guru permission to it notifications. DevOps Guru adds the required policy on your behalf to send notifications using Amazon SNS in your account. For more information, see Permissions for cross account Amazon SNS topics.

      If you use an Amazon SNS topic that is encrypted by an Amazon Web Services Key Management Service customer-managed key (CMK), then you must add permissions to the CMK. For more information, see Permissions for Amazon Web Services KMS–encrypted Amazon SNS topics.

      " + "documentation":"

      Contains the Amazon Resource Name (ARN) of an Amazon Simple Notification Service topic.

      If you use an Amazon SNS topic in another account, you must attach a policy to it that grants DevOps Guru permission to send it notifications. DevOps Guru adds the required policy on your behalf to send notifications using Amazon SNS in your account. DevOps Guru only supports standard SNS topics. For more information, see Permissions for Amazon SNS topics.

      If you use an Amazon SNS topic that is encrypted by an Amazon Web Services Key Management Service customer-managed key (CMK), then you must add permissions to the CMK. For more information, see Permissions for Amazon Web Services KMS–encrypted Amazon SNS topics.

      " }, "SsmOpsItemId":{ "type":"string", @@ -3967,6 +4020,10 @@ "LogsAnomalyDetection":{ "shape":"LogsAnomalyDetectionIntegrationConfig", "documentation":"

      Information about whether DevOps Guru is configured to perform log anomaly detection on Amazon CloudWatch log groups.

      " + }, + "KMSServerSideEncryption":{ + "shape":"KMSServerSideEncryptionIntegrationConfig", + "documentation":"

      Information about whether DevOps Guru is configured to encrypt server-side data using KMS.

      " } }, "documentation":"

      Information about updating the integration status of an Amazon Web Services service, such as Amazon Web Services Systems Manager, with DevOps Guru.

      " diff --git a/services/directconnect/pom.xml b/services/directconnect/pom.xml index 6e4571e55690..d4b6780db5bf 100644 --- a/services/directconnect/pom.xml +++ b/services/directconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT directconnect AWS Java SDK :: Services :: AWS Direct Connect diff --git a/services/directconnect/src/main/resources/codegen-resources/service-2.json b/services/directconnect/src/main/resources/codegen-resources/service-2.json index a66f40b7ed04..7de7943feef1 100644 --- a/services/directconnect/src/main/resources/codegen-resources/service-2.json +++ b/services/directconnect/src/main/resources/codegen-resources/service-2.json @@ -915,7 +915,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

      Updates the specified attributes of the specified virtual private interface.

      Setting the MTU of a virtual interface to 9001 (jumbo frames) can cause an update to the underlying physical connection if it wasn't updated to support jumbo frames. Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call DescribeConnections. To check whether your virtual q interface supports jumbo frames, call DescribeVirtualInterfaces.

      " + "documentation":"

      Updates the specified attributes of the specified virtual private interface.

      Setting the MTU of a virtual interface to 9001 (jumbo frames) can cause an update to the underlying physical connection if it wasn't updated to support jumbo frames. Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call DescribeConnections. To check whether your virtual interface supports jumbo frames, call DescribeVirtualInterfaces.

      " } }, "shapes":{ @@ -1472,7 +1472,7 @@ }, "jumboFrameCapable":{ "shape":"JumboFrameCapable", - "documentation":"

      Indicates whether jumbo frames (9001 MTU) are supported.

      " + "documentation":"

      Indicates whether jumbo frames are supported.

      " }, "awsDeviceV2":{ "shape":"AwsDeviceV2", @@ -2685,7 +2685,7 @@ }, "jumboFrameCapable":{ "shape":"JumboFrameCapable", - "documentation":"

      Indicates whether jumbo frames (9001 MTU) are supported.

      " + "documentation":"

      Indicates whether jumbo frames are supported.

      " }, "awsDeviceV2":{ "shape":"AwsDeviceV2", @@ -2799,7 +2799,7 @@ }, "jumboFrameCapable":{ "shape":"JumboFrameCapable", - "documentation":"

      Indicates whether jumbo frames (9001 MTU) are supported.

      " + "documentation":"

      Indicates whether jumbo frames are supported.

      " }, "hasLogicalRedundancy":{ "shape":"HasLogicalRedundancy", @@ -3764,7 +3764,7 @@ }, "jumboFrameCapable":{ "shape":"JumboFrameCapable", - "documentation":"

      Indicates whether jumbo frames (9001 MTU) are supported.

      " + "documentation":"

      Indicates whether jumbo frames are supported.

      " }, "virtualGatewayId":{ "shape":"VirtualGatewayId", diff --git a/services/directory/pom.xml b/services/directory/pom.xml index b2683b9b578f..d85eadfaa379 100644 --- a/services/directory/pom.xml +++ b/services/directory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT directory AWS Java SDK :: Services :: AWS Directory Service diff --git a/services/dlm/pom.xml b/services/dlm/pom.xml index 914034073721..4d3c51eaf02e 100644 --- a/services/dlm/pom.xml +++ b/services/dlm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT dlm AWS Java SDK :: Services :: DLM diff --git a/services/docdb/pom.xml b/services/docdb/pom.xml index 47fd6fe46ad7..fb04a2262e85 100644 --- a/services/docdb/pom.xml +++ b/services/docdb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT docdb AWS Java SDK :: Services :: DocDB diff --git a/services/docdbelastic/pom.xml b/services/docdbelastic/pom.xml index 41b1de8bb48d..b39f2c03b04a 100644 --- a/services/docdbelastic/pom.xml +++ b/services/docdbelastic/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT docdbelastic AWS Java SDK :: Services :: Doc DB Elastic diff --git a/services/drs/pom.xml b/services/drs/pom.xml index 6ba12cfb6f0e..418e8b4197a5 100644 --- a/services/drs/pom.xml +++ b/services/drs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT drs AWS Java SDK :: Services :: Drs diff --git a/services/drs/src/main/resources/codegen-resources/paginators-1.json b/services/drs/src/main/resources/codegen-resources/paginators-1.json index 3158e95a566f..bf7f0e978df8 100644 --- a/services/drs/src/main/resources/codegen-resources/paginators-1.json +++ b/services/drs/src/main/resources/codegen-resources/paginators-1.json @@ -36,6 +36,12 @@ "limit_key": "maxResults", "result_key": "items" }, + "DescribeSourceNetworks": { + 
"input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "items" + }, "DescribeSourceServers": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/services/drs/src/main/resources/codegen-resources/service-2.json b/services/drs/src/main/resources/codegen-resources/service-2.json index d154f217dc50..db8d44dd9fe7 100644 --- a/services/drs/src/main/resources/codegen-resources/service-2.json +++ b/services/drs/src/main/resources/codegen-resources/service-2.json @@ -13,6 +13,26 @@ "uid":"drs-2020-02-26" }, "operations":{ + "AssociateSourceNetworkStack":{ + "name":"AssociateSourceNetworkStack", + "http":{ + "method":"POST", + "requestUri":"/AssociateSourceNetworkStack", + "responseCode":202 + }, + "input":{"shape":"AssociateSourceNetworkStackRequest"}, + "output":{"shape":"AssociateSourceNetworkStackResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UninitializedAccountException"} + ], + "documentation":"

      Associate a Source Network to an existing CloudFormation Stack and modify launch templates to use this network. Can be used for reverting to previously deployed CloudFormation stacks.

      " + }, "CreateExtendedSourceServer":{ "name":"CreateExtendedSourceServer", "http":{ @@ -71,6 +91,26 @@ ], "documentation":"

      Creates a new ReplicationConfigurationTemplate.

      " }, + "CreateSourceNetwork":{ + "name":"CreateSourceNetwork", + "http":{ + "method":"POST", + "requestUri":"/CreateSourceNetwork", + "responseCode":201 + }, + "input":{"shape":"CreateSourceNetworkRequest"}, + "output":{"shape":"CreateSourceNetworkResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UninitializedAccountException"} + ], + "documentation":"

      Create a new Source Network resource for a provided VPC ID.

      " + }, "DeleteJob":{ "name":"DeleteJob", "http":{ @@ -145,6 +185,25 @@ "documentation":"

      Deletes a single Replication Configuration Template by ID

      ", "idempotent":true }, + "DeleteSourceNetwork":{ + "name":"DeleteSourceNetwork", + "http":{ + "method":"POST", + "requestUri":"/DeleteSourceNetwork", + "responseCode":204 + }, + "input":{"shape":"DeleteSourceNetworkRequest"}, + "output":{"shape":"DeleteSourceNetworkResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"UninitializedAccountException"} + ], + "documentation":"

      Delete Source Network resource.

      ", + "idempotent":true + }, "DeleteSourceServer":{ "name":"DeleteSourceServer", "http":{ @@ -269,6 +328,23 @@ ], "documentation":"

      Lists all ReplicationConfigurationTemplates, filtered by Source Server IDs.

      " }, + "DescribeSourceNetworks":{ + "name":"DescribeSourceNetworks", + "http":{ + "method":"POST", + "requestUri":"/DescribeSourceNetworks", + "responseCode":200 + }, + "input":{"shape":"DescribeSourceNetworksRequest"}, + "output":{"shape":"DescribeSourceNetworksResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UninitializedAccountException"} + ], + "documentation":"

      Lists all Source Networks or multiple Source Networks filtered by ID.

      " + }, "DescribeSourceServers":{ "name":"DescribeSourceServers", "http":{ @@ -322,6 +398,25 @@ ], "documentation":"

      Disconnects a specific Source Server from Elastic Disaster Recovery. Data replication is stopped immediately. All AWS resources created by Elastic Disaster Recovery for enabling the replication of the Source Server will be terminated / deleted within 90 minutes. You cannot disconnect a Source Server if it has a Recovery Instance. If the agent on the Source Server has not been prevented from communicating with the Elastic Disaster Recovery service, then it will receive a command to uninstall itself (within approximately 10 minutes). The following properties of the SourceServer will be changed immediately: dataReplicationInfo.dataReplicationState will be set to DISCONNECTED; The totalStorageBytes property for each of dataReplicationInfo.replicatedDisks will be set to zero; dataReplicationInfo.lagDuration and dataReplicationInfo.lagDuration will be nullified.

      " }, + "ExportSourceNetworkCfnTemplate":{ + "name":"ExportSourceNetworkCfnTemplate", + "http":{ + "method":"POST", + "requestUri":"/ExportSourceNetworkCfnTemplate", + "responseCode":200 + }, + "input":{"shape":"ExportSourceNetworkCfnTemplateRequest"}, + "output":{"shape":"ExportSourceNetworkCfnTemplateResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UninitializedAccountException"} + ], + "documentation":"

      Export the Source Network CloudFormation template to an S3 bucket.

      " + }, "GetFailbackReplicationConfiguration":{ "name":"GetFailbackReplicationConfiguration", "http":{ @@ -540,6 +635,43 @@ ], "documentation":"

      Starts replication for a stopped Source Server. This action would make the Source Server protected again and restart billing for it.

      " }, + "StartSourceNetworkRecovery":{ + "name":"StartSourceNetworkRecovery", + "http":{ + "method":"POST", + "requestUri":"/StartSourceNetworkRecovery", + "responseCode":202 + }, + "input":{"shape":"StartSourceNetworkRecoveryRequest"}, + "output":{"shape":"StartSourceNetworkRecoveryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UninitializedAccountException"} + ], + "documentation":"

      Deploy VPC for the specified Source Network and modify launch templates to use this network. The VPC will be deployed using a dedicated CloudFormation stack.

      " + }, + "StartSourceNetworkReplication":{ + "name":"StartSourceNetworkReplication", + "http":{ + "method":"POST", + "requestUri":"/StartSourceNetworkReplication", + "responseCode":200 + }, + "input":{"shape":"StartSourceNetworkReplicationRequest"}, + "output":{"shape":"StartSourceNetworkReplicationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"UninitializedAccountException"} + ], + "documentation":"

      Starts replication for a Source Network. This action would make the Source Network protected.

      " + }, "StopFailback":{ "name":"StopFailback", "http":{ @@ -574,6 +706,25 @@ ], "documentation":"

      Stops replication for a Source Server. This action would make the Source Server unprotected, delete its existing snapshots and stop billing for it.

      " }, + "StopSourceNetworkReplication":{ + "name":"StopSourceNetworkReplication", + "http":{ + "method":"POST", + "requestUri":"/StopSourceNetworkReplication", + "responseCode":200 + }, + "input":{"shape":"StopSourceNetworkReplicationRequest"}, + "output":{"shape":"StopSourceNetworkReplicationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UninitializedAccountException"} + ], + "documentation":"

      Stops replication for a Source Network. This action would make the Source Network unprotected.

      " + }, "TagResource":{ "name":"TagResource", "http":{ @@ -773,6 +924,32 @@ "max":50, "min":0 }, + "AssociateSourceNetworkStackRequest":{ + "type":"structure", + "required":[ + "cfnStackName", + "sourceNetworkID" + ], + "members":{ + "cfnStackName":{ + "shape":"CfnStackName", + "documentation":"

      CloudFormation template to associate with a Source Network.

      " + }, + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

      The Source Network ID to associate with CloudFormation template.

      " + } + } + }, + "AssociateSourceNetworkStackResponse":{ + "type":"structure", + "members":{ + "job":{ + "shape":"Job", + "documentation":"

      The Source Network association Job.

      " + } + } + }, "AwsAvailabilityZone":{ "type":"string", "max":255, @@ -808,6 +985,13 @@ }, "documentation":"

      Information about a server's CPU.

      " }, + "CfnStackName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z][-a-zA-Z0-9]*$", + "sensitive":true + }, "ConflictException":{ "type":"structure", "members":{ @@ -900,6 +1084,10 @@ "shape":"Boolean", "documentation":"

      Copy tags.

      " }, + "exportBucketArn":{ + "shape":"ARN", + "documentation":"

      S3 bucket ARN to export Source Network templates.

      " + }, "launchDisposition":{ "shape":"LaunchDisposition", "documentation":"

      Launch disposition.

      " @@ -1006,6 +1194,41 @@ } } }, + "CreateSourceNetworkRequest":{ + "type":"structure", + "required":[ + "originAccountID", + "originRegion", + "vpcID" + ], + "members":{ + "originAccountID":{ + "shape":"AccountID", + "documentation":"

      Account containing the VPC to protect.

      " + }, + "originRegion":{ + "shape":"AwsRegion", + "documentation":"

      Region containing the VPC to protect.

      " + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

      A set of tags to be associated with the Source Network resource.

      " + }, + "vpcID":{ + "shape":"VpcID", + "documentation":"

      Which VPC ID to protect.

      " + } + } + }, + "CreateSourceNetworkResponse":{ + "type":"structure", + "members":{ + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

      ID of the created Source Network.

      " + } + } + }, "DataReplicationError":{ "type":"structure", "members":{ @@ -1237,6 +1460,21 @@ "members":{ } }, + "DeleteSourceNetworkRequest":{ + "type":"structure", + "required":["sourceNetworkID"], + "members":{ + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

      ID of the Source Network to delete.

      " + } + } + }, + "DeleteSourceNetworkResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteSourceServerRequest":{ "type":"structure", "required":["sourceServerID"], @@ -1498,6 +1736,60 @@ } } }, + "DescribeSourceNetworksRequest":{ + "type":"structure", + "members":{ + "filters":{ + "shape":"DescribeSourceNetworksRequestFilters", + "documentation":"

      A set of filters by which to return Source Networks.

      " + }, + "maxResults":{ + "shape":"StrictlyPositiveInteger", + "documentation":"

      Maximum number of Source Networks to retrieve.

      " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

      The token of the next Source Networks to retrieve.

      " + } + } + }, + "DescribeSourceNetworksRequestFilters":{ + "type":"structure", + "members":{ + "originAccountID":{ + "shape":"AccountID", + "documentation":"

      Filter Source Networks by account ID containing the protected VPCs.

      " + }, + "originRegion":{ + "shape":"AwsRegion", + "documentation":"

      Filter Source Networks by the region containing the protected VPCs.

      " + }, + "sourceNetworkIDs":{ + "shape":"DescribeSourceNetworksRequestFiltersIDs", + "documentation":"

      An array of Source Network IDs that should be returned. An empty array means all Source Networks.

      " + } + }, + "documentation":"

      A set of filters by which to return Source Networks.

      " + }, + "DescribeSourceNetworksRequestFiltersIDs":{ + "type":"list", + "member":{"shape":"SourceNetworkID"}, + "max":100, + "min":0 + }, + "DescribeSourceNetworksResponse":{ + "type":"structure", + "members":{ + "items":{ + "shape":"SourceNetworksList", + "documentation":"

      An array of Source Networks.

      " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

      The token of the next Source Networks to retrieve.

      " + } + } + }, "DescribeSourceServersRequest":{ "type":"structure", "members":{ @@ -1629,6 +1921,36 @@ "min":10, "pattern":"^vol-([0-9a-fA-F]{8}|[0-9a-fA-F]{17})$" }, + "EventResourceData":{ + "type":"structure", + "members":{ + "sourceNetworkData":{ + "shape":"SourceNetworkData", + "documentation":"

      Source Network properties.

      " + } + }, + "documentation":"

      Properties of resource related to a job event.

      ", + "union":true + }, + "ExportSourceNetworkCfnTemplateRequest":{ + "type":"structure", + "required":["sourceNetworkID"], + "members":{ + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

      The Source Network ID to export its CloudFormation template to an S3 bucket.

      " + } + } + }, + "ExportSourceNetworkCfnTemplateResponse":{ + "type":"structure", + "members":{ + "s3DestinationUrl":{ + "shape":"LargeBoundedString", + "documentation":"

      S3 bucket URL where the Source Network CloudFormation template was exported to.

      " + } + } + }, "ExtensionStatus":{ "type":"string", "enum":[ @@ -1789,7 +2111,10 @@ "FAILBACK", "DIAGNOSTIC", "TERMINATE_RECOVERY_INSTANCES", - "TARGET_ACCOUNT" + "TARGET_ACCOUNT", + "CREATE_NETWORK_RECOVERY", + "UPDATE_NETWORK_RECOVERY", + "ASSOCIATE_NETWORK_RECOVERY" ] }, "InternalServerException":{ @@ -1833,6 +2158,10 @@ "shape":"JobID", "documentation":"

      The ID of the Job.

      " }, + "participatingResources":{ + "shape":"ParticipatingResources", + "documentation":"

      A list of resources that the Job is acting upon.

      " + }, "participatingServers":{ "shape":"ParticipatingServers", "documentation":"

      A list of servers that the Job is acting upon.

      " @@ -1895,7 +2224,17 @@ "LAUNCH_START", "LAUNCH_FAILED", "JOB_CANCEL", - "JOB_END" + "JOB_END", + "DEPLOY_NETWORK_CONFIGURATION_START", + "DEPLOY_NETWORK_CONFIGURATION_END", + "DEPLOY_NETWORK_CONFIGURATION_FAILED", + "UPDATE_NETWORK_CONFIGURATION_START", + "UPDATE_NETWORK_CONFIGURATION_END", + "UPDATE_NETWORK_CONFIGURATION_FAILED", + "UPDATE_LAUNCH_TEMPLATE_START", + "UPDATE_LAUNCH_TEMPLATE_END", + "UPDATE_LAUNCH_TEMPLATE_FAILED", + "NETWORK_RECOVERY_FAIL" ] }, "JobLogEventData":{ @@ -1909,6 +2248,10 @@ "shape":"EC2InstanceID", "documentation":"

      The ID of a conversion server.

      " }, + "eventResourceData":{ + "shape":"EventResourceData", + "documentation":"

      Properties of resource related to a job event.

      " + }, "rawError":{ "shape":"LargeBoundedString", "documentation":"

      A string representing a job error.

      " @@ -2021,6 +2364,10 @@ "shape":"Boolean", "documentation":"

      Copy tags.

      " }, + "exportBucketArn":{ + "shape":"ARN", + "documentation":"

      S3 bucket ARN to export Source Network templates.

      " + }, "launchConfigurationTemplateID":{ "shape":"LaunchConfigurationTemplateID", "documentation":"

      ID of the Launch Configuration Template.

      " @@ -2336,6 +2683,35 @@ "max":2048, "min":0 }, + "ParticipatingResource":{ + "type":"structure", + "members":{ + "launchStatus":{ + "shape":"LaunchStatus", + "documentation":"

      The launch status of a participating resource.

      " + }, + "participatingResourceID":{ + "shape":"ParticipatingResourceID", + "documentation":"

      The ID of a participating resource.

      " + } + }, + "documentation":"

      Represents a resource participating in an asynchronous Job.

      " + }, + "ParticipatingResourceID":{ + "type":"structure", + "members":{ + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

      Source Network ID.

      " + } + }, + "documentation":"

      ID of a resource participating in an asynchronous Job.

      ", + "union":true + }, + "ParticipatingResources":{ + "type":"list", + "member":{"shape":"ParticipatingResource"} + }, "ParticipatingServer":{ "type":"structure", "members":{ @@ -2708,6 +3084,36 @@ "max":200, "min":1 }, + "RecoveryLifeCycle":{ + "type":"structure", + "members":{ + "apiCallDateTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

      The date and time the last Source Network recovery was initiated.

      " + }, + "jobID":{ + "shape":"JobID", + "documentation":"

      The ID of the Job that was used to last recover the Source Network.

      " + }, + "lastRecoveryResult":{ + "shape":"RecoveryResult", + "documentation":"

      The status of the last recovery of this Source Network.

      " + } + }, + "documentation":"

      An object representing the Source Network recovery Lifecycle.

      " + }, + "RecoveryResult":{ + "type":"string", + "enum":[ + "NOT_STARTED", + "IN_PROGRESS", + "SUCCESS", + "FAIL", + "PARTIAL_SUCCESS", + "ASSOCIATE_SUCCESS", + "ASSOCIATE_FAIL" + ] + }, "RecoverySnapshot":{ "type":"structure", "required":[ @@ -2849,7 +3255,8 @@ "type":"string", "enum":[ "DEFAULT", - "CUSTOM" + "CUSTOM", + "NONE" ] }, "ReplicationConfigurationReplicatedDisk":{ @@ -3004,6 +3411,15 @@ "max":32, "min":0 }, + "ReplicationStatus":{ + "type":"string", + "enum":[ + "STOPPED", + "IN_PROGRESS", + "PROTECTED", + "ERROR" + ] + }, "ResourceNotFoundException":{ "type":"structure", "members":{ @@ -3062,6 +3478,12 @@ "min":0, "pattern":"^sg-[0-9a-fA-F]{8,}$" }, + "SensitiveBoundedString":{ + "type":"string", + "max":256, + "min":0, + "sensitive":true + }, "ServiceQuotaExceededException":{ "type":"structure", "members":{ @@ -3114,6 +3536,88 @@ }, "documentation":"

      Properties of the cloud environment where this Source Server originated from.

      " }, + "SourceNetwork":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ARN", + "documentation":"

      The ARN of the Source Network.

      " + }, + "cfnStackName":{ + "shape":"CfnStackName", + "documentation":"

      CloudFormation stack name that was deployed for recovering the Source Network.

      " + }, + "lastRecovery":{ + "shape":"RecoveryLifeCycle", + "documentation":"

      An object containing information regarding the last recovery of the Source Network.

      " + }, + "launchedVpcID":{ + "shape":"VpcID", + "documentation":"

      ID of the recovered VPC following Source Network recovery.

      " + }, + "replicationStatus":{ + "shape":"ReplicationStatus", + "documentation":"

      Status of Source Network Replication. Possible values: (a) STOPPED - Source Network is not replicating. (b) IN_PROGRESS - Source Network is being replicated. (c) PROTECTED - Source Network was replicated successfully and is being synchronized for changes. (d) ERROR - Source Network replication has failed

      " + }, + "replicationStatusDetails":{ + "shape":"SensitiveBoundedString", + "documentation":"

      Error details in case Source Network replication status is ERROR.

      " + }, + "sourceAccountID":{ + "shape":"AccountID", + "documentation":"

      Account ID containing the VPC protected by the Source Network.

      " + }, + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

      Source Network ID.

      " + }, + "sourceRegion":{ + "shape":"AwsRegion", + "documentation":"

      Region containing the VPC protected by the Source Network.

      " + }, + "sourceVpcID":{ + "shape":"VpcID", + "documentation":"

      VPC ID protected by the Source Network.

      " + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

      A list of tags associated with the Source Network.

      " + } + }, + "documentation":"

      Properties of a Source Network.

      " + }, + "SourceNetworkData":{ + "type":"structure", + "members":{ + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

      Source Network ID.

      " + }, + "sourceVpc":{ + "shape":"VpcID", + "documentation":"

      VPC ID protected by the Source Network.

      " + }, + "stackName":{ + "shape":"LargeBoundedString", + "documentation":"

      CloudFormation stack name that was deployed for recovering the Source Network.

      " + }, + "targetVpc":{ + "shape":"VpcID", + "documentation":"

      ID of the recovered VPC following Source Network recovery.

      " + } + }, + "documentation":"

      Properties of Source Network related to a job event.

      " + }, + "SourceNetworkID":{ + "type":"string", + "max":20, + "min":20, + "pattern":"^sn-[0-9a-zA-Z]{17}$" + }, + "SourceNetworksList":{ + "type":"list", + "member":{"shape":"SourceNetwork"} + }, "SourceProperties":{ "type":"structure", "members":{ @@ -3191,6 +3695,10 @@ "shape":"SourceCloudProperties", "documentation":"

      Source cloud properties of the Source Server.

      " }, + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

      ID of the Source Network which is protecting this Source Server's network.

      " + }, "sourceProperties":{ "shape":"SourceProperties", "documentation":"

      The source properties of the Source Server.

      " @@ -3369,6 +3877,73 @@ } } }, + "StartSourceNetworkRecoveryRequest":{ + "type":"structure", + "required":["sourceNetworks"], + "members":{ + "deployAsNew":{ + "shape":"Boolean", + "documentation":"

      Don't update existing CloudFormation Stack, recover the network using a new stack.

      " + }, + "sourceNetworks":{ + "shape":"StartSourceNetworkRecoveryRequestNetworkEntries", + "documentation":"

      The Source Networks that we want to start a Recovery Job for.

      " + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

      The tags to be associated with the Source Network recovery Job.

      " + } + } + }, + "StartSourceNetworkRecoveryRequestNetworkEntries":{ + "type":"list", + "member":{"shape":"StartSourceNetworkRecoveryRequestNetworkEntry"}, + "max":100, + "min":1 + }, + "StartSourceNetworkRecoveryRequestNetworkEntry":{ + "type":"structure", + "required":["sourceNetworkID"], + "members":{ + "cfnStackName":{ + "shape":"CfnStackName", + "documentation":"

      CloudFormation stack name to be used for recovering the network.

      " + }, + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

      The ID of the Source Network you want to recover.

      " + } + }, + "documentation":"

      An object representing the Source Network to recover.

      " + }, + "StartSourceNetworkRecoveryResponse":{ + "type":"structure", + "members":{ + "job":{ + "shape":"Job", + "documentation":"

      The Source Network recovery Job.

      " + } + } + }, + "StartSourceNetworkReplicationRequest":{ + "type":"structure", + "required":["sourceNetworkID"], + "members":{ + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

      ID of the Source Network to replicate.

      " + } + } + }, + "StartSourceNetworkReplicationResponse":{ + "type":"structure", + "members":{ + "sourceNetwork":{ + "shape":"SourceNetwork", + "documentation":"

      Source Network which was requested for replication.

      " + } + } + }, "StopFailbackRequest":{ "type":"structure", "required":["recoveryInstanceID"], @@ -3398,6 +3973,25 @@ } } }, + "StopSourceNetworkReplicationRequest":{ + "type":"structure", + "required":["sourceNetworkID"], + "members":{ + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

      ID of the Source Network to stop replication.

      " + } + } + }, + "StopSourceNetworkReplicationResponse":{ + "type":"structure", + "members":{ + "sourceNetwork":{ + "shape":"SourceNetwork", + "documentation":"

      Source Network which was requested to stop replication.

      " + } + } + }, "StrictlyPositiveInteger":{ "type":"integer", "min":1 @@ -3408,6 +4002,10 @@ "min":0, "pattern":"^subnet-[0-9a-fA-F]{8,}$" }, + "SyntheticTimestamp_date_time":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, "TagKey":{ "type":"string", "max":256, @@ -3603,6 +4201,10 @@ "shape":"Boolean", "documentation":"

      Copy tags.

      " }, + "exportBucketArn":{ + "shape":"ARN", + "documentation":"

      S3 bucket ARN to export Source Network templates.

      " + }, "launchConfigurationTemplateID":{ "shape":"LaunchConfigurationTemplateID", "documentation":"

      Launch Configuration Template ID.

      " @@ -3831,6 +4433,12 @@ "type":"map", "key":{"shape":"LargeBoundedString"}, "value":{"shape":"PositiveInteger"} + }, + "VpcID":{ + "type":"string", + "max":21, + "min":12, + "pattern":"^vpc-[0-9a-fA-F]{8,}$" } }, "documentation":"

      AWS Elastic Disaster Recovery Service.

      " diff --git a/services/dynamodb/pom.xml b/services/dynamodb/pom.xml index 3441841f09ed..d3c1b85a9cb3 100644 --- a/services/dynamodb/pom.xml +++ b/services/dynamodb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT dynamodb AWS Java SDK :: Services :: Amazon DynamoDB diff --git a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json index 6ced961e524a..888e338fc020 100644 --- a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json +++ b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json @@ -41,7 +41,7 @@ {"shape":"RequestLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

      The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key.

      A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem returns a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys. You can use this value to retry the operation starting with the next item to get.

      If you request more than 100 items, BatchGetItem returns a ValidationException with the message \"Too many items requested for the BatchGetItem call.\"

      For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. If desired, your application can include its own logic to assemble the pages of results into one dataset.

      If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem returns a ProvisionedThroughputExceededException. If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys.

      If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

      For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

      By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables.

      In order to minimize response latency, BatchGetItem may retrieve items in parallel.

      When designing your application, keep in mind that DynamoDB does not return items in any particular order. To help parse the response by item, include the primary key values for the items in your request in the ProjectionExpression parameter.

      If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Working with Tables in the Amazon DynamoDB Developer Guide.

      ", + "documentation":"

      The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key.

      A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem returns a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, more than 1MB per partition is requested, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys. You can use this value to retry the operation starting with the next item to get.

      If you request more than 100 items, BatchGetItem returns a ValidationException with the message \"Too many items requested for the BatchGetItem call.\"

      For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. If desired, your application can include its own logic to assemble the pages of results into one dataset.

      If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem returns a ProvisionedThroughputExceededException. If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys.

      If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

      For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

      By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables.

      In order to minimize response latency, BatchGetItem may retrieve items in parallel.

      When designing your application, keep in mind that DynamoDB does not return items in any particular order. To help parse the response by item, include the primary key values for the items in your request in the ProjectionExpression parameter.

      If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Working with Tables in the Amazon DynamoDB Developer Guide.

      ", "endpointdiscovery":{ } }, @@ -230,7 +230,7 @@ }, "input":{"shape":"DescribeEndpointsRequest"}, "output":{"shape":"DescribeEndpointsResponse"}, - "documentation":"

      Returns the regional endpoint information. This action must be included in your VPC endpoint policies, or access to the DescribeEndpoints API will be denied. For more information on policy permissions, please see Internetwork traffic privacy.

      ", + "documentation":"

      Returns the regional endpoint information. For more information on policy permissions, please see Internetwork traffic privacy.

      ", "endpointoperation":true }, "DescribeExport":{ @@ -3725,7 +3725,7 @@ "documentation":"

      Too many operations for a given subscriber.

      " } }, - "documentation":"

      There is no limit to the number of daily on-demand backups that can be taken.

      For most purposes, up to 500 simultaneous table operations are allowed per account. These operations include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup, and RestoreTableToPointInTime.

      When you are creating a table with one or more secondary indexes, you can have up to 250 such requests running at a time. However, if the table or index specifications are complex, then DynamoDB might temporarily reduce the number of concurrent operations.

      When importing into DynamoDB, up to 50 simultaneous import table operations are allowed per account.

      There is a soft account quota of 2,500 tables.

      ", + "documentation":"

      There is no limit to the number of daily on-demand backups that can be taken.

      For most purposes, up to 500 simultaneous table operations are allowed per account. These operations include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup, and RestoreTableToPointInTime.

      When you are creating a table with one or more secondary indexes, you can have up to 250 such requests running at a time. However, if the table or index specifications are complex, then DynamoDB might temporarily reduce the number of concurrent operations.

      When importing into DynamoDB, up to 50 simultaneous import table operations are allowed per account.

      There is a soft account quota of 2,500 tables.

      GetRecords was called with a value of more than 1000 for the limit request parameter.

      More than 2 processes are reading from the same streams shard at the same time. Exceeding this limit may result in request throttling.

      ", "exception":true }, "ListAttributeValue":{ @@ -4214,11 +4214,11 @@ "members":{ "ReadCapacityUnits":{ "shape":"PositiveLongObject", - "documentation":"

      The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide.

      If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.

      " + "documentation":"

      The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide.

      If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.

      " }, "WriteCapacityUnits":{ "shape":"PositiveLongObject", - "documentation":"

      The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide.

      If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.

      " + "documentation":"

      The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide.

      If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.

      " } }, "documentation":"

      Represents the provisioned throughput settings for a specified table or index. The settings can be modified using the UpdateTable operation.

      For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide.

      " diff --git a/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/endpoint-rule-set.json b/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/endpoint-rule-set.json index d086a70a8612..911bf62628e8 100644 --- a/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/endpoint-rule-set.json +++ b/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, 
+ { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,94 +111,321 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://streams.dynamodb-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + 
"rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-us-gov", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://streams.dynamodb.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://streams.dynamodb-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://streams.dynamodb-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://streams.dynamodb.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": 
[], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "local" + ] + } + ], + "endpoint": { + "url": "http://localhost:8000", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "dynamodb", + "signingRegion": "us-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://streams.dynamodb.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-cn", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://streams.dynamodb.{Region}.amazonaws.com.cn", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ { @@ -238,125 +445,81 @@ } ], "endpoint": { - "url": "https://streams.dynamodb.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://streams.dynamodb.{Region}.amazonaws.com", "properties": {}, "headers": {} }, "type": "endpoint" }, { - "conditions": [], + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-iso", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], "endpoint": { - "url": "https://streams.dynamodb-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://streams.dynamodb.{Region}.c2s.ic.gov", "properties": {}, "headers": {} }, "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - 
{ - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", - "argv": [ + "conditions": [ { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] + "fn": "stringEquals", + "argv": [ + "aws-iso-b", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://streams.dynamodb.{Region}.sc2s.sgov.gov", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://streams.dynamodb.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://streams.dynamodb.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "local" - ] - } - ], - "endpoint": { - "url": "http://localhost:8000", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "dynamodb" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://streams.dynamodb.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/endpoint-tests.json b/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/endpoint-tests.json 
index d24464c47857..8fa93e555fbe 100644 --- a/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/endpoint-tests.json +++ b/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/endpoint-tests.json @@ -1,1028 +1,31 @@ { "testCases": [ { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-south-2.api.aws" - } - }, - "params": { - "Region": "ap-south-2", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-south-2.amazonaws.com" - } - }, - "params": { - "Region": "ap-south-2", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.ap-south-2.api.aws" - } - }, - "params": { - "Region": "ap-south-2", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.ap-south-2.amazonaws.com" - } - }, - "params": { - "Region": "ap-south-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-south-1.api.aws" - } - }, - "params": { - "Region": "ap-south-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-south-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region 
ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.ap-south-1.api.aws" - } - }, - "params": { - "Region": "ap-south-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.ap-south-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-south-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-south-1.api.aws" - } - }, - "params": { - "Region": "eu-south-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-south-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-south-1.api.aws" - } - }, - "params": { - "Region": "eu-south-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-south-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-south-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-south-2.api.aws" - } - }, - "params": { - "Region": "eu-south-2", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and 
DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-south-2.amazonaws.com" - } - }, - "params": { - "Region": "eu-south-2", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-south-2.api.aws" - } - }, - "params": { - "Region": "eu-south-2", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-south-2.amazonaws.com" - } - }, - "params": { - "Region": "eu-south-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and 
DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.me-central-1.api.aws" - } - }, - "params": { - "Region": "me-central-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.me-central-1.amazonaws.com" - } - }, - "params": { - "Region": "me-central-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.me-central-1.api.aws" - } - }, - "params": { - "Region": "me-central-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.me-central-1.amazonaws.com" - } - }, - "params": { - "Region": "me-central-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ca-central-1.api.aws" - } - }, - "params": { - "Region": "ca-central-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "Region": "ca-central-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.ca-central-1.api.aws" - } - }, - "params": { - "Region": "ca-central-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled 
and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.ca-central-1.amazonaws.com" - } - }, - "params": { - "Region": "ca-central-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-central-1.api.aws" - } - }, - "params": { - "Region": "eu-central-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-central-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-central-1.api.aws" - } - }, - "params": { - "Region": "eu-central-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-central-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-central-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "Region": "us-iso-west-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.us-iso-west-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-west-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region 
us-iso-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-iso-west-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.us-iso-west-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-west-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-central-2.api.aws" - } - }, - "params": { - "Region": "eu-central-2", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-central-2.amazonaws.com" - } - }, - "params": { - "Region": "eu-central-2", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-central-2.api.aws" - } - }, - "params": { - "Region": "eu-central-2", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-central-2.amazonaws.com" - } - }, - "params": { - "Region": "eu-central-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.us-west-1.api.aws" - } - }, - "params": { - "Region": "us-west-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region 
us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "Region": "us-west-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.us-west-1.api.aws" - } - }, - "params": { - "Region": "us-west-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.us-west-1.amazonaws.com" - } - }, - "params": { - "Region": "us-west-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.us-west-2.api.aws" - } - }, - "params": { - "Region": "us-west-2", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "Region": "us-west-2", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.us-west-2.api.aws" - } - }, - "params": { - "Region": "us-west-2", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.us-west-2.amazonaws.com" - } - }, - "params": { - "Region": "us-west-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - 
"expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.af-south-1.api.aws" - } - }, - "params": { - "Region": "af-south-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "Region": "af-south-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.af-south-1.api.aws" - } - }, - "params": { - "Region": "af-south-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.af-south-1.amazonaws.com" - } - }, - "params": { - "Region": "af-south-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-north-1.api.aws" - } - }, - "params": { - "Region": "eu-north-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-north-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-north-1.api.aws" - } - }, - "params": { - "Region": "eu-north-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://streams.dynamodb.eu-north-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-north-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-west-3.api.aws" - } - }, - "params": { - "Region": "eu-west-3", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-3", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-west-3.api.aws" - } - }, - "params": { - "Region": "eu-west-3", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-west-3.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-3", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-west-2.api.aws" - } - }, - "params": { - "Region": "eu-west-2", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-2", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-west-2.api.aws" - } - 
}, - "params": { - "Region": "eu-west-2", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-west-2.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-west-1.api.aws" - } - }, - "params": { - "Region": "eu-west-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-west-1.api.aws" - } - }, - "params": { - "Region": "eu-west-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-west-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "Region": "ap-northeast-3", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - 
"Region": "ap-northeast-3", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.ap-northeast-3.api.aws" - } - }, - "params": { - "Region": "ap-northeast-3", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-northeast-3.amazonaws.com" + "url": "https://streams.dynamodb.af-south-1.amazonaws.com" } }, "params": { - "Region": "ap-northeast-3", + "Region": "af-south-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "Region": "ap-northeast-2", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-2", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.ap-northeast-2.api.aws" - } - }, - "params": { - "Region": "ap-northeast-2", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-northeast-2.amazonaws.com" + "url": 
"https://streams.dynamodb.ap-east-1.amazonaws.com" } }, "params": { - "Region": "ap-northeast-2", + "Region": "ap-east-1", "UseFIPS": false, "UseDualStack": false } }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "Region": "ap-northeast-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.ap-northeast-1.api.aws" - } - }, - "params": { - "Region": "ap-northeast-1", - "UseFIPS": false, - "UseDualStack": true - } - }, { "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { @@ -1037,673 +40,513 @@ } }, { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.me-south-1.api.aws" - } - }, - "params": { - "Region": "me-south-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "Region": "me-south-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.me-south-1.api.aws" - } - }, - "params": { - "Region": "me-south-1", - "UseFIPS": false, - "UseDualStack": 
true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.me-south-1.amazonaws.com" - } - }, - "params": { - "Region": "me-south-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.sa-east-1.api.aws" - } - }, - "params": { - "Region": "sa-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "Region": "sa-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.sa-east-1.api.aws" - } - }, - "params": { - "Region": "sa-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.sa-east-1.amazonaws.com" + "url": "https://streams.dynamodb.ap-northeast-2.amazonaws.com" } }, "params": { - "Region": "sa-east-1", + "Region": "ap-northeast-2", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-east-1.api.aws" - } - }, - "params": { - "Region": "ap-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://streams.dynamodb-fips.ap-east-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.ap-east-1.api.aws" - } - }, - "params": { - "Region": "ap-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-east-1.amazonaws.com" + "url": "https://streams.dynamodb.ap-northeast-3.amazonaws.com" } }, "params": { - "Region": "ap-east-1", + "Region": "ap-northeast-3", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://streams.dynamodb.ap-south-1.amazonaws.com" } }, "params": { - "Region": "cn-north-1", + "Region": "ap-south-1", "UseFIPS": false, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS 
disabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.cn-north-1.amazonaws.com.cn" + "url": "https://streams.dynamodb.ap-southeast-1.amazonaws.com" } }, "params": { - "Region": "cn-north-1", + "Region": "ap-southeast-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.us-gov-west-1.api.aws" + "url": "https://streams.dynamodb.ap-southeast-2.amazonaws.com" } }, "params": { - "Region": "us-gov-west-1", - "UseFIPS": true, - "UseDualStack": true + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.us-gov-west-1.amazonaws.com" + "url": "https://streams.dynamodb.ap-southeast-3.amazonaws.com" } }, "params": { - "Region": "us-gov-west-1", - "UseFIPS": true, + "Region": "ap-southeast-3", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.us-gov-west-1.api.aws" + "url": "https://streams.dynamodb.ca-central-1.amazonaws.com" } }, "params": { - "Region": "us-gov-west-1", + "Region": "ca-central-1", "UseFIPS": false, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-central-1 with FIPS disabled 
and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.us-gov-west-1.amazonaws.com" + "url": "https://streams.dynamodb.eu-central-1.amazonaws.com" } }, "params": { - "Region": "us-gov-west-1", + "Region": "eu-central-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.ap-southeast-1.api.aws" + "url": "https://streams.dynamodb.eu-north-1.amazonaws.com" } }, "params": { - "Region": "ap-southeast-1", - "UseFIPS": true, - "UseDualStack": true + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.ap-southeast-1.amazonaws.com" + "url": "https://streams.dynamodb.eu-south-1.amazonaws.com" } }, "params": { - "Region": "ap-southeast-1", - "UseFIPS": true, + "Region": "eu-south-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-southeast-1.api.aws" + "url": "https://streams.dynamodb.eu-west-1.amazonaws.com" } }, "params": { - "Region": "ap-southeast-1", + "Region": "eu-west-1", "UseFIPS": false, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-southeast-1.amazonaws.com" + 
"url": "https://streams.dynamodb.eu-west-2.amazonaws.com" } }, "params": { - "Region": "ap-southeast-1", + "Region": "eu-west-2", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.ap-southeast-2.api.aws" + "url": "https://streams.dynamodb.eu-west-3.amazonaws.com" } }, "params": { - "Region": "ap-southeast-2", - "UseFIPS": true, - "UseDualStack": true + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region local with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.ap-southeast-2.amazonaws.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "dynamodb", + "signingRegion": "us-east-1" + } + ] + }, + "url": "http://localhost:8000" } }, "params": { - "Region": "ap-southeast-2", - "UseFIPS": true, + "Region": "local", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-southeast-2.api.aws" + "url": "https://streams.dynamodb.me-south-1.amazonaws.com" } }, "params": { - "Region": "ap-southeast-2", + "Region": "me-south-1", "UseFIPS": false, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-southeast-2.amazonaws.com" + "url": 
"https://streams.dynamodb.sa-east-1.amazonaws.com" } }, "params": { - "Region": "ap-southeast-2", + "Region": "sa-east-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://streams.dynamodb.us-east-1.amazonaws.com" + } }, "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://streams.dynamodb.us-east-2.amazonaws.com" } }, "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, + "Region": "us-east-2", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://streams.dynamodb.us-west-1.amazonaws.com" + } }, "params": { - "Region": "us-iso-east-1", + "Region": "us-west-1", "UseFIPS": false, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.us-iso-east-1.c2s.ic.gov" + "url": "https://streams.dynamodb.us-west-2.amazonaws.com" } }, "params": { - "Region": 
"us-iso-east-1", + "Region": "us-west-2", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.ap-southeast-3.api.aws" + "url": "https://streams.dynamodb-fips.us-east-1.api.aws" } }, "params": { - "Region": "ap-southeast-3", + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.ap-southeast-3.amazonaws.com" + "url": "https://streams.dynamodb-fips.us-east-1.amazonaws.com" } }, "params": { - "Region": "ap-southeast-3", + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-southeast-3.api.aws" + "url": "https://streams.dynamodb.us-east-1.api.aws" } }, "params": { - "Region": "ap-southeast-3", + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-southeast-3.amazonaws.com" + "url": "https://streams.dynamodb.cn-north-1.amazonaws.com.cn" } }, "params": { - "Region": "ap-southeast-3", + "Region": "cn-north-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://streams.dynamodb-fips.ap-southeast-4.api.aws" - } - }, - "params": { - "Region": "ap-southeast-4", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.ap-southeast-4.amazonaws.com" + "url": "https://streams.dynamodb.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "ap-southeast-4", - "UseFIPS": true, + "Region": "cn-northwest-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-southeast-4.api.aws" + "url": "https://streams.dynamodb-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "ap-southeast-4", - "UseFIPS": false, + "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-southeast-4.amazonaws.com" + "url": "https://streams.dynamodb-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "Region": "ap-southeast-4", - "UseFIPS": false, + "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.us-east-1.api.aws" + "url": "https://streams.dynamodb.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "us-east-1", - "UseFIPS": true, + 
"Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.us-east-1.amazonaws.com" + "url": "https://streams.dynamodb.us-gov-east-1.amazonaws.com" } }, "params": { - "Region": "us-east-1", - "UseFIPS": true, + "Region": "us-gov-east-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.us-east-1.api.aws" + "url": "https://streams.dynamodb.us-gov-east-1.amazonaws.com" } }, "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.us-east-1.amazonaws.com" + "url": "https://streams.dynamodb.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-east-1", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.us-east-2.api.aws" + "url": "https://streams.dynamodb.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-east-2", + "Region": "us-gov-west-1", "UseFIPS": true, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack 
disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.us-east-2.amazonaws.com" + "url": "https://streams.dynamodb-fips.us-gov-east-1.api.aws" } }, "params": { - "Region": "us-east-2", + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.us-east-2.api.aws" + "url": "https://streams.dynamodb.us-gov-east-1.api.aws" } }, "params": { - "Region": "us-east-2", + "Region": "us-gov-east-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.us-east-2.amazonaws.com" + "url": "https://streams.dynamodb.us-iso-east-1.c2s.ic.gov" } }, "params": { - "Region": "us-east-2", + "Region": "us-iso-east-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "cn-northwest-1", + "Region": "us-iso-east-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://streams.dynamodb-fips.cn-northwest-1.amazonaws.com.cn" + "url": "https://streams.dynamodb-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "Region": "cn-northwest-1", + "Region": "us-iso-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://streams.dynamodb.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "cn-northwest-1", + "Region": "us-iso-east-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.cn-northwest-1.amazonaws.com.cn" + "url": "https://streams.dynamodb.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "Region": "cn-northwest-1", + "Region": "us-isob-east-1", "UseFIPS": false, "UseDualStack": false } @@ -1744,27 +587,27 @@ } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.us-isob-east-1.sc2s.sgov.gov" + "url": "https://example.com" } }, "params": { - "Region": "us-isob-east-1", + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "Region": 
"us-east-1", "UseFIPS": false, "UseDualStack": false, "Endpoint": "https://example.com" @@ -1793,6 +636,12 @@ "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/service-2.json b/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/service-2.json index 9b65e8fcf831..098679799516 100644 --- a/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/service-2.json +++ b/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/service-2.json @@ -314,7 +314,7 @@ "documentation":"

      Too many operations for a given subscriber.

      " } }, - "documentation":"

      There is no limit to the number of daily on-demand backups that can be taken.

      For most purposes, up to 500 simultaneous table operations are allowed per account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, and RestoreTableToPointInTime.

      When you are creating a table with one or more secondary indexes, you can have up to 250 such requests running at a time. However, if the table or index specifications are complex, then DynamoDB might temporarily reduce the number of concurrent operations.

      When importing into DynamoDB, up to 50 simultaneous import table operations are allowed per account.

      There is a soft account quota of 2,500 tables.

      ", + "documentation":"

      There is no limit to the number of daily on-demand backups that can be taken.

      For most purposes, up to 500 simultaneous table operations are allowed per account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, and RestoreTableToPointInTime.

      When you are creating a table with one or more secondary indexes, you can have up to 250 such requests running at a time. However, if the table or index specifications are complex, then DynamoDB might temporarily reduce the number of concurrent operations.

      When importing into DynamoDB, up to 50 simultaneous import table operations are allowed per account.

      There is a soft account quota of 2,500 tables.

      GetRecords was called with a value of more than 1000 for the limit request parameter.

      More than 2 processes are reading from the same streams shard at the same time. Exceeding this limit may result in request throttling.

      ", "exception":true }, "ListAttributeValue":{ @@ -397,7 +397,7 @@ }, "eventSource":{ "shape":"String", - "documentation":"

      The AWS service from which the stream record originated. For DynamoDB Streams, this is aws:dynamodb.

      " + "documentation":"

      The Amazon Web Services service from which the stream record originated. For DynamoDB Streams, this is aws:dynamodb.

      " }, "awsRegion":{ "shape":"String", @@ -502,7 +502,7 @@ }, "StreamLabel":{ "shape":"String", - "documentation":"

      A timestamp, in ISO 8601 format, for this stream.

      Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique:

      • the AWS customer ID.

      • the table name

      • the StreamLabel

      " + "documentation":"

      A timestamp, in ISO 8601 format, for this stream.

      Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique:

      • the Amazon Web Services customer ID.

      • the table name

      • the StreamLabel

      " } }, "documentation":"

      Represents all of the data describing a particular stream.

      " @@ -521,7 +521,7 @@ }, "StreamLabel":{ "shape":"String", - "documentation":"

      A timestamp, in ISO 8601 format, for this stream.

      Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique:

      • the AWS customer ID.

      • the table name

      • the StreamLabel

      " + "documentation":"

      A timestamp, in ISO 8601 format, for this stream.

      Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique:

      • the Amazon Web Services customer ID.

      • the table name

      • the StreamLabel

      " }, "StreamStatus":{ "shape":"StreamStatus", @@ -563,7 +563,7 @@ "members":{ "ApproximateCreationDateTime":{ "shape":"Date", - "documentation":"

      The approximate date and time when the stream record was created, in UNIX epoch time format.

      " + "documentation":"

      The approximate date and time when the stream record was created, in UNIX epoch time format and rounded down to the closest second.

      " }, "Keys":{ "shape":"AttributeMap", diff --git a/services/ebs/pom.xml b/services/ebs/pom.xml index 73010ef2f4b0..dd7319a65779 100644 --- a/services/ebs/pom.xml +++ b/services/ebs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ebs AWS Java SDK :: Services :: EBS diff --git a/services/ec2/pom.xml b/services/ec2/pom.xml index b0add975ab13..85d3169718b0 100644 --- a/services/ec2/pom.xml +++ b/services/ec2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ec2 AWS Java SDK :: Services :: Amazon EC2 diff --git a/services/ec2/src/main/resources/codegen-resources/paginators-1.json b/services/ec2/src/main/resources/codegen-resources/paginators-1.json index c2d22676e342..7d3eebedeb8a 100644 --- a/services/ec2/src/main/resources/codegen-resources/paginators-1.json +++ b/services/ec2/src/main/resources/codegen-resources/paginators-1.json @@ -195,6 +195,12 @@ "output_token": "NextToken", "result_key": "ImportSnapshotTasks" }, + "DescribeInstanceConnectEndpoints": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "InstanceConnectEndpoints" + }, "DescribeInstanceCreditSpecifications": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/services/ec2/src/main/resources/codegen-resources/service-2.json b/services/ec2/src/main/resources/codegen-resources/service-2.json index 50921f1674a6..e3c1e8bb59ac 100644 --- a/services/ec2/src/main/resources/codegen-resources/service-2.json +++ b/services/ec2/src/main/resources/codegen-resources/service-2.json @@ -100,7 +100,7 @@ }, "input":{"shape":"AllocateAddressRequest"}, "output":{"shape":"AllocateAddressResult"}, - "documentation":"

      Allocates an Elastic IP address to your Amazon Web Services account. After you allocate the Elastic IP address you can associate it with an instance or network interface. After you release an Elastic IP address, it is released to the IP address pool and can be allocated to a different Amazon Web Services account.

      You can allocate an Elastic IP address from an address pool owned by Amazon Web Services or from an address pool created from a public IPv4 address range that you have brought to Amazon Web Services for use with your Amazon Web Services resources using bring your own IP addresses (BYOIP). For more information, see Bring Your Own IP Addresses (BYOIP) in the Amazon Elastic Compute Cloud User Guide.

      [EC2-VPC] If you release an Elastic IP address, you might be able to recover it. You cannot recover an Elastic IP address that you released after it is allocated to another Amazon Web Services account. You cannot recover an Elastic IP address for EC2-Classic. To attempt to recover an Elastic IP address that you released, specify it in this operation.

      An Elastic IP address is for use either in the EC2-Classic platform or in a VPC. By default, you can allocate 5 Elastic IP addresses for EC2-Classic per Region and 5 Elastic IP addresses for EC2-VPC per Region.

      For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

      You can allocate a carrier IP address, which is a public IP address from a telecommunication carrier, to a network interface which resides in a subnet in a Wavelength Zone (for example an EC2 instance).

      We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.

      " + "documentation":"

      Allocates an Elastic IP address to your Amazon Web Services account. After you allocate the Elastic IP address you can associate it with an instance or network interface. After you release an Elastic IP address, it is released to the IP address pool and can be allocated to a different Amazon Web Services account.

      You can allocate an Elastic IP address from an address pool owned by Amazon Web Services or from an address pool created from a public IPv4 address range that you have brought to Amazon Web Services for use with your Amazon Web Services resources using bring your own IP addresses (BYOIP). For more information, see Bring Your Own IP Addresses (BYOIP) in the Amazon Elastic Compute Cloud User Guide.

      If you release an Elastic IP address, you might be able to recover it. You cannot recover an Elastic IP address that you released after it is allocated to another Amazon Web Services account. To attempt to recover an Elastic IP address that you released, specify it in this operation.

      For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

      You can allocate a carrier IP address, which is a public IP address from a telecommunication carrier, to a network interface which resides in a subnet in a Wavelength Zone (for example an EC2 instance).

      " }, "AllocateHosts":{ "name":"AllocateHosts", @@ -120,7 +120,7 @@ }, "input":{"shape":"AllocateIpamPoolCidrRequest"}, "output":{"shape":"AllocateIpamPoolCidrResult"}, - "documentation":"

      Allocate a CIDR from an IPAM pool. In IPAM, an allocation is a CIDR assignment from an IPAM pool to another IPAM pool or to a resource. For more information, see Allocate CIDRs in the Amazon VPC IPAM User Guide.

      This action creates an allocation with strong consistency. The returned CIDR will not overlap with any other allocations from the same pool.

      " + "documentation":"

      Allocate a CIDR from an IPAM pool. The Region you use should be the IPAM pool locale. The locale is the Amazon Web Services Region where this IPAM pool is available for allocations.

      In IPAM, an allocation is a CIDR assignment from an IPAM pool to another IPAM pool or to a resource. For more information, see Allocate CIDRs in the Amazon VPC IPAM User Guide.

      This action creates an allocation with strong consistency. The returned CIDR will not overlap with any other allocations from the same pool.

      " }, "ApplySecurityGroupsToClientVpnTargetNetwork":{ "name":"ApplySecurityGroupsToClientVpnTargetNetwork", @@ -170,7 +170,7 @@ }, "input":{"shape":"AssociateAddressRequest"}, "output":{"shape":"AssociateAddressResult"}, - "documentation":"

      Associates an Elastic IP address, or carrier IP address (for instances that are in subnets in Wavelength Zones) with an instance or a network interface. Before you can use an Elastic IP address, you must allocate it to your account.

      An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

      [EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address is already associated with a different instance, it is disassociated from that instance and associated with the specified instance. If you associate an Elastic IP address with an instance that has an existing Elastic IP address, the existing address is disassociated from the instance, but remains allocated to your account.

      [VPC in an EC2-Classic account] If you don't specify a private IP address, the Elastic IP address is associated with the primary IP address. If the Elastic IP address is already associated with a different instance or a network interface, you get an error unless you allow reassociation. You cannot associate an Elastic IP address with an instance or network interface that has an existing Elastic IP address.

      [Subnets in Wavelength Zones] You can associate an IP address from the telecommunication carrier to the instance or network interface.

      You cannot associate an Elastic IP address with an interface in a different network border group.

      This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error, and you may be charged for each time the Elastic IP address is remapped to the same instance. For more information, see the Elastic IP Addresses section of Amazon EC2 Pricing.

      We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.

      " + "documentation":"

      Associates an Elastic IP address, or carrier IP address (for instances that are in subnets in Wavelength Zones) with an instance or a network interface. Before you can use an Elastic IP address, you must allocate it to your account.

      If the Elastic IP address is already associated with a different instance, it is disassociated from that instance and associated with the specified instance. If you associate an Elastic IP address with an instance that has an existing Elastic IP address, the existing address is disassociated from the instance, but remains allocated to your account.

      [Subnets in Wavelength Zones] You can associate an IP address from the telecommunication carrier to the instance or network interface.

      You cannot associate an Elastic IP address with an interface in a different network border group.

      This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error, and you may be charged for each time the Elastic IP address is remapped to the same instance. For more information, see the Elastic IP Addresses section of Amazon EC2 Pricing.

      " }, "AssociateClientVpnTargetNetwork":{ "name":"AssociateClientVpnTargetNetwork", @@ -676,7 +676,7 @@ }, "input":{"shape":"CreateFleetRequest"}, "output":{"shape":"CreateFleetResult"}, - "documentation":"

      Launches an EC2 Fleet.

      You can create a single EC2 Fleet that includes multiple launch specifications that vary by instance type, AMI, Availability Zone, or subnet.

      For more information, see EC2 Fleet in the Amazon EC2 User Guide.

      " + "documentation":"

      Creates an EC2 Fleet that contains the configuration information for On-Demand Instances and Spot Instances. Instances are launched immediately if there is available capacity.

      A single EC2 Fleet can include multiple launch specifications that vary by instance type, AMI, Availability Zone, or subnet.

      For more information, see EC2 Fleet in the Amazon EC2 User Guide.

      " }, "CreateFlowLogs":{ "name":"CreateFlowLogs", @@ -708,6 +708,16 @@ "output":{"shape":"CreateImageResult"}, "documentation":"

      Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that is either running or stopped.

      By default, when Amazon EC2 creates the new AMI, it reboots the instance so that it can take snapshots of the attached volumes while data is at rest, in order to ensure a consistent state. You can set the NoReboot parameter to true in the API request, or use the --no-reboot option in the CLI to prevent Amazon EC2 from shutting down and rebooting the instance.

      If you choose to bypass the shutdown and reboot process by setting the NoReboot parameter to true in the API request, or by using the --no-reboot option in the CLI, we can't guarantee the file system integrity of the created image.

      If you customized your instance with instance store volumes or Amazon EBS volumes in addition to the root device volume, the new AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, the instance automatically launches with those additional volumes.

      For more information, see Create an Amazon EBS-backed Linux AMI in the Amazon Elastic Compute Cloud User Guide.

      " }, + "CreateInstanceConnectEndpoint":{ + "name":"CreateInstanceConnectEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInstanceConnectEndpointRequest"}, + "output":{"shape":"CreateInstanceConnectEndpointResult"}, + "documentation":"

      Creates an EC2 Instance Connect Endpoint.

      An EC2 Instance Connect Endpoint allows you to connect to a resource, without requiring the resource to have a public IPv4 address. For more information, see Connect to your resources without requiring a public IPv4 address using EC2 Instance Connect Endpoint in the Amazon EC2 User Guide.

      " + }, "CreateInstanceEventWindow":{ "name":"CreateInstanceEventWindow", "http":{ @@ -1463,6 +1473,16 @@ "output":{"shape":"DeleteFpgaImageResult"}, "documentation":"

      Deletes the specified Amazon FPGA Image (AFI).

      " }, + "DeleteInstanceConnectEndpoint":{ + "name":"DeleteInstanceConnectEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteInstanceConnectEndpointRequest"}, + "output":{"shape":"DeleteInstanceConnectEndpointResult"}, + "documentation":"

      Deletes the specified EC2 Instance Connect Endpoint.

      " + }, "DeleteInstanceEventWindow":{ "name":"DeleteInstanceEventWindow", "http":{ @@ -2142,7 +2162,7 @@ }, "input":{"shape":"DescribeAccountAttributesRequest"}, "output":{"shape":"DescribeAccountAttributesResult"}, - "documentation":"

      Describes attributes of your Amazon Web Services account. The following are the supported account attributes:

      • supported-platforms: Indicates whether your account can launch instances into EC2-Classic and EC2-VPC, or only into EC2-VPC.

      • default-vpc: The ID of the default VPC for your account, or none.

      • max-instances: This attribute is no longer supported. The returned value does not reflect your actual vCPU limit for running On-Demand Instances. For more information, see On-Demand Instance Limits in the Amazon Elastic Compute Cloud User Guide.

      • vpc-max-security-groups-per-interface: The maximum number of security groups that you can assign to a network interface.

      • max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-Classic.

      • vpc-max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-VPC.

      We are retiring EC2-Classic on August 15, 2022. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon EC2 User Guide.

      " + "documentation":"

      Describes attributes of your Amazon Web Services account. The following are the supported account attributes:

      • default-vpc: The ID of the default VPC for your account, or none.

      • max-instances: This attribute is no longer supported. The returned value does not reflect your actual vCPU limit for running On-Demand Instances. For more information, see On-Demand Instance Limits in the Amazon Elastic Compute Cloud User Guide.

      • max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate.

      • supported-platforms: This attribute is deprecated.

      • vpc-max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate.

      • vpc-max-security-groups-per-interface: The maximum number of security groups that you can assign to a network interface.

      " }, "DescribeAddressTransfers":{ "name":"DescribeAddressTransfers", @@ -2152,7 +2172,7 @@ }, "input":{"shape":"DescribeAddressTransfersRequest"}, "output":{"shape":"DescribeAddressTransfersResult"}, - "documentation":"

      Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon Virtual Private Cloud User Guide.

      " + "documentation":"

      Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon Virtual Private Cloud User Guide.

      When you transfer an Elastic IP address, there is a two-step handshake between the source and transfer Amazon Web Services accounts. When the source account starts the transfer, the transfer account has seven days to accept the Elastic IP address transfer. During those seven days, the source account can view the pending transfer by using this action. After seven days, the transfer expires and ownership of the Elastic IP address returns to the source account. Accepted transfers are visible to the source account for three days after the transfers have been accepted.

      " }, "DescribeAddresses":{ "name":"DescribeAddresses", @@ -2162,7 +2182,7 @@ }, "input":{"shape":"DescribeAddressesRequest"}, "output":{"shape":"DescribeAddressesResult"}, - "documentation":"

      Describes the specified Elastic IP addresses or all of your Elastic IP addresses.

      An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

      We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.

      " + "documentation":"

      Describes the specified Elastic IP addresses or all of your Elastic IP addresses.

      " }, "DescribeAddressesAttribute":{ "name":"DescribeAddressesAttribute", @@ -2584,6 +2604,16 @@ "output":{"shape":"InstanceAttribute"}, "documentation":"

      Describes the specified attribute of the specified instance. You can specify only one attribute at a time. Valid attribute values are: instanceType | kernel | ramdisk | userData | disableApiTermination | instanceInitiatedShutdownBehavior | rootDeviceName | blockDeviceMapping | productCodes | sourceDestCheck | groupSet | ebsOptimized | sriovNetSupport

      " }, + "DescribeInstanceConnectEndpoints":{ + "name":"DescribeInstanceConnectEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceConnectEndpointsRequest"}, + "output":{"shape":"DescribeInstanceConnectEndpointsResult"}, + "documentation":"

      Describes the specified EC2 Instance Connect Endpoints or all EC2 Instance Connect Endpoints.

      " + }, "DescribeInstanceCreditSpecifications":{ "name":"DescribeInstanceCreditSpecifications", "http":{ @@ -2832,7 +2862,7 @@ }, "input":{"shape":"DescribeMovingAddressesRequest"}, "output":{"shape":"DescribeMovingAddressesResult"}, - "documentation":"

      Describes your Elastic IP addresses that are being moved to the EC2-VPC platform, or that are being restored to the EC2-Classic platform. This request does not return information about any other Elastic IP addresses in your account.

      " + "documentation":"

      This action is deprecated.

      Describes your Elastic IP addresses that are being moved from or being restored to the EC2-Classic platform. This request does not return information about any other Elastic IP addresses in your account.

      " }, "DescribeNatGateways":{ "name":"DescribeNatGateways", @@ -3757,7 +3787,7 @@ "requestUri":"/" }, "input":{"shape":"DisassociateAddressRequest"}, - "documentation":"

      Disassociates an Elastic IP address from the instance or network interface it's associated with.

      An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

      We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.

      This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

      " + "documentation":"

      Disassociates an Elastic IP address from the instance or network interface it's associated with.

      This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

      " }, "DisassociateClientVpnTargetNetwork":{ "name":"DisassociateClientVpnTargetNetwork", @@ -4254,7 +4284,7 @@ }, "input":{"shape":"GetIpamPoolAllocationsRequest"}, "output":{"shape":"GetIpamPoolAllocationsResult"}, - "documentation":"

      Get a list of all the CIDR allocations in an IPAM pool.

      If you use this action after AllocateIpamPoolCidr or ReleaseIpamPoolAllocation, note that all EC2 API actions follow an eventual consistency model.

      " + "documentation":"

      Get a list of all the CIDR allocations in an IPAM pool. The Region you use should be the IPAM pool locale. The locale is the Amazon Web Services Region where this IPAM pool is available for allocations.

      If you use this action after AllocateIpamPoolCidr or ReleaseIpamPoolAllocation, note that all EC2 API actions follow an eventual consistency model.

      " }, "GetIpamPoolCidrs":{ "name":"GetIpamPoolCidrs", @@ -5225,7 +5255,7 @@ }, "input":{"shape":"MoveAddressToVpcRequest"}, "output":{"shape":"MoveAddressToVpcResult"}, - "documentation":"

      Moves an Elastic IP address from the EC2-Classic platform to the EC2-VPC platform. The Elastic IP address must be allocated to your account for more than 24 hours, and it must not be associated with an instance. After the Elastic IP address is moved, it is no longer available for use in the EC2-Classic platform, unless you move it back using the RestoreAddressToClassic request. You cannot move an Elastic IP address that was originally allocated for use in the EC2-VPC platform to the EC2-Classic platform.

      We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.

      " + "documentation":"

      This action is deprecated.

      Moves an Elastic IP address from the EC2-Classic platform to the EC2-VPC platform. The Elastic IP address must be allocated to your account for more than 24 hours, and it must not be associated with an instance. After the Elastic IP address is moved, it is no longer available for use in the EC2-Classic platform, unless you move it back using the RestoreAddressToClassic request. You cannot move an Elastic IP address that was originally allocated for use in the EC2-VPC platform to the EC2-Classic platform.

      " }, "MoveByoipCidrToIpam":{ "name":"MoveByoipCidrToIpam", @@ -5403,7 +5433,7 @@ "requestUri":"/" }, "input":{"shape":"ReleaseAddressRequest"}, - "documentation":"

      Releases the specified Elastic IP address.

      [EC2-Classic, default VPC] Releasing an Elastic IP address automatically disassociates it from any instance that it's associated with. To disassociate an Elastic IP address without releasing it, use DisassociateAddress.

      We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.

      [Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic IP address before you can release it. Otherwise, Amazon EC2 returns an error (InvalidIPAddress.InUse).

      After releasing an Elastic IP address, it is released to the IP address pool. Be sure to update your DNS records and any servers or devices that communicate with the address. If you attempt to release an Elastic IP address that you already released, you'll get an AuthFailure error if the address is already allocated to another Amazon Web Services account.

      [EC2-VPC] After you release an Elastic IP address for use in a VPC, you might be able to recover it. For more information, see AllocateAddress.

      For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

      " + "documentation":"

      Releases the specified Elastic IP address.

      [Default VPC] Releasing an Elastic IP address automatically disassociates it from any instance that it's associated with. To disassociate an Elastic IP address without releasing it, use DisassociateAddress.

      [Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic IP address before you can release it. Otherwise, Amazon EC2 returns an error (InvalidIPAddress.InUse).

      After releasing an Elastic IP address, it is released to the IP address pool. Be sure to update your DNS records and any servers or devices that communicate with the address. If you attempt to release an Elastic IP address that you already released, you'll get an AuthFailure error if the address is already allocated to another Amazon Web Services account.

      After you release an Elastic IP address, you might be able to recover it. For more information, see AllocateAddress.

      " }, "ReleaseHosts":{ "name":"ReleaseHosts", @@ -5423,7 +5453,7 @@ }, "input":{"shape":"ReleaseIpamPoolAllocationRequest"}, "output":{"shape":"ReleaseIpamPoolAllocationResult"}, - "documentation":"

      Release an allocation within an IPAM pool. You can only use this action to release manual allocations. To remove an allocation for a resource without deleting the resource, set its monitored state to false using ModifyIpamResourceCidr. For more information, see Release an allocation in the Amazon VPC IPAM User Guide.

      All EC2 API actions follow an eventual consistency model.

      " + "documentation":"

      Release an allocation within an IPAM pool. The Region you use should be the IPAM pool locale. The locale is the Amazon Web Services Region where this IPAM pool is available for allocations. You can only use this action to release manual allocations. To remove an allocation for a resource without deleting the resource, set its monitored state to false using ModifyIpamResourceCidr. For more information, see Release an allocation in the Amazon VPC IPAM User Guide.

      All EC2 API actions follow an eventual consistency model.

      " }, "ReplaceIamInstanceProfileAssociation":{ "name":"ReplaceIamInstanceProfileAssociation", @@ -5596,7 +5626,7 @@ }, "input":{"shape":"RestoreAddressToClassicRequest"}, "output":{"shape":"RestoreAddressToClassicResult"}, - "documentation":"

      Restores an Elastic IP address that was previously moved to the EC2-VPC platform back to the EC2-Classic platform. You cannot move an Elastic IP address that was originally allocated for use in EC2-VPC. The Elastic IP address must not be associated with an instance or network interface.

      We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.

      " + "documentation":"

      This action is deprecated.

      Restores an Elastic IP address that was previously moved to the EC2-VPC platform back to the EC2-Classic platform. You cannot move an Elastic IP address that was originally allocated for use in EC2-VPC. The Elastic IP address must not be associated with an instance or network interface.

      " }, "RestoreImageFromRecycleBin":{ "name":"RestoreImageFromRecycleBin", @@ -6500,17 +6530,17 @@ }, "AllocationId":{ "shape":"String", - "documentation":"

      The ID representing the allocation of the address for use with EC2-VPC.

      ", + "documentation":"

      The ID representing the allocation of the address.

      ", "locationName":"allocationId" }, "AssociationId":{ "shape":"String", - "documentation":"

      The ID representing the association of the address with an instance in a VPC.

      ", + "documentation":"

      The ID representing the association of the address with an instance.

      ", "locationName":"associationId" }, "Domain":{ "shape":"DomainType", - "documentation":"

      Indicates whether this Elastic IP address is for use with instances in EC2-Classic (standard) or instances in a VPC (vpc).

      ", + "documentation":"

      The network (vpc).

      ", "locationName":"domain" }, "NetworkInterfaceId":{ @@ -6704,11 +6734,11 @@ "members":{ "Domain":{ "shape":"DomainType", - "documentation":"

      Indicates whether the Elastic IP address is for use with instances in a VPC or instances in EC2-Classic.

      Default: If the Region supports EC2-Classic, the default is standard. Otherwise, the default is vpc.

      " + "documentation":"

      The network (vpc).

      " }, "Address":{ "shape":"PublicIpAddress", - "documentation":"

      [EC2-VPC] The Elastic IP address to recover or an IPv4 address from an address pool.

      " + "documentation":"

      The Elastic IP address to recover or an IPv4 address from an address pool.

      " }, "PublicIpv4Pool":{ "shape":"Ipv4PoolEc2Id", @@ -6744,7 +6774,7 @@ }, "AllocationId":{ "shape":"String", - "documentation":"

      [EC2-VPC] The ID that Amazon Web Services assigns to represent the allocation of the Elastic IP address for use with instances in a VPC.

      ", + "documentation":"

      The ID that represents the allocation of the Elastic IP address.

      ", "locationName":"allocationId" }, "PublicIpv4Pool":{ @@ -6759,7 +6789,7 @@ }, "Domain":{ "shape":"DomainType", - "documentation":"

      Indicates whether the Elastic IP address is for use with instances in a VPC (vpc) or instances in EC2-Classic (standard).

      ", + "documentation":"

      The network (vpc).

      ", "locationName":"domain" }, "CustomerOwnedIp":{ @@ -6774,17 +6804,14 @@ }, "CarrierIp":{ "shape":"String", - "documentation":"

      The carrier IP address. This option is only available for network interfaces which reside in a subnet in a Wavelength Zone (for example an EC2 instance).

      ", + "documentation":"

      The carrier IP address. This option is only available for network interfaces that reside in a subnet in a Wavelength Zone.

      ", "locationName":"carrierIp" } } }, "AllocateHostsRequest":{ "type":"structure", - "required":[ - "AvailabilityZone", - "Quantity" - ], + "required":["AvailabilityZone"], "members":{ "AutoPlacement":{ "shape":"AutoPlacement", @@ -6812,7 +6839,7 @@ }, "Quantity":{ "shape":"Integer", - "documentation":"

      The number of Dedicated Hosts to allocate to your account with these parameters.

      ", + "documentation":"

      The number of Dedicated Hosts to allocate to your account with these parameters. If you are allocating the Dedicated Hosts on an Outpost, and you specify AssetIds, you can omit this parameter. In this case, Amazon EC2 allocates a Dedicated Host on each specified hardware asset. If you specify both AssetIds and Quantity, then the value that you specify for Quantity must be equal to the number of asset IDs specified.

      ", "locationName":"quantity" }, "TagSpecifications":{ @@ -6826,11 +6853,16 @@ }, "OutpostArn":{ "shape":"String", - "documentation":"

      The Amazon Resource Name (ARN) of the Amazon Web Services Outpost on which to allocate the Dedicated Host.

      " + "documentation":"

      The Amazon Resource Name (ARN) of the Amazon Web Services Outpost on which to allocate the Dedicated Host. If you specify OutpostArn, you can optionally specify AssetIds.

      If you are allocating the Dedicated Host in a Region, omit this parameter.

      " }, "HostMaintenance":{ "shape":"HostMaintenance", "documentation":"

      Indicates whether to enable or disable host maintenance for the Dedicated Host. For more information, see Host maintenance in the Amazon EC2 User Guide.

      " + }, + "AssetIds":{ + "shape":"AssetIdList", + "documentation":"

      The IDs of the Outpost hardware assets on which to allocate the Dedicated Hosts. Targeting specific hardware assets on an Outpost can help to minimize latency between your workloads. This parameter is supported only if you specify OutpostArn. If you are allocating the Dedicated Hosts in a Region, omit this parameter.

      • If you specify this parameter, you can omit Quantity. In this case, Amazon EC2 allocates a Dedicated Host on each specified hardware asset.

      • If you specify both AssetIds and Quantity, then the value for Quantity must be equal to the number of asset IDs specified.

      ", + "locationName":"AssetId" } } }, @@ -7369,6 +7401,11 @@ "locationName":"item" } }, + "AssetId":{"type":"string"}, + "AssetIdList":{ + "type":"list", + "member":{"shape":"AssetId"} + }, "AssignIpv6AddressesRequest":{ "type":"structure", "required":["NetworkInterfaceId"], @@ -7536,19 +7573,19 @@ "members":{ "AllocationId":{ "shape":"AllocationId", - "documentation":"

      [EC2-VPC] The allocation ID. This is required for EC2-VPC.

      " + "documentation":"

      The allocation ID. This is required.

      " }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

      The ID of the instance. The instance must have exactly one attached network interface. For EC2-VPC, you can specify either the instance ID or the network interface ID, but not both. For EC2-Classic, you must specify an instance ID and the instance must be in the running state.

      " + "documentation":"

      The ID of the instance. The instance must have exactly one attached network interface. You can specify either the instance ID or the network interface ID, but not both.

      " }, "PublicIp":{ "shape":"EipAllocationPublicIp", - "documentation":"

      [EC2-Classic] The Elastic IP address to associate with the instance. This is required for EC2-Classic.

      " + "documentation":"

      Deprecated.

      " }, "AllowReassociation":{ "shape":"Boolean", - "documentation":"

      [EC2-VPC] For a VPC in an EC2-Classic account, specify true to allow an Elastic IP address that is already associated with an instance or network interface to be reassociated with the specified instance or network interface. Otherwise, the operation fails. In a VPC in an EC2-VPC-only account, reassociation is automatic, therefore you can specify false to ensure the operation fails if the Elastic IP address is already associated with another resource.

      ", + "documentation":"

      Reassociation is automatic, but you can specify false to ensure the operation fails if the Elastic IP address is already associated with another resource.

      ", "locationName":"allowReassociation" }, "DryRun":{ @@ -7558,12 +7595,12 @@ }, "NetworkInterfaceId":{ "shape":"NetworkInterfaceId", - "documentation":"

      [EC2-VPC] The ID of the network interface. If the instance has more than one network interface, you must specify a network interface ID.

      For EC2-VPC, you can specify either the instance ID or the network interface ID, but not both.

      ", + "documentation":"

      The ID of the network interface. If the instance has more than one network interface, you must specify a network interface ID.

      You can specify either the instance ID or the network interface ID, but not both.

      ", "locationName":"networkInterfaceId" }, "PrivateIpAddress":{ "shape":"String", - "documentation":"

      [EC2-VPC] The primary or secondary private IP address to associate with the Elastic IP address. If no private IP address is specified, the Elastic IP address is associated with the primary private IP address.

      ", + "documentation":"

      The primary or secondary private IP address to associate with the Elastic IP address. If no private IP address is specified, the Elastic IP address is associated with the primary private IP address.

      ", "locationName":"privateIpAddress" } } @@ -7573,7 +7610,7 @@ "members":{ "AssociationId":{ "shape":"String", - "documentation":"

      [EC2-VPC] The ID that represents the association of the Elastic IP address with an instance.

      ", + "documentation":"

      The ID that represents the association of the Elastic IP address with an instance.

      ", "locationName":"associationId" } } @@ -8830,6 +8867,7 @@ }, "documentation":"

      Describes Availability Zones, Local Zones, and Wavelength Zones.

      " }, + "AvailabilityZoneId":{"type":"string"}, "AvailabilityZoneList":{ "type":"list", "member":{ @@ -11412,7 +11450,7 @@ }, "AmdSevSnp":{ "shape":"AmdSevSnpSpecification", - "documentation":"

      Indicates whether the instance is enabled for AMD SEV-SNP.

      ", + "documentation":"

      Indicates whether the instance is enabled for AMD SEV-SNP. For more information, see AMD SEV-SNP.

      ", "locationName":"amdSevSnp" } }, @@ -11431,7 +11469,7 @@ }, "AmdSevSnp":{ "shape":"AmdSevSnpSpecification", - "documentation":"

      Indicates whether to enable the instance for AMD SEV-SNP. AMD SEV-SNP is supported with M6a, R6a, and C6a instance types only.

      " + "documentation":"

      Indicates whether to enable the instance for AMD SEV-SNP. AMD SEV-SNP is supported with M6a, R6a, and C6a instance types only. For more information, see AMD SEV-SNP.

      " } }, "documentation":"

      The CPU options for the instance. Both the core count and threads per core must be specified in the request.

      " @@ -11565,11 +11603,11 @@ "documentation":"

      The type of operating system for which to reserve capacity.

      " }, "AvailabilityZone":{ - "shape":"String", + "shape":"AvailabilityZoneName", "documentation":"

      The Availability Zone in which to create the Capacity Reservation.

      " }, "AvailabilityZoneId":{ - "shape":"String", + "shape":"AvailabilityZoneId", "documentation":"

      The ID of the Availability Zone in which to create the Capacity Reservation.

      " }, "Tenancy":{ @@ -12390,6 +12428,54 @@ } } }, + "CreateInstanceConnectEndpointRequest":{ + "type":"structure", + "required":["SubnetId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

      Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

      " + }, + "SubnetId":{ + "shape":"SubnetId", + "documentation":"

      The ID of the subnet in which to create the EC2 Instance Connect Endpoint.

      " + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIdStringListRequest", + "documentation":"

      One or more security groups to associate with the endpoint. If you don't specify a security group, the default security group for your VPC will be associated with the endpoint.

      ", + "locationName":"SecurityGroupId" + }, + "PreserveClientIp":{ + "shape":"Boolean", + "documentation":"

      Indicates whether your client's IP address is preserved as the source. The value is true or false.

      • If true, your client's IP address is used when you connect to a resource.

      • If false, the elastic network interface IP address is used when you connect to a resource.

      Default: true

      " + }, + "ClientToken":{ + "shape":"String", + "documentation":"

      Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

      ", + "idempotencyToken":true + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

      The tags to apply to the EC2 Instance Connect Endpoint during creation.

      ", + "locationName":"TagSpecification" + } + } + }, + "CreateInstanceConnectEndpointResult":{ + "type":"structure", + "members":{ + "InstanceConnectEndpoint":{ + "shape":"Ec2InstanceConnectEndpoint", + "documentation":"

      Information about the EC2 Instance Connect Endpoint.

      ", + "locationName":"instanceConnectEndpoint" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

      Unique, case-sensitive idempotency token provided by the client in the request.

      ", + "locationName":"clientToken" + } + } + }, "CreateInstanceEventWindowRequest":{ "type":"structure", "members":{ @@ -13397,7 +13483,7 @@ }, "InterfaceType":{ "shape":"NetworkInterfaceCreationType", - "documentation":"

      The type of network interface. The default is interface.

      The only supported values are efa and trunk.

      " + "documentation":"

      The type of network interface. The default is interface.

      The only supported values are interface, efa, and trunk.

      " }, "SubnetId":{ "shape":"SubnetId", @@ -16150,6 +16236,30 @@ } } }, + "DeleteInstanceConnectEndpointRequest":{ + "type":"structure", + "required":["InstanceConnectEndpointId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

      Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

      " + }, + "InstanceConnectEndpointId":{ + "shape":"InstanceConnectEndpointId", + "documentation":"

      The ID of the EC2 Instance Connect Endpoint to delete.

      " + } + } + }, + "DeleteInstanceConnectEndpointResult":{ + "type":"structure", + "members":{ + "InstanceConnectEndpoint":{ + "shape":"Ec2InstanceConnectEndpoint", + "documentation":"

      Information about the EC2 Instance Connect Endpoint.

      ", + "locationName":"instanceConnectEndpoint" + } + } + }, "DeleteInstanceEventWindowRequest":{ "type":"structure", "required":["InstanceEventWindowId"], @@ -17816,6 +17926,7 @@ }, "DeregisterInstanceEventNotificationAttributesRequest":{ "type":"structure", + "required":["InstanceTagAttribute"], "members":{ "DryRun":{ "shape":"Boolean", @@ -18027,7 +18138,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

      One or more filters. Filter names and values are case-sensitive.

      • allocation-id - [EC2-VPC] The allocation ID for the address.

      • association-id - [EC2-VPC] The association ID for the address.

      • domain - Indicates whether the address is for use in EC2-Classic (standard) or in a VPC (vpc).

      • instance-id - The ID of the instance the address is associated with, if any.

      • network-border-group - A unique set of Availability Zones, Local Zones, or Wavelength Zones from where Amazon Web Services advertises IP addresses.

      • network-interface-id - [EC2-VPC] The ID of the network interface that the address is associated with, if any.

      • network-interface-owner-id - The Amazon Web Services account ID of the owner.

      • private-ip-address - [EC2-VPC] The private IP address associated with the Elastic IP address.

      • public-ip - The Elastic IP address, or the carrier IP address.

      • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

      • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

      ", + "documentation":"

      One or more filters. Filter names and values are case-sensitive.

      • allocation-id - The allocation ID for the address.

      • association-id - The association ID for the address.

      • instance-id - The ID of the instance the address is associated with, if any.

      • network-border-group - A unique set of Availability Zones, Local Zones, or Wavelength Zones from where Amazon Web Services advertises IP addresses.

      • network-interface-id - The ID of the network interface that the address is associated with, if any.

      • network-interface-owner-id - The Amazon Web Services account ID of the owner.

      • private-ip-address - The private IP address associated with the Elastic IP address.

      • public-ip - The Elastic IP address, or the carrier IP address.

      • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

      • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

      ", "locationName":"Filter" }, "PublicIps":{ @@ -18037,7 +18148,7 @@ }, "AllocationIds":{ "shape":"AllocationIdList", - "documentation":"

      [EC2-VPC] Information about the allocation IDs.

      ", + "documentation":"

      Information about the allocation IDs.

      ", "locationName":"AllocationId" }, "DryRun":{ @@ -19936,6 +20047,48 @@ } } }, + "DescribeInstanceConnectEndpointsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

      Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

      " + }, + "MaxResults":{ + "shape":"InstanceConnectEndpointMaxResults", + "documentation":"

      The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination.

      " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

      The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

      " + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

      One or more filters.

      • instance-connect-endpoint-id - The ID of the EC2 Instance Connect Endpoint.

      • state - The state of the EC2 Instance Connect Endpoint (create-in-progress | create-complete | create-failed | delete-in-progress | delete-complete | delete-failed).

      • subnet-id - The ID of the subnet in which the EC2 Instance Connect Endpoint was created.

      • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

      • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

      • tag-value - The value of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific value, regardless of tag key.

      • vpc-id - The ID of the VPC in which the EC2 Instance Connect Endpoint was created.

      ", + "locationName":"Filter" + }, + "InstanceConnectEndpointIds":{ + "shape":"ValueStringList", + "documentation":"

      One or more EC2 Instance Connect Endpoint IDs.

      ", + "locationName":"InstanceConnectEndpointId" + } + } + }, + "DescribeInstanceConnectEndpointsResult":{ + "type":"structure", + "members":{ + "InstanceConnectEndpoints":{ + "shape":"InstanceConnectEndpointSet", + "documentation":"

      Information about the EC2 Instance Connect Endpoints.

      ", + "locationName":"instanceConnectEndpointSet" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

      The token to include in another request to get the next page of items. This value is null when there are no more items to return.

      ", + "locationName":"nextToken" + } + } + }, "DescribeInstanceCreditSpecificationsMaxResults":{ "type":"integer", "max":1000, @@ -20148,7 +20301,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

      One or more filters. Filter names and values are case-sensitive.

      • auto-recovery-supported - Indicates whether Amazon CloudWatch action based recovery is supported (true | false).

      • bare-metal - Indicates whether it is a bare metal instance type (true | false).

      • burstable-performance-supported - Indicates whether it is a burstable performance instance type (true | false).

      • current-generation - Indicates whether this instance type is the latest generation instance type of an instance family (true | false).

      • ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline bandwidth performance for an EBS-optimized instance type, in Mbps.

      • ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage operations per second for an EBS-optimized instance type.

      • ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline throughput performance for an EBS-optimized instance type, in MB/s.

      • ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth performance for an EBS-optimized instance type, in Mbps.

      • ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage operations per second for an EBS-optimized instance type.

      • ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum throughput performance for an EBS-optimized instance type, in MB/s.

      • ebs-info.ebs-optimized-support - Indicates whether the instance type is EBS-optimized (supported | unsupported | default).

      • ebs-info.encryption-support - Indicates whether EBS encryption is supported (supported | unsupported).

      • ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for EBS volumes (required | supported | unsupported).

      • free-tier-eligible - Indicates whether the instance type is eligible to use in the free tier (true | false).

      • hibernation-supported - Indicates whether On-Demand hibernation is supported (true | false).

      • hypervisor - The hypervisor (nitro | xen).

      • instance-storage-info.disk.count - The number of local disks.

      • instance-storage-info.disk.size-in-gb - The storage size of each instance storage disk, in GB.

      • instance-storage-info.disk.type - The storage technology for the local instance storage disks (hdd | ssd).

      • instance-storage-info.encryption-support - Indicates whether data is encrypted at rest (required | supported | unsupported).

      • instance-storage-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for instance store (required | supported | unsupported).

      • instance-storage-info.total-size-in-gb - The total amount of storage available from all local instance storage, in GB.

      • instance-storage-supported - Indicates whether the instance type has local instance storage (true | false).

      • instance-type - The instance type (for example c5.2xlarge or c5*).

      • memory-info.size-in-mib - The memory size.

      • network-info.efa-info.maximum-efa-interfaces - The maximum number of Elastic Fabric Adapters (EFAs) per instance.

      • network-info.efa-supported - Indicates whether the instance type supports Elastic Fabric Adapter (EFA) (true | false).

      • network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is supported or required (required | supported | unsupported).

      • network-info.encryption-in-transit-supported - Indicates whether the instance type automatically encrypts in-transit traffic between instances (true | false).

      • network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 addresses per network interface.

      • network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 addresses per network interface.

      • network-info.ipv6-supported - Indicates whether the instance type supports IPv6 (true | false).

      • network-info.maximum-network-cards - The maximum number of network cards per instance.

      • network-info.maximum-network-interfaces - The maximum number of network interfaces per instance.

      • network-info.network-performance - The network performance (for example, \"25 Gigabit\").

      • processor-info.supported-architecture - The CPU architecture (arm64 | i386 | x86_64).

      • processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz.

      • supported-boot-mode - The boot mode (legacy-bios | uefi).

      • supported-root-device-type - The root device type (ebs | instance-store).

      • supported-usage-class - The usage class (on-demand | spot).

      • supported-virtualization-type - The virtualization type (hvm | paravirtual).

      • vcpu-info.default-cores - The default number of cores for the instance type.

      • vcpu-info.default-threads-per-core - The default number of threads per core for the instance type.

      • vcpu-info.default-vcpus - The default number of vCPUs for the instance type.

      • vcpu-info.valid-cores - The number of cores that can be configured for the instance type.

      • vcpu-info.valid-threads-per-core - The number of threads per core that can be configured for the instance type. For example, \"1\" or \"1,2\".

      ", + "documentation":"

      One or more filters. Filter names and values are case-sensitive.

      • auto-recovery-supported - Indicates whether Amazon CloudWatch action based recovery is supported (true | false).

      • bare-metal - Indicates whether it is a bare metal instance type (true | false).

      • burstable-performance-supported - Indicates whether it is a burstable performance instance type (true | false).

      • current-generation - Indicates whether this instance type is the latest generation instance type of an instance family (true | false).

      • ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline bandwidth performance for an EBS-optimized instance type, in Mbps.

      • ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage operations per second for an EBS-optimized instance type.

      • ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline throughput performance for an EBS-optimized instance type, in MB/s.

      • ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth performance for an EBS-optimized instance type, in Mbps.

      • ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage operations per second for an EBS-optimized instance type.

      • ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum throughput performance for an EBS-optimized instance type, in MB/s.

      • ebs-info.ebs-optimized-support - Indicates whether the instance type is EBS-optimized (supported | unsupported | default).

      • ebs-info.encryption-support - Indicates whether EBS encryption is supported (supported | unsupported).

      • ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for EBS volumes (required | supported | unsupported).

      • free-tier-eligible - Indicates whether the instance type is eligible to use in the free tier (true | false).

      • hibernation-supported - Indicates whether On-Demand hibernation is supported (true | false).

      • hypervisor - The hypervisor (nitro | xen).

      • instance-storage-info.disk.count - The number of local disks.

      • instance-storage-info.disk.size-in-gb - The storage size of each instance storage disk, in GB.

      • instance-storage-info.disk.type - The storage technology for the local instance storage disks (hdd | ssd).

      • instance-storage-info.encryption-support - Indicates whether data is encrypted at rest (required | supported | unsupported).

      • instance-storage-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for instance store (required | supported | unsupported).

      • instance-storage-info.total-size-in-gb - The total amount of storage available from all local instance storage, in GB.

      • instance-storage-supported - Indicates whether the instance type has local instance storage (true | false).

      • instance-type - The instance type (for example c5.2xlarge or c5*).

      • memory-info.size-in-mib - The memory size.

      • network-info.efa-info.maximum-efa-interfaces - The maximum number of Elastic Fabric Adapters (EFAs) per instance.

      • network-info.efa-supported - Indicates whether the instance type supports Elastic Fabric Adapter (EFA) (true | false).

      • network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is supported or required (required | supported | unsupported).

      • network-info.encryption-in-transit-supported - Indicates whether the instance type automatically encrypts in-transit traffic between instances (true | false).

      • network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 addresses per network interface.

      • network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 addresses per network interface.

      • network-info.ipv6-supported - Indicates whether the instance type supports IPv6 (true | false).

      • network-info.maximum-network-cards - The maximum number of network cards per instance.

      • network-info.maximum-network-interfaces - The maximum number of network interfaces per instance.

      • network-info.network-performance - The network performance (for example, \"25 Gigabit\").

      • processor-info.supported-architecture - The CPU architecture (arm64 | i386 | x86_64).

      • processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz.

      • processor-info.supported-features - The supported CPU features (amd-sev-snp).

      • supported-boot-mode - The boot mode (legacy-bios | uefi).

      • supported-root-device-type - The root device type (ebs | instance-store).

      • supported-usage-class - The usage class (on-demand | spot).

      • supported-virtualization-type - The virtualization type (hvm | paravirtual).

      • vcpu-info.default-cores - The default number of cores for the instance type.

      • vcpu-info.default-threads-per-core - The default number of threads per core for the instance type.

      • vcpu-info.default-vcpus - The default number of vCPUs for the instance type.

      • vcpu-info.valid-cores - The number of cores that can be configured for the instance type.

      • vcpu-info.valid-threads-per-core - The number of threads per core that can be configured for the instance type. For example, \"1\" or \"1,2\".

      ", "locationName":"Filter" }, "MaxResults":{ @@ -20166,7 +20319,7 @@ "members":{ "InstanceTypes":{ "shape":"InstanceTypeInfoList", - "documentation":"

      The instance type. For more information, see Instance types in the Amazon EC2 User Guide.

      ", + "documentation":"

      The instance type. For more information, see Instance types in the Amazon EC2 User Guide.

      When you change your EBS-backed instance type, instance restart or replacement behavior depends on the instance type compatibility between the old and new types. An instance that's backed by an instance store volume is always replaced. For more information, see Change the instance type in the Amazon EC2 User Guide.

      ", "locationName":"instanceTypeSet" }, "NextToken":{ @@ -25146,11 +25299,11 @@ "members":{ "AssociationId":{ "shape":"ElasticIpAssociationId", - "documentation":"

      [EC2-VPC] The association ID. Required for EC2-VPC.

      " + "documentation":"

      The association ID. This parameter is required.

      " }, "PublicIp":{ "shape":"EipAllocationPublicIp", - "documentation":"

      [EC2-Classic] The Elastic IP address. Required for EC2-Classic.

      " + "documentation":"

      Deprecated.

      " }, "DryRun":{ "shape":"Boolean", @@ -25976,6 +26129,98 @@ "default" ] }, + "Ec2InstanceConnectEndpoint":{ + "type":"structure", + "members":{ + "OwnerId":{ + "shape":"String", + "documentation":"

      The ID of the Amazon Web Services account that created the EC2 Instance Connect Endpoint.

      ", + "locationName":"ownerId" + }, + "InstanceConnectEndpointId":{ + "shape":"InstanceConnectEndpointId", + "documentation":"

      The ID of the EC2 Instance Connect Endpoint.

      ", + "locationName":"instanceConnectEndpointId" + }, + "InstanceConnectEndpointArn":{ + "shape":"ResourceArn", + "documentation":"

      The Amazon Resource Name (ARN) of the EC2 Instance Connect Endpoint.

      ", + "locationName":"instanceConnectEndpointArn" + }, + "State":{ + "shape":"Ec2InstanceConnectEndpointState", + "documentation":"

      The current state of the EC2 Instance Connect Endpoint.

      ", + "locationName":"state" + }, + "StateMessage":{ + "shape":"String", + "documentation":"

      The message for the current state of the EC2 Instance Connect Endpoint. Can include a failure message.

      ", + "locationName":"stateMessage" + }, + "DnsName":{ + "shape":"String", + "documentation":"

      The DNS name of the EC2 Instance Connect Endpoint.

      ", + "locationName":"dnsName" + }, + "FipsDnsName":{ + "shape":"String", + "documentation":"

      ", + "locationName":"fipsDnsName" + }, + "NetworkInterfaceIds":{ + "shape":"NetworkInterfaceIdSet", + "documentation":"

      The ID of the elastic network interface that Amazon EC2 automatically created when creating the EC2 Instance Connect Endpoint.

      ", + "locationName":"networkInterfaceIdSet" + }, + "VpcId":{ + "shape":"VpcId", + "documentation":"

      The ID of the VPC in which the EC2 Instance Connect Endpoint was created.

      ", + "locationName":"vpcId" + }, + "AvailabilityZone":{ + "shape":"String", + "documentation":"

      The Availability Zone of the EC2 Instance Connect Endpoint.

      ", + "locationName":"availabilityZone" + }, + "CreatedAt":{ + "shape":"MillisecondDateTime", + "documentation":"

      The date and time that the EC2 Instance Connect Endpoint was created.

      ", + "locationName":"createdAt" + }, + "SubnetId":{ + "shape":"SubnetId", + "documentation":"

      The ID of the subnet in which the EC2 Instance Connect Endpoint was created.

      ", + "locationName":"subnetId" + }, + "PreserveClientIp":{ + "shape":"Boolean", + "documentation":"

      Indicates whether your client's IP address is preserved as the source. The value is true or false.

      • If true, your client's IP address is used when you connect to a resource.

      • If false, the elastic network interface IP address is used when you connect to a resource.

      Default: true

      ", + "locationName":"preserveClientIp" + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIdSet", + "documentation":"

      The security groups associated with the endpoint. If you didn't specify a security group, the default security group for your VPC is associated with the endpoint.

      ", + "locationName":"securityGroupIdSet" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

      The tags assigned to the EC2 Instance Connect Endpoint.

      ", + "locationName":"tagSet" + } + }, + "documentation":"

      The EC2 Instance Connect Endpoint.

      " + }, + "Ec2InstanceConnectEndpointState":{ + "type":"string", + "enum":[ + "create-in-progress", + "create-complete", + "create-failed", + "delete-in-progress", + "delete-complete", + "delete-failed" + ] + }, "EfaInfo":{ "type":"structure", "members":{ @@ -28242,7 +28487,7 @@ }, "ImageId":{ "shape":"ImageId", - "documentation":"

      The ID of the AMI. An AMI is required to launch an instance. The AMI ID must be specified here or in the launch template.

      ", + "documentation":"

      The ID of the AMI. An AMI is required to launch an instance. This parameter is only available for fleets of type instant. For fleets of type maintain and request, you must specify the AMI ID in the launch template.

      ", "locationName":"imageId" } }, @@ -28299,7 +28544,7 @@ }, "ImageId":{ "shape":"ImageId", - "documentation":"

      The ID of the AMI. An AMI is required to launch an instance. The AMI ID must be specified here or in the launch template.

      " + "documentation":"

      The ID of the AMI. An AMI is required to launch an instance. This parameter is only available for fleets of type instant. For fleets of type maintain and request, you must specify the AMI ID in the launch template.

      " } }, "documentation":"

      Describes overrides for a launch template.

      " @@ -30842,6 +31087,11 @@ "shape":"HostMaintenance", "documentation":"

      Indicates whether host maintenance is enabled or disabled for the Dedicated Host.

      ", "locationName":"hostMaintenance" + }, + "AssetId":{ + "shape":"AssetId", + "documentation":"

      The ID of the Outpost hardware asset on which the Dedicated Host is allocated.

      ", + "locationName":"assetId" } }, "documentation":"

      Describes the properties of the Dedicated Host.

      " @@ -31528,7 +31778,7 @@ "documentation":"

      The ID of the EBS snapshot to be used for importing the snapshot.

      " }, "Url":{ - "shape":"String", + "shape":"SensitiveUrl", "documentation":"

      The URL to the Amazon S3-based disk image being imported. The URL can either be a https URL (https://..) or an Amazon S3 URL (s3://..)

      " }, "UserBucket":{ @@ -32917,6 +33167,19 @@ }, "documentation":"

      Information about the number of instances that can be launched onto the Dedicated Host.

      " }, + "InstanceConnectEndpointId":{"type":"string"}, + "InstanceConnectEndpointMaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "InstanceConnectEndpointSet":{ + "type":"list", + "member":{ + "shape":"Ec2InstanceConnectEndpoint", + "locationName":"item" + } + }, "InstanceCount":{ "type":"structure", "members":{ @@ -35005,7 +35268,13 @@ "inf2.8xlarge", "inf2.24xlarge", "inf2.48xlarge", - "trn1n.32xlarge" + "trn1n.32xlarge", + "i4g.large", + "i4g.xlarge", + "i4g.2xlarge", + "i4g.4xlarge", + "i4g.8xlarge", + "i4g.16xlarge" ] }, "InstanceTypeHypervisor":{ @@ -37150,7 +37419,7 @@ "members":{ "LaunchTemplateSpecification":{ "shape":"FleetLaunchTemplateSpecification", - "documentation":"

      The launch template.

      ", + "documentation":"

      The launch template to use. Make sure that the launch template does not contain the NetworkInterfaceId parameter because you can't specify a network interface ID in a Spot Fleet.

      ", "locationName":"launchTemplateSpecification" }, "Overrides":{ @@ -39984,7 +40253,7 @@ }, "Tenancy":{ "shape":"HostTenancy", - "documentation":"

      The tenancy for the instance.

      For T3 instances, you can't change the tenancy from dedicated to host, or from host to dedicated. Attempting to make one of these unsupported tenancy changes results in the InvalidTenancy error code.

      ", + "documentation":"

      The tenancy for the instance.

      For T3 instances, you must launch the instance on a Dedicated Host to use a tenancy of host. You can't change the tenancy from host to dedicated or default. Attempting to make one of these unsupported tenancy changes results in an InvalidRequest error code.

      ", "locationName":"tenancy" }, "PartitionNumber":{ @@ -39993,7 +40262,7 @@ }, "HostResourceGroupArn":{ "shape":"String", - "documentation":"

      The ARN of the host resource group in which to place the instance.

      " + "documentation":"

      The ARN of the host resource group in which to place the instance. The instance must have a tenancy of host to specify this parameter.

      " }, "GroupId":{ "shape":"PlacementGroupId", @@ -41885,7 +42154,7 @@ "documentation":"

      The range of inside IPv6 addresses for the tunnel. Any specified CIDR blocks must be unique across all VPN connections that use the same transit gateway.

      Constraints: A size /126 CIDR block from the local fd00::/8 range.

      " }, "PreSharedKey":{ - "shape":"String", + "shape":"preSharedKey", "documentation":"

      The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and the customer gateway.

      Constraints: Allowed characters are alphanumeric characters, periods (.), and underscores (_). Must be between 8 and 64 characters in length and cannot start with zero (0).

      " }, "Phase1LifetimeSeconds":{ @@ -41964,7 +42233,8 @@ "documentation":"

      Turn on or off tunnel endpoint lifecycle control feature.

      " } }, - "documentation":"

      The Amazon Web Services Site-to-Site VPN tunnel options to modify.

      " + "documentation":"

      The Amazon Web Services Site-to-Site VPN tunnel options to modify.

      ", + "sensitive":true }, "MonitorInstancesRequest":{ "type":"structure", @@ -42091,7 +42361,7 @@ "members":{ "MoveStatus":{ "shape":"MoveStatus", - "documentation":"

      The status of the Elastic IP address that's being moved to the EC2-VPC platform, or restored to the EC2-Classic platform.

      ", + "documentation":"

      The status of the Elastic IP address that's being moved or restored.

      ", "locationName":"moveStatus" }, "PublicIp":{ @@ -42100,7 +42370,7 @@ "locationName":"publicIp" } }, - "documentation":"

      Describes the status of a moving Elastic IP address.

      We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.

      " + "documentation":"

      This action is deprecated.

      Describes the status of a moving Elastic IP address.

      " }, "MovingAddressStatusSet":{ "type":"list", @@ -43181,6 +43451,13 @@ "locationName":"item" } }, + "NetworkInterfaceIdSet":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, "NetworkInterfaceIpv6Address":{ "type":"structure", "members":{ @@ -44815,7 +45092,7 @@ }, "SupportedFeatures":{ "shape":"SupportedAdditionalProcessorFeatureList", - "documentation":"

      Indicates whether the instance type supports AMD SEV-SNP. If the request returns amd-sev-snp, AMD SEV-SNP is supported. Otherwise, it is not supported.

      ", + "documentation":"

      Indicates whether the instance type supports AMD SEV-SNP. If the request returns amd-sev-snp, AMD SEV-SNP is supported. Otherwise, it is not supported. For more information, see AMD SEV-SNP.

      ", "locationName":"supportedFeatures" } }, @@ -45643,6 +45920,7 @@ }, "RegisterInstanceEventNotificationAttributesRequest":{ "type":"structure", + "required":["InstanceTagAttribute"], "members":{ "DryRun":{ "shape":"Boolean", @@ -45891,11 +46169,11 @@ "members":{ "AllocationId":{ "shape":"AllocationId", - "documentation":"

      [EC2-VPC] The allocation ID. Required for EC2-VPC.

      " + "documentation":"

      The allocation ID. This parameter is required.

      " }, "PublicIp":{ "shape":"String", - "documentation":"

      [EC2-Classic] The Elastic IP address. Required for EC2-Classic.

      " + "documentation":"

      Deprecated.

      " }, "NetworkBorderGroup":{ "shape":"String", @@ -46587,7 +46865,7 @@ "documentation":"

      Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).

      Default: stop

      " }, "UserData":{ - "shape":"String", + "shape":"SensitiveUserData", "documentation":"

      The user data to make available to the instance. You must provide base64-encoded text. User data is limited to 16 KB. For more information, see Run commands on your Linux instance at launch (Linux) or Work with instance user data (Windows) in the Amazon Elastic Compute Cloud User Guide.

      If you are creating the launch template for use with Batch, the user data must be provided in the MIME multi-part archive format. For more information, see Amazon EC2 user data in launch templates in the Batch User Guide.

      " }, "TagSpecifications":{ @@ -46665,8 +46943,7 @@ "documentation":"

      Indicates whether to enable the instance for stop protection. For more information, see Stop protection in the Amazon Elastic Compute Cloud User Guide.

      " } }, - "documentation":"

      The information to include in the launch template.

      You must specify at least one parameter for the launch template data.

      ", - "sensitive":true + "documentation":"

      The information to include in the launch template.

      You must specify at least one parameter for the launch template data.

      " }, "RequestSpotFleetRequest":{ "type":"structure", @@ -47764,7 +48041,8 @@ "vpn-connection-device-type", "vpc-block-public-access-exclusion", "ipam-resource-discovery", - "ipam-resource-discovery-association" + "ipam-resource-discovery-association", + "instance-connect-endpoint" ] }, "ResponseError":{ @@ -48707,7 +48985,7 @@ }, "InstanceType":{ "shape":"InstanceType", - "documentation":"

      The instance type. For more information, see Instance types in the Amazon EC2 User Guide.

      Default: m1.small

      " + "documentation":"

      The instance type. For more information, see Instance types in the Amazon EC2 User Guide.

      When you change your EBS-backed instance type, instance restart or replacement behavior depends on the instance type compatibility between the old and new types. An instance that's backed by an instance store volume is always replaced. For more information, see Change the instance type in the Amazon EC2 User Guide.

      Default: m1.small

      " }, "Ipv6AddressCount":{ "shape":"Integer", @@ -49649,6 +49927,13 @@ "locationName":"item" } }, + "SecurityGroupIdSet":{ + "type":"list", + "member":{ + "shape":"SecurityGroupId", + "locationName":"item" + } + }, "SecurityGroupIdStringList":{ "type":"list", "member":{ @@ -49656,6 +49941,15 @@ "locationName":"SecurityGroupId" } }, + "SecurityGroupIdStringListRequest":{ + "type":"list", + "member":{ + "shape":"SecurityGroupId", + "locationName":"SecurityGroupId" + }, + "max":16, + "min":0 + }, "SecurityGroupIdentifier":{ "type":"structure", "members":{ @@ -49903,6 +50197,10 @@ } } }, + "SensitiveUrl":{ + "type":"string", + "sensitive":true + }, "SensitiveUserData":{ "type":"string", "sensitive":true @@ -50307,7 +50605,7 @@ "locationName":"statusMessage" }, "Url":{ - "shape":"String", + "shape":"SensitiveUrl", "documentation":"

      The URL used to access the disk image.

      ", "locationName":"url" }, @@ -50338,7 +50636,7 @@ "documentation":"

      The format of the disk image being imported.

      Valid values: VHD | VMDK | RAW

      " }, "Url":{ - "shape":"String", + "shape":"SensitiveUrl", "documentation":"

      The URL to the Amazon S3-based disk image being imported. It can either be a https URL (https://..) or an Amazon S3 URL (s3://..).

      " }, "UserBucket":{ @@ -50528,7 +50826,7 @@ "locationName":"statusMessage" }, "Url":{ - "shape":"String", + "shape":"SensitiveUrl", "documentation":"

      The URL of the disk image from which the snapshot is created.

      ", "locationName":"url" }, @@ -54806,7 +55104,7 @@ "locationName":"tunnelInsideIpv6Cidr" }, "PreSharedKey":{ - "shape":"String", + "shape":"preSharedKey", "documentation":"

      The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and the customer gateway.

      ", "locationName":"preSharedKey" }, @@ -55902,9 +56200,17 @@ "KinesisDataFirehose":{ "shape":"VerifiedAccessLogKinesisDataFirehoseDestinationOptions", "documentation":"

      Sends Verified Access logs to Kinesis.

      " + }, + "LogVersion":{ + "shape":"String", + "documentation":"

      The logging version to use.

      Valid values: ocsf-0.1 | ocsf-1.0.0-rc.2

      " + }, + "IncludeTrustContext":{ + "shape":"Boolean", + "documentation":"

      Include trust data sent by trust providers into the logs.

      " } }, - "documentation":"

      Describes the destinations for Verified Access logs.

      " + "documentation":"

      Options for Verified Access logs.

      " }, "VerifiedAccessLogS3Destination":{ "type":"structure", @@ -55977,9 +56283,19 @@ "shape":"VerifiedAccessLogKinesisDataFirehoseDestination", "documentation":"

      Kinesis logging destination.

      ", "locationName":"kinesisDataFirehose" + }, + "LogVersion":{ + "shape":"String", + "documentation":"

      Describes current setting for the logging version.

      ", + "locationName":"logVersion" + }, + "IncludeTrustContext":{ + "shape":"Boolean", + "documentation":"

      Describes current setting for including trust data into the logs.

      ", + "locationName":"includeTrustContext" } }, - "documentation":"

      Describes the destinations for Verified Access logs.

      " + "documentation":"

      Describes the options for Verified Access logs.

      " }, "VerifiedAccessTrustProvider":{ "type":"structure", @@ -57285,7 +57601,7 @@ "type":"structure", "members":{ "CustomerGatewayConfiguration":{ - "shape":"String", + "shape":"customerGatewayConfiguration", "documentation":"

      The configuration information for the VPN connection's customer gateway (in the native XML format). This element is always present in the CreateVpnConnection response; however, it's present in the DescribeVpnConnections response only if the VPN connection is in the pending or available state.

      ", "locationName":"customerGatewayConfiguration" }, @@ -57659,7 +57975,7 @@ "documentation":"

      The range of inside IPv6 addresses for the tunnel. Any specified CIDR blocks must be unique across all VPN connections that use the same transit gateway.

      Constraints: A size /126 CIDR block from the local fd00::/8 range.

      " }, "PreSharedKey":{ - "shape":"String", + "shape":"preSharedKey", "documentation":"

      The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and customer gateway.

      Constraints: Allowed characters are alphanumeric characters, periods (.), and underscores (_). Must be between 8 and 64 characters in length and cannot start with zero (0).

      " }, "Phase1LifetimeSeconds":{ @@ -57794,6 +58110,14 @@ "locationName":"ZoneName" } }, + "customerGatewayConfiguration":{ + "type":"string", + "sensitive":true + }, + "preSharedKey":{ + "type":"string", + "sensitive":true + }, "scope":{ "type":"string", "enum":[ diff --git a/services/ec2instanceconnect/pom.xml b/services/ec2instanceconnect/pom.xml index d8603e2f8aac..e5f3e5967d07 100644 --- a/services/ec2instanceconnect/pom.xml +++ b/services/ec2instanceconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ec2instanceconnect AWS Java SDK :: Services :: EC2 Instance Connect diff --git a/services/ecr/pom.xml b/services/ecr/pom.xml index bf0524004195..6def0c456ada 100644 --- a/services/ecr/pom.xml +++ b/services/ecr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ecr AWS Java SDK :: Services :: Amazon EC2 Container Registry diff --git a/services/ecrpublic/pom.xml b/services/ecrpublic/pom.xml index da48fa5443ab..f3c96efbc93b 100644 --- a/services/ecrpublic/pom.xml +++ b/services/ecrpublic/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ecrpublic AWS Java SDK :: Services :: ECR PUBLIC diff --git a/services/ecs/pom.xml b/services/ecs/pom.xml index ace5af8f040a..59f9f0dc1eb2 100644 --- a/services/ecs/pom.xml +++ b/services/ecs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ecs AWS Java SDK :: Services :: Amazon EC2 Container Service diff --git a/services/ecs/src/main/resources/codegen-resources/service-2.json b/services/ecs/src/main/resources/codegen-resources/service-2.json index f4aa7f721a42..608d286bcb10 100644 --- a/services/ecs/src/main/resources/codegen-resources/service-2.json +++ b/services/ecs/src/main/resources/codegen-resources/service-2.json @@ -186,7 +186,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ServerException"} ], - "documentation":"

      Deletes one or more task definitions.

      You must deregister a task definition revision before you delete it. For more information, see DeregisterTaskDefinition.

      When you delete a task definition revision, it immediately transitions from INACTIVE to DELETE_IN_PROGRESS. Existing tasks and services that reference a DELETE_IN_PROGRESS task definition revision continue to run without disruption. Existing services that reference a DELETE_IN_PROGRESS task definition revision can still scale up or down by modifying the service's desired count.

      You can't use a DELETE_IN_PROGRESS task definition revision to run new tasks or create new services. You also can't update an existing service to reference a DELETE_IN_PROGRESS task definition revision.

      A task definition revision will stay in DELETE_IN_PROGRESS status until all the associated tasks and services have been terminated.

      " + "documentation":"

      Deletes one or more task definitions.

      You must deregister a task definition revision before you delete it. For more information, see DeregisterTaskDefinition.

      When you delete a task definition revision, it immediately transitions from INACTIVE to DELETE_IN_PROGRESS. Existing tasks and services that reference a DELETE_IN_PROGRESS task definition revision continue to run without disruption. Existing services that reference a DELETE_IN_PROGRESS task definition revision can still scale up or down by modifying the service's desired count.

      You can't use a DELETE_IN_PROGRESS task definition revision to run new tasks or create new services. You also can't update an existing service to reference a DELETE_IN_PROGRESS task definition revision.

      A task definition revision will stay in DELETE_IN_PROGRESS status until all the associated tasks and services have been terminated.

      When you delete all INACTIVE task definition revisions, the task definition name is not displayed in the console and not returned in the API. If a task definition revision is in the DELETE_IN_PROGRESS state, the task definition name is displayed in the console and returned in the API. The task definition name is retained by Amazon ECS and the revision is incremented the next time you create a task definition with that name.

      " }, "DeleteTaskSet":{ "name":"DeleteTaskSet", @@ -1463,7 +1463,7 @@ }, "imageDigest":{ "shape":"String", - "documentation":"

      The container image manifest digest.

      The imageDigest is only returned if the container is using an image hosted in Amazon ECR, otherwise it is omitted.

      " + "documentation":"

      The container image manifest digest.

      " }, "runtimeId":{ "shape":"String", @@ -2012,7 +2012,7 @@ }, "desiredCount":{ "shape":"BoxedInteger", - "documentation":"

      The number of instantiations of the specified task definition to place and keep running on your cluster.

      This is required if schedulingStrategy is REPLICA or isn't specified. If schedulingStrategy is DAEMON then this isn't required.

      " + "documentation":"

      The number of instantiations of the specified task definition to place and keep running in your service.

      This is required if schedulingStrategy is REPLICA or isn't specified. If schedulingStrategy is DAEMON then this isn't required.

      " }, "clientToken":{ "shape":"String", @@ -2068,11 +2068,11 @@ }, "enableECSManagedTags":{ "shape":"Boolean", - "documentation":"

      Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For more information, see Tagging your Amazon ECS resources in the Amazon Elastic Container Service Developer Guide.

      " + "documentation":"

      Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For more information, see Tagging your Amazon ECS resources in the Amazon Elastic Container Service Developer Guide.

      When you use Amazon ECS managed tags, you need to set the propagateTags request parameter.

      " }, "propagateTags":{ "shape":"PropagateTags", - "documentation":"

      Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the TagResource API action.

      " + "documentation":"

      Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the TagResource API action.

      The default is NONE.

      " }, "enableExecuteCommand":{ "shape":"Boolean", diff --git a/services/efs/pom.xml b/services/efs/pom.xml index 027fc3a89554..4b270dc7939f 100644 --- a/services/efs/pom.xml +++ b/services/efs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT efs AWS Java SDK :: Services :: Amazon Elastic File System diff --git a/services/efs/src/main/resources/codegen-resources/service-2.json b/services/efs/src/main/resources/codegen-resources/service-2.json index d7e1b151d717..1fb57e8f110c 100644 --- a/services/efs/src/main/resources/codegen-resources/service-2.json +++ b/services/efs/src/main/resources/codegen-resources/service-2.json @@ -29,7 +29,7 @@ {"shape":"AccessPointLimitExceeded"}, {"shape":"ThrottlingException"} ], - "documentation":"

      Creates an EFS access point. An access point is an application-specific view into an EFS file system that applies an operating system user and group, and a file system path, to any file system request made through the access point. The operating system user and group override any identity information provided by the NFS client. The file system path is exposed as the access point's root directory. Applications using the access point can only access data in the application's own directory and any subdirectories. To learn more, see Mounting a file system using EFS access points.

      If multiple requests to create access points on the same file system are sent in quick succession, and the file system is near the limit of 1000 access points, you may experience a throttling response for these requests. This is to ensure that the file system does not exceed the stated access point limit.

      This operation requires permissions for the elasticfilesystem:CreateAccessPoint action.

      " + "documentation":"

      Creates an EFS access point. An access point is an application-specific view into an EFS file system that applies an operating system user and group, and a file system path, to any file system request made through the access point. The operating system user and group override any identity information provided by the NFS client. The file system path is exposed as the access point's root directory. Applications using the access point can only access data in the application's own directory and any subdirectories. To learn more, see Mounting a file system using EFS access points.

      If multiple requests to create access points on the same file system are sent in quick succession, and the file system is near the limit of 1,000 access points, you may experience a throttling response for these requests. This is to ensure that the file system does not exceed the stated access point limit.

      This operation requires permissions for the elasticfilesystem:CreateAccessPoint action.

      Access points can be tagged on creation. If tags are specified in the creation action, IAM performs additional authorization on the elasticfilesystem:TagResource action to verify if users have permissions to create tags. Therefore, you must grant explicit permissions to use the elasticfilesystem:TagResource action. For more information, see Granting permissions to tag resources during creation.

      " }, "CreateFileSystem":{ "name":"CreateFileSystem", @@ -49,7 +49,7 @@ {"shape":"ThroughputLimitExceeded"}, {"shape":"UnsupportedAvailabilityZone"} ], - "documentation":"

      Creates a new, empty file system. The operation requires a creation token in the request that Amazon EFS uses to ensure idempotent creation (calling the operation with same creation token has no effect). If a file system does not currently exist that is owned by the caller's Amazon Web Services account with the specified creation token, this operation does the following:

      • Creates a new, empty file system. The file system will have an Amazon EFS assigned ID, and an initial lifecycle state creating.

      • Returns with the description of the created file system.

      Otherwise, this operation returns a FileSystemAlreadyExists error with the ID of the existing file system.

      For basic use cases, you can use a randomly generated UUID for the creation token.

      The idempotent operation allows you to retry a CreateFileSystem call without risk of creating an extra file system. This can happen when an initial call fails in a way that leaves it uncertain whether or not a file system was actually created. An example might be that a transport level timeout occurred or your connection was reset. As long as you use the same creation token, if the initial call had succeeded in creating a file system, the client can learn of its existence from the FileSystemAlreadyExists error.

      For more information, see Creating a file system in the Amazon EFS User Guide.

      The CreateFileSystem call returns while the file system's lifecycle state is still creating. You can check the file system creation status by calling the DescribeFileSystems operation, which among other things returns the file system state.

      This operation accepts an optional PerformanceMode parameter that you choose for your file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created. For more information, see Amazon EFS performance modes.

      You can set the throughput mode for the file system using the ThroughputMode parameter.

      After the file system is fully created, Amazon EFS sets its lifecycle state to available, at which point you can create one or more mount targets for the file system in your VPC. For more information, see CreateMountTarget. You mount your Amazon EFS file system on EC2 instances in your VPC by using the mount target. For more information, see Amazon EFS: How it Works.

      This operation requires permissions for the elasticfilesystem:CreateFileSystem action.

      " + "documentation":"

      Creates a new, empty file system. The operation requires a creation token in the request that Amazon EFS uses to ensure idempotent creation (calling the operation with same creation token has no effect). If a file system does not currently exist that is owned by the caller's Amazon Web Services account with the specified creation token, this operation does the following:

      • Creates a new, empty file system. The file system will have an Amazon EFS assigned ID, and an initial lifecycle state creating.

      • Returns with the description of the created file system.

      Otherwise, this operation returns a FileSystemAlreadyExists error with the ID of the existing file system.

      For basic use cases, you can use a randomly generated UUID for the creation token.

      The idempotent operation allows you to retry a CreateFileSystem call without risk of creating an extra file system. This can happen when an initial call fails in a way that leaves it uncertain whether or not a file system was actually created. An example might be that a transport level timeout occurred or your connection was reset. As long as you use the same creation token, if the initial call had succeeded in creating a file system, the client can learn of its existence from the FileSystemAlreadyExists error.

      For more information, see Creating a file system in the Amazon EFS User Guide.

      The CreateFileSystem call returns while the file system's lifecycle state is still creating. You can check the file system creation status by calling the DescribeFileSystems operation, which among other things returns the file system state.

      This operation accepts an optional PerformanceMode parameter that you choose for your file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created. For more information, see Amazon EFS performance modes.

      You can set the throughput mode for the file system using the ThroughputMode parameter.

      After the file system is fully created, Amazon EFS sets its lifecycle state to available, at which point you can create one or more mount targets for the file system in your VPC. For more information, see CreateMountTarget. You mount your Amazon EFS file system on EC2 instances in your VPC by using the mount target. For more information, see Amazon EFS: How it Works.

      This operation requires permissions for the elasticfilesystem:CreateFileSystem action.

      File systems can be tagged on creation. If tags are specified in the creation action, IAM performs additional authorization on the elasticfilesystem:TagResource action to verify if users have permissions to create tags. Therefore, you must grant explicit permissions to use the elasticfilesystem:TagResource action. For more information, see Granting permissions to tag resources during creation.

      " }, "CreateMountTarget":{ "name":"CreateMountTarget", @@ -1275,7 +1275,7 @@ "members":{ "Status":{ "shape":"ReplicationStatus", - "documentation":"

      Describes the status of the destination Amazon EFS file system. If the status is ERROR, the destination file system in the replication configuration is in a failed state and is unrecoverable. To access the file system data, restore a backup of the failed file system to a new file system.

      " + "documentation":"

      Describes the status of the destination Amazon EFS file system.

      • The Paused state occurs as a result of opting out of the source or destination Region after the replication configuration was created. To resume replication for the file system, you need to again opt in to the Amazon Web Services Region. For more information, see Managing Amazon Web Services Regions in the Amazon Web Services General Reference Guide.

      • The Error state occurs when either the source or the destination file system (or both) is in a failed state and is unrecoverable. For more information, see Monitoring replication status in the Amazon EFS User Guide. You must delete the replication configuration, and then restore the most recent backup of the failed file system (either the source or the destination) to a new file system.

      " }, "FileSystemId":{ "shape":"FileSystemId", diff --git a/services/eks/pom.xml b/services/eks/pom.xml index 40d1f0d9669a..5d3b95787f56 100644 --- a/services/eks/pom.xml +++ b/services/eks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT eks AWS Java SDK :: Services :: EKS diff --git a/services/elasticache/pom.xml b/services/elasticache/pom.xml index cc3a2efb58ff..73874ae59d69 100644 --- a/services/elasticache/pom.xml +++ b/services/elasticache/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT elasticache AWS Java SDK :: Services :: Amazon ElastiCache diff --git a/services/elasticbeanstalk/pom.xml b/services/elasticbeanstalk/pom.xml index 85d1e1e0b8c7..d30c7b815def 100644 --- a/services/elasticbeanstalk/pom.xml +++ b/services/elasticbeanstalk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT elasticbeanstalk AWS Java SDK :: Services :: AWS Elastic Beanstalk diff --git a/services/elasticinference/pom.xml b/services/elasticinference/pom.xml index 792149db2e5b..1b254fa93a84 100644 --- a/services/elasticinference/pom.xml +++ b/services/elasticinference/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT elasticinference AWS Java SDK :: Services :: Elastic Inference diff --git a/services/elasticloadbalancing/pom.xml b/services/elasticloadbalancing/pom.xml index a88070ff73de..d6d1dd212563 100644 --- a/services/elasticloadbalancing/pom.xml +++ b/services/elasticloadbalancing/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT elasticloadbalancing AWS Java SDK :: Services :: Elastic Load Balancing diff --git a/services/elasticloadbalancingv2/pom.xml b/services/elasticloadbalancingv2/pom.xml index f08ca7272430..7c03d3466328 100644 --- a/services/elasticloadbalancingv2/pom.xml +++ b/services/elasticloadbalancingv2/pom.xml @@ -21,7 +21,7 @@ 
software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT elasticloadbalancingv2 AWS Java SDK :: Services :: Elastic Load Balancing V2 diff --git a/services/elasticsearch/pom.xml b/services/elasticsearch/pom.xml index f595e2a51d82..e12d5e138470 100644 --- a/services/elasticsearch/pom.xml +++ b/services/elasticsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT elasticsearch AWS Java SDK :: Services :: Amazon Elasticsearch Service diff --git a/services/elastictranscoder/pom.xml b/services/elastictranscoder/pom.xml index c6f1222f64d7..d68809e3c2ff 100644 --- a/services/elastictranscoder/pom.xml +++ b/services/elastictranscoder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT elastictranscoder AWS Java SDK :: Services :: Amazon Elastic Transcoder diff --git a/services/emr/pom.xml b/services/emr/pom.xml index 67b49cc30bcb..2069fb032b2b 100644 --- a/services/emr/pom.xml +++ b/services/emr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT emr AWS Java SDK :: Services :: Amazon EMR diff --git a/services/emr/src/main/resources/codegen-resources/paginators-1.json b/services/emr/src/main/resources/codegen-resources/paginators-1.json index 5ea61f92522c..a9d0a237da06 100644 --- a/services/emr/src/main/resources/codegen-resources/paginators-1.json +++ b/services/emr/src/main/resources/codegen-resources/paginators-1.json @@ -57,6 +57,10 @@ "input_token": "Marker", "output_token": "Marker", "result_key": "Studios" + }, + "ListSupportedInstanceTypes": { + "input_token": "Marker", + "output_token": "Marker" } } } \ No newline at end of file diff --git a/services/emr/src/main/resources/codegen-resources/service-2.json b/services/emr/src/main/resources/codegen-resources/service-2.json index 31d179449d33..403ad367505c 100644 --- a/services/emr/src/main/resources/codegen-resources/service-2.json +++ 
b/services/emr/src/main/resources/codegen-resources/service-2.json @@ -476,6 +476,20 @@ ], "documentation":"

      Returns a list of all Amazon EMR Studios associated with the Amazon Web Services account. The list includes details such as ID, Studio Access URL, and creation time for each Studio.

      " }, + "ListSupportedInstanceTypes":{ + "name":"ListSupportedInstanceTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSupportedInstanceTypesInput"}, + "output":{"shape":"ListSupportedInstanceTypesOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

      A list of the instance types that Amazon EMR supports. You can filter the list by Amazon Web Services Region and Amazon EMR release.

      " + }, "ModifyCluster":{ "name":"ModifyCluster", "http":{ @@ -2100,6 +2114,7 @@ }, "documentation":"

      The details of the step failure. The service attempts to detect the root cause for many common failures.

      " }, + "Float":{"type":"float"}, "GetAutoTerminationPolicyInput":{ "type":"structure", "required":["ClusterId"], @@ -3803,6 +3818,33 @@ } } }, + "ListSupportedInstanceTypesInput":{ + "type":"structure", + "required":["ReleaseLabel"], + "members":{ + "ReleaseLabel":{ + "shape":"String", + "documentation":"

      The Amazon EMR release label determines the versions of open-source application packages that Amazon EMR has installed on the cluster. Release labels are in the format emr-x.x.x, where x.x.x is an Amazon EMR release number such as emr-6.10.0. For more information about Amazon EMR releases and their included application versions and features, see the Amazon EMR Release Guide.

      " + }, + "Marker":{ + "shape":"String", + "documentation":"

      The pagination token that marks the next set of results to retrieve.

      " + } + } + }, + "ListSupportedInstanceTypesOutput":{ + "type":"structure", + "members":{ + "SupportedInstanceTypes":{ + "shape":"SupportedInstanceTypesList", + "documentation":"

      The list of instance types that the release specified in ListSupportedInstanceTypesInput$ReleaseLabel supports, filtered by Amazon Web Services Region.

      " + }, + "Marker":{ + "shape":"String", + "documentation":"

      The pagination token that marks the next set of results to retrieve.

      " + } + } + }, "Long":{"type":"long"}, "ManagedScalingPolicy":{ "type":"structure", @@ -4859,7 +4901,12 @@ }, "SpotProvisioningAllocationStrategy":{ "type":"string", - "enum":["capacity-optimized"] + "enum":[ + "capacity-optimized", + "price-capacity-optimized", + "lowest-price", + "diversified" + ] }, "SpotProvisioningSpecification":{ "type":"structure", @@ -5353,6 +5400,60 @@ "type":"list", "member":{"shape":"String"} }, + "SupportedInstanceType":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"String", + "documentation":"

      The Amazon EC2 instance type, for example m5.xlarge, of the SupportedInstanceType.

      " + }, + "MemoryGB":{ + "shape":"Float", + "documentation":"

      The amount of memory that is available to Amazon EMR from the SupportedInstanceType. The kernel and hypervisor software consume some memory, so this value might be lower than the overall memory for the instance type.

      " + }, + "StorageGB":{ + "shape":"Integer", + "documentation":"

      StorageGB represents the storage capacity of the SupportedInstanceType. This value is 0 for Amazon EBS-only instance types.

      " + }, + "VCPU":{ + "shape":"Integer", + "documentation":"

      The number of vCPUs available for the SupportedInstanceType.

      " + }, + "Is64BitsOnly":{ + "shape":"Boolean", + "documentation":"

      Indicates whether the SupportedInstanceType only supports 64-bit architecture.

      " + }, + "InstanceFamilyId":{ + "shape":"String", + "documentation":"

      The Amazon EC2 family and generation for the SupportedInstanceType.

      " + }, + "EbsOptimizedAvailable":{ + "shape":"Boolean", + "documentation":"

      Indicates whether the SupportedInstanceType supports Amazon EBS optimization.

      " + }, + "EbsOptimizedByDefault":{ + "shape":"Boolean", + "documentation":"

      Indicates whether the SupportedInstanceType uses Amazon EBS optimization by default.

      " + }, + "NumberOfDisks":{ + "shape":"Integer", + "documentation":"

      Number of disks for the SupportedInstanceType. This value is 0 for Amazon EBS-only instance types.

      " + }, + "EbsStorageOnly":{ + "shape":"Boolean", + "documentation":"

      Indicates whether the SupportedInstanceType only supports Amazon EBS.

      " + }, + "Architecture":{ + "shape":"String", + "documentation":"

      The CPU architecture, for example X86_64 or AARCH64.

      " + } + }, + "documentation":"

      An instance type that the specified Amazon EMR release supports.

      " + }, + "SupportedInstanceTypesList":{ + "type":"list", + "member":{"shape":"SupportedInstanceType"} + }, "SupportedProductConfig":{ "type":"structure", "members":{ diff --git a/services/emrcontainers/pom.xml b/services/emrcontainers/pom.xml index 0016f0f0863a..25544a01af87 100644 --- a/services/emrcontainers/pom.xml +++ b/services/emrcontainers/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT emrcontainers AWS Java SDK :: Services :: EMR Containers diff --git a/services/emrcontainers/src/main/resources/codegen-resources/service-2.json b/services/emrcontainers/src/main/resources/codegen-resources/service-2.json index 37ca8020dd64..6ed74257cf9a 100644 --- a/services/emrcontainers/src/main/resources/codegen-resources/service-2.json +++ b/services/emrcontainers/src/main/resources/codegen-resources/service-2.json @@ -442,6 +442,24 @@ "documentation":"

      The information about the container used for a job run or a managed endpoint.

      ", "union":true }, + "ContainerLogRotationConfiguration":{ + "type":"structure", + "required":[ + "rotationSize", + "maxFilesToKeep" + ], + "members":{ + "rotationSize":{ + "shape":"RotationSize", + "documentation":"

      The file size at which to rotate logs. Minimum of 2KB, Maximum of 2GB.

      " + }, + "maxFilesToKeep":{ + "shape":"MaxFilesToKeep", + "documentation":"

      The number of files to keep in container after rotation.

      " + } + }, + "documentation":"

      The settings for container log rotation.

      " + }, "ContainerProvider":{ "type":"structure", "required":[ @@ -1276,7 +1294,7 @@ "type":"string", "max":2048, "min":3, - "pattern":"^(arn:(aws[a-zA-Z0-9-]*):kms:([a-zA-Z0-9]+-?)+:(\\d{12})?:key\\/[(0-9a-zA-Z)-?]+|\\$\\{[a-zA-Z]\\w*\\})$" + "pattern":"^(arn:(aws[a-zA-Z0-9-]*):kms:.+:(\\d{12})?:key\\/[(0-9a-zA-Z)-?]+|\\$\\{[a-zA-Z]\\w*\\})$" }, "KubernetesNamespace":{ "type":"string", @@ -1541,6 +1559,11 @@ "min":1, "pattern":"[\\.\\-_/#A-Za-z0-9]+" }, + "MaxFilesToKeep":{ + "type":"integer", + "max":50, + "min":1 + }, "MonitoringConfiguration":{ "type":"structure", "members":{ @@ -1555,6 +1578,10 @@ "s3MonitoringConfiguration":{ "shape":"S3MonitoringConfiguration", "documentation":"

      Amazon S3 configuration for monitoring log publishing.

      " + }, + "containerLogRotationConfiguration":{ + "shape":"ContainerLogRotationConfiguration", + "documentation":"

      Enable or disable container log rotation.

      " } }, "documentation":"

      Configuration setting for monitoring.

      " @@ -1704,6 +1731,12 @@ }, "documentation":"

      The current status of the retry policy executed on the job.

      " }, + "RotationSize":{ + "type":"string", + "max":12, + "min":3, + "pattern":"^\\d+(\\.\\d+)?[KMG][Bb]?$" + }, "RsiArn":{ "type":"string", "max":500, diff --git a/services/emrserverless/pom.xml b/services/emrserverless/pom.xml index 59bb577e5043..d46e2fddfbe5 100644 --- a/services/emrserverless/pom.xml +++ b/services/emrserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT emrserverless AWS Java SDK :: Services :: EMR Serverless diff --git a/services/eventbridge/pom.xml b/services/eventbridge/pom.xml index 432d0ff05c09..a36aacefdc23 100644 --- a/services/eventbridge/pom.xml +++ b/services/eventbridge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT eventbridge AWS Java SDK :: Services :: EventBridge diff --git a/services/eventbridge/src/main/resources/codegen-resources/customization.config b/services/eventbridge/src/main/resources/codegen-resources/customization.config deleted file mode 100644 index 045f4d5b3184..000000000000 --- a/services/eventbridge/src/main/resources/codegen-resources/customization.config +++ /dev/null @@ -1,10 +0,0 @@ -{ - "skipEndpointTests": { - "Valid EndpointId with dualstack disabled and fips enabled": "Need operationInputs for EndpointId param", - "Valid EndpointId with dualstack enabled and fips enabled": "Need operationInputs for EndpointId param", - "Invalid EndpointId": "Need operationInputs for EndpointId param", - "Invalid EndpointId (empty)": "Need operationInputs for EndpointId param", - "Valid endpointId with fips disabled and dualstack true": "Need operationInputs for EndpointId param", - "Valid endpointId with custom sdk endpoint": "Need operationInputs for EndpointId param" - } -} diff --git a/services/evidently/pom.xml b/services/evidently/pom.xml index 93de055990f7..474cb69e7ed3 100644 --- a/services/evidently/pom.xml +++ b/services/evidently/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT 
+ 2.20.93-SNAPSHOT evidently AWS Java SDK :: Services :: Evidently diff --git a/services/evidently/src/main/resources/codegen-resources/customization.config b/services/evidently/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..0e729acd0371 --- /dev/null +++ b/services/evidently/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "generateEndpointClientTests": true +} diff --git a/services/finspace/pom.xml b/services/finspace/pom.xml index 4cc0f22e4b1d..cd358fe399b5 100644 --- a/services/finspace/pom.xml +++ b/services/finspace/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT finspace AWS Java SDK :: Services :: Finspace diff --git a/services/finspace/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/finspace/src/main/resources/codegen-resources/endpoint-rule-set.json index 7913a9c35ba0..5299e3d75a86 100644 --- a/services/finspace/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/finspace/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not 
supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://finspace-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + 
"conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://finspace-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://finspace-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": 
"tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://finspace.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -231,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://finspace-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://finspace.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -240,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://finspace.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://finspace.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/finspace/src/main/resources/codegen-resources/endpoint-tests.json b/services/finspace/src/main/resources/codegen-resources/endpoint-tests.json index f6da33741a8c..aabedda32235 100644 --- 
a/services/finspace/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/finspace/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,276 +1,333 @@ { "testCases": [ { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://finspace-fips.ca-central-1.api.aws" + "url": "https://finspace.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": true, "Region": "ca-central-1", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://finspace-fips.ca-central-1.amazonaws.com" + "url": "https://finspace.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": true + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://finspace.ca-central-1.api.aws" + "url": "https://finspace.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://finspace.ca-central-1.amazonaws.com" + "url": "https://finspace.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": false + "Region": "us-east-2", 
+ "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://finspace-fips.us-west-2.api.aws" + "url": "https://finspace.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, "Region": "us-west-2", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://finspace-fips.us-west-2.amazonaws.com" + "url": "https://finspace-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://finspace.us-west-2.api.aws" + "url": "https://finspace-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://finspace.us-west-2.amazonaws.com" + "url": "https://finspace.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and 
DualStack enabled", "expect": { "endpoint": { - "url": "https://finspace-fips.eu-west-1.api.aws" + "url": "https://finspace-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://finspace-fips.eu-west-1.amazonaws.com" + "url": "https://finspace-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://finspace.eu-west-1.api.aws" + "url": "https://finspace.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://finspace.eu-west-1.amazonaws.com" + "url": "https://finspace.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://finspace-fips.us-east-1.api.aws" + "url": 
"https://finspace-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://finspace-fips.us-east-1.amazonaws.com" + "url": "https://finspace-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://finspace.us-east-1.api.aws" + "url": "https://finspace.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://finspace.us-east-1.amazonaws.com" + "url": "https://finspace.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { 
+ "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://finspace-fips.us-east-2.api.aws" + "url": "https://finspace-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://finspace-fips.us-east-2.amazonaws.com" + "url": "https://finspace.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://finspace.us-east-2.api.aws" + "url": "https://finspace-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack 
disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://finspace.us-east-2.amazonaws.com" + "url": "https://finspace.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -280,9 +337,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -292,11 +349,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git 
a/services/finspace/src/main/resources/codegen-resources/paginators-1.json b/services/finspace/src/main/resources/codegen-resources/paginators-1.json index 5677bd8e4a2d..ca77facdbe95 100644 --- a/services/finspace/src/main/resources/codegen-resources/paginators-1.json +++ b/services/finspace/src/main/resources/codegen-resources/paginators-1.json @@ -1,4 +1,25 @@ { "pagination": { + "ListKxChangesets": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "ListKxClusterNodes": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "ListKxDatabases": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "ListKxEnvironments": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "environments" + } } } diff --git a/services/finspace/src/main/resources/codegen-resources/service-2.json b/services/finspace/src/main/resources/codegen-resources/service-2.json index bf3195feaeca..e0d63a365a5e 100644 --- a/services/finspace/src/main/resources/codegen-resources/service-2.json +++ b/services/finspace/src/main/resources/codegen-resources/service-2.json @@ -31,6 +31,104 @@ ], "documentation":"

      Create a new FinSpace environment.

      " }, + "CreateKxChangeset":{ + "name":"CreateKxChangeset", + "http":{ + "method":"POST", + "requestUri":"/kx/environments/{environmentId}/databases/{databaseName}/changesets" + }, + "input":{"shape":"CreateKxChangesetRequest"}, + "output":{"shape":"CreateKxChangesetResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

      Creates a changeset for a kdb database. A changeset allows you to add and delete existing files by using an ordered list of change requests.

      " + }, + "CreateKxCluster":{ + "name":"CreateKxCluster", + "http":{ + "method":"POST", + "requestUri":"/kx/environments/{environmentId}/clusters" + }, + "input":{"shape":"CreateKxClusterRequest"}, + "output":{"shape":"CreateKxClusterResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

      Creates a new kdb cluster.

      " + }, + "CreateKxDatabase":{ + "name":"CreateKxDatabase", + "http":{ + "method":"POST", + "requestUri":"/kx/environments/{environmentId}/databases" + }, + "input":{"shape":"CreateKxDatabaseRequest"}, + "output":{"shape":"CreateKxDatabaseResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

      Creates a new kdb database in the environment.

      " + }, + "CreateKxEnvironment":{ + "name":"CreateKxEnvironment", + "http":{ + "method":"POST", + "requestUri":"/kx/environments" + }, + "input":{"shape":"CreateKxEnvironmentRequest"}, + "output":{"shape":"CreateKxEnvironmentResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"

      Creates a managed kdb environment for the account.

      " + }, + "CreateKxUser":{ + "name":"CreateKxUser", + "http":{ + "method":"POST", + "requestUri":"/kx/environments/{environmentId}/users" + }, + "input":{"shape":"CreateKxUserRequest"}, + "output":{"shape":"CreateKxUserResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"

      Creates a user in the FinSpace kdb environment with an associated IAM role.

      " + }, "DeleteEnvironment":{ "name":"DeleteEnvironment", "http":{ @@ -48,6 +146,77 @@ ], "documentation":"

      Delete a FinSpace environment.

      " }, + "DeleteKxCluster":{ + "name":"DeleteKxCluster", + "http":{ + "method":"DELETE", + "requestUri":"/kx/environments/{environmentId}/clusters/{clusterName}" + }, + "input":{"shape":"DeleteKxClusterRequest"}, + "output":{"shape":"DeleteKxClusterResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Deletes a kdb cluster.

      " + }, + "DeleteKxDatabase":{ + "name":"DeleteKxDatabase", + "http":{ + "method":"DELETE", + "requestUri":"/kx/environments/{environmentId}/databases/{databaseName}" + }, + "input":{"shape":"DeleteKxDatabaseRequest"}, + "output":{"shape":"DeleteKxDatabaseResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

      Deletes the specified database and all of its associated data. This action is irreversible. You must copy any data out of the database before deleting it if the data is to be retained.

      " + }, + "DeleteKxEnvironment":{ + "name":"DeleteKxEnvironment", + "http":{ + "method":"DELETE", + "requestUri":"/kx/environments/{environmentId}" + }, + "input":{"shape":"DeleteKxEnvironmentRequest"}, + "output":{"shape":"DeleteKxEnvironmentResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Deletes the kdb environment. This action is irreversible. Deleting a kdb environment will remove all the associated data and any services running in it.

      " + }, + "DeleteKxUser":{ + "name":"DeleteKxUser", + "http":{ + "method":"DELETE", + "requestUri":"/kx/environments/{environmentId}/users/{userName}" + }, + "input":{"shape":"DeleteKxUserRequest"}, + "output":{"shape":"DeleteKxUserResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Deletes a user in the specified kdb environment.

      " + }, "GetEnvironment":{ "name":"GetEnvironment", "http":{ @@ -64,6 +233,109 @@ ], "documentation":"

      Returns the FinSpace environment object.

      " }, + "GetKxChangeset":{ + "name":"GetKxChangeset", + "http":{ + "method":"GET", + "requestUri":"/kx/environments/{environmentId}/databases/{databaseName}/changesets/{changesetId}" + }, + "input":{"shape":"GetKxChangesetRequest"}, + "output":{"shape":"GetKxChangesetResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Returns information about a kdb changeset.

      " + }, + "GetKxCluster":{ + "name":"GetKxCluster", + "http":{ + "method":"GET", + "requestUri":"/kx/environments/{environmentId}/clusters/{clusterName}" + }, + "input":{"shape":"GetKxClusterRequest"}, + "output":{"shape":"GetKxClusterResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Retrieves information about a kdb cluster.

      " + }, + "GetKxConnectionString":{ + "name":"GetKxConnectionString", + "http":{ + "method":"GET", + "requestUri":"/kx/environments/{environmentId}/connectionString" + }, + "input":{"shape":"GetKxConnectionStringRequest"}, + "output":{"shape":"GetKxConnectionStringResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Retrieves a connection string for a user to connect to a kdb cluster. You must call this API using the same role that you have defined while creating a user.

      " + }, + "GetKxDatabase":{ + "name":"GetKxDatabase", + "http":{ + "method":"GET", + "requestUri":"/kx/environments/{environmentId}/databases/{databaseName}" + }, + "input":{"shape":"GetKxDatabaseRequest"}, + "output":{"shape":"GetKxDatabaseResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Returns database information for the specified environment ID.

      " + }, + "GetKxEnvironment":{ + "name":"GetKxEnvironment", + "http":{ + "method":"GET", + "requestUri":"/kx/environments/{environmentId}" + }, + "input":{"shape":"GetKxEnvironmentRequest"}, + "output":{"shape":"GetKxEnvironmentResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

      Retrieves all the information for the specified kdb environment.

      " + }, + "GetKxUser":{ + "name":"GetKxUser", + "http":{ + "method":"GET", + "requestUri":"/kx/environments/{environmentId}/users/{userName}" + }, + "input":{"shape":"GetKxUserRequest"}, + "output":{"shape":"GetKxUserResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Retrieves information about the specified kdb user.

      " + }, "ListEnvironments":{ "name":"ListEnvironments", "http":{ @@ -78,6 +350,108 @@ ], "documentation":"

      A list of all of your FinSpace environments.

      " }, + "ListKxChangesets":{ + "name":"ListKxChangesets", + "http":{ + "method":"GET", + "requestUri":"/kx/environments/{environmentId}/databases/{databaseName}/changesets" + }, + "input":{"shape":"ListKxChangesetsRequest"}, + "output":{"shape":"ListKxChangesetsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Returns a list of all the changesets for a database.

      " + }, + "ListKxClusterNodes":{ + "name":"ListKxClusterNodes", + "http":{ + "method":"GET", + "requestUri":"/kx/environments/{environmentId}/clusters/{clusterName}/nodes" + }, + "input":{"shape":"ListKxClusterNodesRequest"}, + "output":{"shape":"ListKxClusterNodesResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"LimitExceededException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Lists all the nodes in a kdb cluster.

      " + }, + "ListKxClusters":{ + "name":"ListKxClusters", + "http":{ + "method":"GET", + "requestUri":"/kx/environments/{environmentId}/clusters" + }, + "input":{"shape":"ListKxClustersRequest"}, + "output":{"shape":"ListKxClustersResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Returns a list of clusters.

      " + }, + "ListKxDatabases":{ + "name":"ListKxDatabases", + "http":{ + "method":"GET", + "requestUri":"/kx/environments/{environmentId}/databases" + }, + "input":{"shape":"ListKxDatabasesRequest"}, + "output":{"shape":"ListKxDatabasesResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Returns a list of all the databases in the kdb environment.

      " + }, + "ListKxEnvironments":{ + "name":"ListKxEnvironments", + "http":{ + "method":"GET", + "requestUri":"/kx/environments" + }, + "input":{"shape":"ListKxEnvironmentsRequest"}, + "output":{"shape":"ListKxEnvironmentsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Returns a list of kdb environments created in an account.

      " + }, + "ListKxUsers":{ + "name":"ListKxUsers", + "http":{ + "method":"GET", + "requestUri":"/kx/environments/{environmentId}/users" + }, + "input":{"shape":"ListKxUsersRequest"}, + "output":{"shape":"ListKxUsersResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Lists all the users in a kdb environment.

      " + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -139,25 +513,275 @@ {"shape":"ValidationException"} ], "documentation":"

      Update your FinSpace environment.

      " - } - }, - "shapes":{ - "AccessDeniedException":{ - "type":"structure", - "members":{ - }, - "documentation":"

      You do not have sufficient access to perform this action.

      ", - "error":{"httpStatusCode":403}, - "exception":true }, - "AttributeMap":{ - "type":"map", - "key":{"shape":"FederationAttributeKey"}, - "value":{"shape":"url"} + "UpdateKxClusterDatabases":{ + "name":"UpdateKxClusterDatabases", + "http":{ + "method":"PUT", + "requestUri":"/kx/environments/{environmentId}/clusters/{clusterName}/configuration/databases" + }, + "input":{"shape":"UpdateKxClusterDatabasesRequest"}, + "output":{"shape":"UpdateKxClusterDatabasesResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"LimitExceededException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Updates the databases mounted on a kdb cluster, which includes the changesetId and all the dbPaths to be cached. This API does not allow you to change a database name or add a database if you created a cluster without one.

      Using this API you can point a cluster to a different changeset and modify a list of partitions being cached.

      " }, - "CreateEnvironmentRequest":{ - "type":"structure", - "required":["name"], + "UpdateKxDatabase":{ + "name":"UpdateKxDatabase", + "http":{ + "method":"PUT", + "requestUri":"/kx/environments/{environmentId}/databases/{databaseName}" + }, + "input":{"shape":"UpdateKxDatabaseRequest"}, + "output":{"shape":"UpdateKxDatabaseResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

      Updates information for the given kdb database.

      " + }, + "UpdateKxEnvironment":{ + "name":"UpdateKxEnvironment", + "http":{ + "method":"PUT", + "requestUri":"/kx/environments/{environmentId}" + }, + "input":{"shape":"UpdateKxEnvironmentRequest"}, + "output":{"shape":"UpdateKxEnvironmentResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"} + ], + "documentation":"

      Updates information for the given kdb environment.

      " + }, + "UpdateKxEnvironmentNetwork":{ + "name":"UpdateKxEnvironmentNetwork", + "http":{ + "method":"PUT", + "requestUri":"/kx/environments/{environmentId}/network" + }, + "input":{"shape":"UpdateKxEnvironmentNetworkRequest"}, + "output":{"shape":"UpdateKxEnvironmentNetworkResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"} + ], + "documentation":"

      Updates the environment network to connect to your internal network by using a transit gateway. This API supports a request to create a transit gateway attachment from the FinSpace VPC to your transit gateway ID and to create custom Route-53 outbound resolvers.

      Once you send a request to update a network, you cannot change it again. Network update might require termination of any clusters that are running in the existing network.

      " + }, + "UpdateKxUser":{ + "name":"UpdateKxUser", + "http":{ + "method":"PUT", + "requestUri":"/kx/environments/{environmentId}/users/{userName}" + }, + "input":{"shape":"UpdateKxUserRequest"}, + "output":{"shape":"UpdateKxUserResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"

      Updates the user details. You can only update the IAM role associated with a user.

      " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + }, + "documentation":"

      You do not have sufficient access to perform this action.

      ", + "error":{"httpStatusCode":403}, + "exception":true + }, + "AttributeMap":{ + "type":"map", + "key":{"shape":"FederationAttributeKey"}, + "value":{"shape":"FederationAttributeValue"} + }, + "AutoScalingConfiguration":{ + "type":"structure", + "members":{ + "minNodeCount":{ + "shape":"NodeCount", + "documentation":"

      The lowest number of nodes to scale. This value must be at least 1 and less than the maxNodeCount. If the nodes in a cluster belong to multiple availability zones, then minNodeCount must be at least 3.

      " + }, + "maxNodeCount":{ + "shape":"NodeCount", + "documentation":"

      The highest number of nodes to scale. This value cannot be greater than 5.

      " + }, + "autoScalingMetric":{ + "shape":"AutoScalingMetric", + "documentation":"

      The metric your cluster will track in order to scale in and out. For example, CPU_UTILIZATION_PERCENTAGE is the average CPU usage across all the nodes in a cluster.

      " + }, + "metricTarget":{ + "shape":"AutoScalingMetricTarget", + "documentation":"

      The desired value of the chosen autoScalingMetric. When the metric drops below this value, the cluster will scale in. When the metric goes above this value, the cluster will scale out. You can set the target value between 1 and 100 percent.

      " + }, + "scaleInCooldownSeconds":{ + "shape":"CooldownTime", + "documentation":"

      The duration in seconds that FinSpace will wait after a scale in event before initiating another scaling event.

      " + }, + "scaleOutCooldownSeconds":{ + "shape":"CooldownTime", + "documentation":"

      The duration in seconds that FinSpace will wait after a scale out event before initiating another scaling event.

      " + } + }, + "documentation":"

      The configuration based on which FinSpace will scale in or scale out nodes in your cluster.

      " + }, + "AutoScalingMetric":{ + "type":"string", + "enum":["CPU_UTILIZATION_PERCENTAGE"] + }, + "AutoScalingMetricTarget":{ + "type":"double", + "max":100, + "min":1 + }, + "AvailabilityZoneId":{"type":"string"}, + "AvailabilityZoneIds":{ + "type":"list", + "member":{"shape":"AvailabilityZoneId"} + }, + "BoxedInteger":{ + "type":"integer", + "box":true + }, + "CapacityConfiguration":{ + "type":"structure", + "members":{ + "nodeType":{ + "shape":"NodeType", + "documentation":"

      The type that determines the hardware of the host computer used for your cluster instance. Each node type offers different memory and storage capabilities. Choose a node type based on the requirements of the application or software that you plan to run on your instance.

      You can only specify one of the following values:

      • kx.s.large – The node type with a configuration of 12 GiB memory and 2 vCPUs.

      • kx.s.xlarge – The node type with a configuration of 27 GiB memory and 4 vCPUs.

      • kx.s.2xlarge – The node type with a configuration of 54 GiB memory and 8 vCPUs.

      • kx.s.4xlarge – The node type with a configuration of 108 GiB memory and 16 vCPUs.

      • kx.s.8xlarge – The node type with a configuration of 216 GiB memory and 32 vCPUs.

      • kx.s.16xlarge – The node type with a configuration of 432 GiB memory and 64 vCPUs.

      • kx.s.32xlarge – The node type with a configuration of 864 GiB memory and 128 vCPUs.

      " + }, + "nodeCount":{ + "shape":"NodeCount", + "documentation":"

      The number of instances running in a cluster.

      " + } + }, + "documentation":"

      A structure for the metadata of a cluster. It includes information like the CPUs needed, memory of instances, number of instances, and the port used while establishing a connection.

      " + }, + "ChangeRequest":{ + "type":"structure", + "required":[ + "changeType", + "dbPath" + ], + "members":{ + "changeType":{ + "shape":"ChangeType", + "documentation":"

      Defines the type of change request. A changeType can have the following values:

      • PUT – Adds or updates files in a database.

      • DELETE – Deletes files in a database.

      " + }, + "s3Path":{ + "shape":"S3Path", + "documentation":"

      Defines the S3 path of the source file that is required to add or update files in a database.

      " + }, + "dbPath":{ + "shape":"DbPath", + "documentation":"

      Defines the path within the database directory.

      " + } + }, + "documentation":"

      A list of change request objects.

      " + }, + "ChangeRequests":{ + "type":"list", + "member":{"shape":"ChangeRequest"}, + "max":32, + "min":1 + }, + "ChangeType":{ + "type":"string", + "enum":[ + "PUT", + "DELETE" + ] + }, + "ChangesetId":{ + "type":"string", + "max":26, + "min":1 + }, + "ChangesetStatus":{ + "type":"string", + "enum":[ + "PENDING", + "PROCESSING", + "FAILED", + "COMPLETED" + ] + }, + "ClientToken":{ + "type":"string", + "max":36, + "min":1, + "pattern":".*\\S.*" + }, + "ClientTokenString":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9-]+$" + }, + "CodeConfiguration":{ + "type":"structure", + "members":{ + "s3Bucket":{ + "shape":"S3Bucket", + "documentation":"

      A unique name for the S3 bucket.

      " + }, + "s3Key":{ + "shape":"S3Key", + "documentation":"

      The full S3 path (excluding bucket) to the .zip file. This file contains the code that is loaded onto the cluster when it's started.

      " + }, + "s3ObjectVersion":{ + "shape":"S3ObjectVersion", + "documentation":"

      The version of an S3 object.

      " + } + }, + "documentation":"

      The structure of the customer code available within the running cluster.

      " + }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"}, + "reason":{ + "shape":"errorMessage", + "documentation":"

      The reason for the conflict exception.

      " + } + }, + "documentation":"

      There was a conflict with this action, and it could not be completed.

      ", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CooldownTime":{ + "type":"double", + "max":100000, + "min":0 + }, + "CreateEnvironmentRequest":{ + "type":"structure", + "required":["name"], "members":{ "name":{ "shape":"EnvironmentName", @@ -206,235 +830,1742 @@ }, "environmentUrl":{ "shape":"url", - "documentation":"

      The sign-in url for the web application of the FinSpace environment you created.

      " + "documentation":"

      The sign-in URL for the web application of the FinSpace environment you created.

      " } } }, - "DataBundleArn":{ - "type":"string", - "documentation":"

      The Amazon Resource Name (ARN) of the data bundle.

      ", - "max":2048, - "min":20, - "pattern":"^arn:aws:finspace:[A-Za-z0-9_/.-]{0,63}:\\d*:data-bundle/[0-9A-Za-z_-]{1,128}$" - }, - "DataBundleArns":{ - "type":"list", - "member":{"shape":"DataBundleArn"} - }, - "DeleteEnvironmentRequest":{ + "CreateKxChangesetRequest":{ "type":"structure", - "required":["environmentId"], + "required":[ + "environmentId", + "databaseName", + "changeRequests", + "clientToken" + ], "members":{ "environmentId":{ - "shape":"IdType", - "documentation":"

      The identifier for the FinSpace environment.

      ", + "shape":"EnvironmentId", + "documentation":"

      A unique identifier of the kdb environment.

      ", "location":"uri", "locationName":"environmentId" + }, + "databaseName":{ + "shape":"DatabaseName", + "documentation":"

      The name of the kdb database.

      ", + "location":"uri", + "locationName":"databaseName" + }, + "changeRequests":{ + "shape":"ChangeRequests", + "documentation":"

      A list of change request objects that are run in order. A change request object consists of changeType, s3Path, and a dbPath. A changeType can have the following values:

      • PUT – Adds or updates files in a database.

      • DELETE – Deletes files in a database.

      All the change requests require a mandatory dbPath attribute that defines the path within the database directory. The s3Path attribute defines the s3 source file path and is required for a PUT change type.

      Here is an example of how you can use the change request object:

      [ { \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/2020.01.02/\", \"dbPath\":\"/2020.01.02/\"}, { \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/sym\", \"dbPath\":\"/\"}, { \"changeType\": \"DELETE\", \"dbPath\": \"/2020.01.01/\"} ]

      In this example, the first request with PUT change type allows you to add files in the given s3Path under the 2020.01.02 partition of the database. The second request with PUT change type allows you to add a single sym file at database root location. The last request with DELETE change type allows you to delete the files under the 2020.01.01 partition of the database.

      " + }, + "clientToken":{ + "shape":"ClientTokenString", + "documentation":"

      A token that ensures idempotency. This token expires in 10 minutes.

      ", + "idempotencyToken":true } } }, - "DeleteEnvironmentResponse":{ + "CreateKxChangesetResponse":{ "type":"structure", "members":{ + "changesetId":{ + "shape":"ChangesetId", + "documentation":"

      A unique identifier for the changeset.

      " + }, + "databaseName":{ + "shape":"DatabaseName", + "documentation":"

      The name of the kdb database.

      " + }, + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

      A unique identifier for the kdb environment.

      " + }, + "changeRequests":{ + "shape":"ChangeRequests", + "documentation":"

      A list of change requests.

      " + }, + "createdTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the changeset was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " + }, + "lastModifiedTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the changeset was updated in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " + }, + "status":{ + "shape":"ChangesetStatus", + "documentation":"

      Status of the changeset creation process.

      • Pending – Changeset creation is pending.

      • Processing – Changeset creation is running.

      • Failed – Changeset creation has failed.

      • Complete – Changeset creation has succeeded.

      " + }, + "errorInfo":{ + "shape":"ErrorInfo", + "documentation":"

      The details of the error that you receive when creating a changeset. It consists of the type of error and the error message.

      " + } } }, - "Description":{ - "type":"string", - "max":1000, - "min":1, - "pattern":"^[a-zA-Z0-9. ]{1,1000}$" - }, - "EmailId":{ - "type":"string", - "max":128, - "min":1, - "pattern":"[A-Z0-9a-z._%+-]+@[A-Za-z0-9.-]+[.]+[A-Za-z]+", - "sensitive":true - }, - "Environment":{ + "CreateKxClusterRequest":{ "type":"structure", + "required":[ + "environmentId", + "clusterName", + "clusterType", + "capacityConfiguration", + "releaseLabel", + "azMode" + ], "members":{ - "name":{ - "shape":"EnvironmentName", - "documentation":"

      The name of the FinSpace environment.

      " + "clientToken":{ + "shape":"ClientToken", + "documentation":"

      A token that ensures idempotency. This token expires in 10 minutes.

      ", + "idempotencyToken":true }, "environmentId":{ - "shape":"IdType", - "documentation":"

      The identifier of the FinSpace environment.

      " + "shape":"KxEnvironmentId", + "documentation":"

      A unique identifier for the kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" }, - "awsAccountId":{ - "shape":"IdType", - "documentation":"

      The ID of the AWS account in which the FinSpace environment is created.

      " + "clusterName":{ + "shape":"KxClusterName", + "documentation":"

      A unique name for the cluster that you want to create.

      " + }, + "clusterType":{ + "shape":"KxClusterType", + "documentation":"

      Specifies the type of KDB database that is being created. The following types are available:

      • HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.

      • RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter.

      • GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.

      " + }, + "databases":{ + "shape":"KxDatabaseConfigurations", + "documentation":"

      A list of databases that will be available for querying.

      " + }, + "cacheStorageConfigurations":{ + "shape":"KxCacheStorageConfigurations", + "documentation":"

      The configurations for a read only cache storage associated with a cluster. This cache will be stored as an FSx Lustre that reads from the S3 store.

      " + }, + "autoScalingConfiguration":{ + "shape":"AutoScalingConfiguration", + "documentation":"

      The configuration based on which FinSpace will scale in or scale out nodes in your cluster.

      " + }, + "clusterDescription":{ + "shape":"KxClusterDescription", + "documentation":"

      A description of the cluster.

      " + }, + "capacityConfiguration":{ + "shape":"CapacityConfiguration", + "documentation":"

      A structure for the metadata of a cluster. It includes information like the CPUs needed, memory of instances, number of instances, and the port used while establishing a connection.

      " + }, + "releaseLabel":{ + "shape":"ReleaseLabel", + "documentation":"

      The version of FinSpace managed kdb to run.

      " + }, + "vpcConfiguration":{ + "shape":"VpcConfiguration", + "documentation":"

      Configuration details about the network where the Privatelink endpoint of the cluster resides.

      " + }, + "initializationScript":{ + "shape":"InitializationScriptFilePath", + "documentation":"

      Specifies a Q program that will be run at launch of a cluster. It is a relative path within the .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q.

      " + }, + "commandLineArguments":{ + "shape":"KxCommandLineArguments", + "documentation":"

      Defines the key-value pairs to make them available inside the cluster.

      " + }, + "code":{ + "shape":"CodeConfiguration", + "documentation":"

      The details of the custom code that you want to use inside a cluster when analyzing data. It consists of the S3 source bucket, location, S3 object version, and the relative path from where the custom code is loaded into the cluster.

      " + }, + "executionRole":{ + "shape":"ExecutionRoleArn", + "documentation":"

      An IAM role that defines a set of permissions associated with a cluster. These permissions are assumed when a cluster attempts to access another cluster.

      " + }, + "savedownStorageConfiguration":{ + "shape":"KxSavedownStorageConfiguration", + "documentation":"

      The size and type of the temporary storage that is used to hold data during the savedown process. This parameter is required when you choose clusterType as RDB. All the data written to this storage space is lost when the cluster node is restarted.

      " + }, + "azMode":{ + "shape":"KxAzMode", + "documentation":"

      The number of availability zones you want to assign per cluster. This can be one of the following

      • SINGLE – Assigns one availability zone per cluster.

      • MULTI – Assigns all the availability zones per cluster.

      " + }, + "availabilityZoneId":{ + "shape":"AvailabilityZoneId", + "documentation":"

      The availability zone identifiers for the requested regions.

      " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

      A list of key-value pairs to label the cluster. You can add up to 50 tags to a cluster.

      " + } + } + }, + "CreateKxClusterResponse":{ + "type":"structure", + "members":{ + "environmentId":{ + "shape":"KxEnvironmentId", + "documentation":"

      A unique identifier for the kdb environment.

      " }, "status":{ - "shape":"EnvironmentStatus", - "documentation":"

      The current status of creation of the FinSpace environment.

      " + "shape":"KxClusterStatus", + "documentation":"

      The status of cluster creation.

      • PENDING – The cluster is pending creation.

      • CREATING – The cluster creation process is in progress.

      • CREATE_FAILED – The cluster creation process has failed.

      • RUNNING – The cluster creation process is running.

      • UPDATING – The cluster is in the process of being updated.

      • DELETING – The cluster is in the process of being deleted.

      • DELETED – The cluster has been deleted.

      • DELETE_FAILED – The cluster failed to delete.

      " }, - "environmentUrl":{ - "shape":"url", - "documentation":"

      The sign-in url for the web application of your FinSpace environment.

      " + "statusReason":{ + "shape":"KxClusterStatusReason", + "documentation":"

      The error message when a failed state occurs.

      " }, - "description":{ - "shape":"Description", - "documentation":"

      The description of the FinSpace environment.

      " + "clusterName":{ + "shape":"KxClusterName", + "documentation":"

      A unique name for the cluster.

      " }, - "environmentArn":{ - "shape":"EnvironmentArn", - "documentation":"

      The Amazon Resource Name (ARN) of your FinSpace environment.

      " + "clusterType":{ + "shape":"KxClusterType", + "documentation":"

      Specifies the type of KDB database that is being created. The following types are available:

      • HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.

      • RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter.

      • GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.

      " }, - "sageMakerStudioDomainUrl":{ - "shape":"SmsDomainUrl", - "documentation":"

      The url of the integrated FinSpace notebook environment in your web application.

      " + "databases":{ + "shape":"KxDatabaseConfigurations", + "documentation":"

      A list of databases that will be available for querying.

      " }, - "kmsKeyId":{ - "shape":"KmsKeyId", - "documentation":"

      The KMS key id used to encrypt in the FinSpace environment.

      " + "cacheStorageConfigurations":{ + "shape":"KxCacheStorageConfigurations", + "documentation":"

      The configurations for a read only cache storage associated with a cluster. This cache will be stored as an FSx Lustre that reads from the S3 store.

      " }, - "dedicatedServiceAccountId":{ - "shape":"IdType", - "documentation":"

      The AWS account ID of the dedicated service account associated with your FinSpace environment.

      " + "autoScalingConfiguration":{ + "shape":"AutoScalingConfiguration", + "documentation":"

      The configuration based on which FinSpace will scale in or scale out nodes in your cluster.

      " }, - "federationMode":{ - "shape":"FederationMode", - "documentation":"

      The authentication mode for the environment.

      " + "clusterDescription":{ + "shape":"KxClusterDescription", + "documentation":"

      A description of the cluster.

      " }, - "federationParameters":{ - "shape":"FederationParameters", - "documentation":"

      Configuration information when authentication mode is FEDERATED.

      " + "capacityConfiguration":{ + "shape":"CapacityConfiguration", + "documentation":"

      A structure for the metadata of a cluster. It includes information like the CPUs needed, memory of instances, number of instances, and the port used while establishing a connection.

      " + }, + "releaseLabel":{ + "shape":"ReleaseLabel", + "documentation":"

      A version of the FinSpace managed kdb to run.

      " + }, + "vpcConfiguration":{ + "shape":"VpcConfiguration", + "documentation":"

      Configuration details about the network where the Privatelink endpoint of the cluster resides.

      " + }, + "initializationScript":{ + "shape":"InitializationScriptFilePath", + "documentation":"

      Specifies a Q program that will be run at launch of a cluster. It is a relative path within the .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q.

      " + }, + "commandLineArguments":{ + "shape":"KxCommandLineArguments", + "documentation":"

      Defines the key-value pairs to make them available inside the cluster.

      " + }, + "code":{ + "shape":"CodeConfiguration", + "documentation":"

      The details of the custom code that you want to use inside a cluster when analyzing data. It consists of the S3 source bucket, location, S3 object version, and the relative path from where the custom code is loaded into the cluster.

      " + }, + "executionRole":{ + "shape":"ExecutionRoleArn", + "documentation":"

      An IAM role that defines a set of permissions associated with a cluster. These permissions are assumed when a cluster attempts to access another cluster.

      " + }, + "lastModifiedTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The last time that the cluster was modified. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " + }, + "savedownStorageConfiguration":{ + "shape":"KxSavedownStorageConfiguration", + "documentation":"

      The size and type of the temporary storage that is used to hold data during the savedown process. This parameter is required when you choose clusterType as RDB. All the data written to this storage space is lost when the cluster node is restarted.

      " + }, + "azMode":{ + "shape":"KxAzMode", + "documentation":"

      The number of availability zones you want to assign per cluster. This can be one of the following

      • SINGLE – Assigns one availability zone per cluster.

      • MULTI – Assigns all the availability zones per cluster.

      " + }, + "availabilityZoneId":{ + "shape":"AvailabilityZoneId", + "documentation":"

      The availability zone identifiers for the requested regions.

      " + }, + "createdTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the cluster was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " } - }, - "documentation":"

      Represents a FinSpace environment.

      " + } }, - "EnvironmentArn":{ - "type":"string", - "max":2048, - "min":20, + "CreateKxDatabaseRequest":{ + "type":"structure", + "required":[ + "environmentId", + "databaseName", + "clientToken" + ], + "members":{ + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

      A unique identifier for the kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" + }, + "databaseName":{ + "shape":"DatabaseName", + "documentation":"

      The name of the kdb database.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      A description of the database.

      " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

      A list of key-value pairs to label the kdb database. You can add up to 50 tags to your kdb database

      " + }, + "clientToken":{ + "shape":"ClientTokenString", + "documentation":"

      A token that ensures idempotency. This token expires in 10 minutes.

      ", + "idempotencyToken":true + } + } + }, + "CreateKxDatabaseResponse":{ + "type":"structure", + "members":{ + "databaseName":{ + "shape":"DatabaseName", + "documentation":"

      The name of the kdb database.

      " + }, + "databaseArn":{ + "shape":"DatabaseArn", + "documentation":"

      The ARN identifier of the database.

      " + }, + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

      A unique identifier for the kdb environment.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      A description of the database.

      " + }, + "createdTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the database is created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " + }, + "lastModifiedTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The last time that the database was updated in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " + } + } + }, + "CreateKxEnvironmentRequest":{ + "type":"structure", + "required":[ + "name", + "kmsKeyId" + ], + "members":{ + "name":{ + "shape":"KxEnvironmentName", + "documentation":"

      The name of the kdb environment that you want to create.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      A description for the kdb environment.

      " + }, + "kmsKeyId":{ + "shape":"KmsKeyARN", + "documentation":"

      The KMS key ID to encrypt your data in the FinSpace environment.

      " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

      A list of key-value pairs to label the kdb environment. You can add up to 50 tags to your kdb environment.

      " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

      A token that ensures idempotency. This token expires in 10 minutes.

      " + } + } + }, + "CreateKxEnvironmentResponse":{ + "type":"structure", + "members":{ + "name":{ + "shape":"KxEnvironmentName", + "documentation":"

      The name of the kdb environment.

      " + }, + "status":{ + "shape":"EnvironmentStatus", + "documentation":"

      The status of the kdb environment.

      " + }, + "environmentId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the kdb environment.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      A description for the kdb environment.

      " + }, + "environmentArn":{ + "shape":"EnvironmentArn", + "documentation":"

      The ARN identifier of the environment.

      " + }, + "kmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

      The KMS key ID to encrypt your data in the FinSpace environment.

      " + }, + "creationTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the kdb environment was created in FinSpace.

      " + } + } + }, + "CreateKxUserRequest":{ + "type":"structure", + "required":[ + "environmentId", + "userName", + "iamRole" + ], + "members":{ + "environmentId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the kdb environment where you want to create a user.

      ", + "location":"uri", + "locationName":"environmentId" + }, + "userName":{ + "shape":"KxUserNameString", + "documentation":"

      A unique identifier for the user.

      " + }, + "iamRole":{ + "shape":"RoleArn", + "documentation":"

      The IAM role ARN that will be associated with the user.

      " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

      A list of key-value pairs to label the user. You can add up to 50 tags to a user.

      " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

      A token that ensures idempotency. This token expires in 10 minutes.

      " + } + } + }, + "CreateKxUserResponse":{ + "type":"structure", + "members":{ + "userName":{ + "shape":"KxUserNameString", + "documentation":"

      A unique identifier for the user.

      " + }, + "userArn":{ + "shape":"KxUserArn", + "documentation":"

      The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

      " + }, + "environmentId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the kdb environment.

      " + }, + "iamRole":{ + "shape":"RoleArn", + "documentation":"

      The IAM role ARN that will be associated with the user.

      " + } + } + }, + "CustomDNSConfiguration":{ + "type":"list", + "member":{"shape":"CustomDNSServer"} + }, + "CustomDNSServer":{ + "type":"structure", + "required":[ + "customDNSServerName", + "customDNSServerIP" + ], + "members":{ + "customDNSServerName":{ + "shape":"ValidHostname", + "documentation":"

      The name of the DNS server.

      " + }, + "customDNSServerIP":{ + "shape":"ValidIPAddress", + "documentation":"

      The IP address of the DNS server.

      " + } + }, + "documentation":"

      A list of DNS server name and server IP. This is used to set up Route-53 outbound resolvers.

      " + }, + "DataBundleArn":{ + "type":"string", + "documentation":"

      The Amazon Resource Name (ARN) of the data bundle.

      ", + "max":2048, + "min":20, + "pattern":"^arn:aws:finspace:[A-Za-z0-9_/.-]{0,63}:\\d*:data-bundle/[0-9A-Za-z_-]{1,128}$" + }, + "DataBundleArns":{ + "type":"list", + "member":{"shape":"DataBundleArn"} + }, + "DatabaseArn":{"type":"string"}, + "DatabaseName":{ + "type":"string", + "max":63, + "min":3, + "pattern":"^[a-zA-Z0-9][a-zA-Z0-9-_]*[a-zA-Z0-9]$" + }, + "DbPath":{ + "type":"string", + "max":1025, + "min":1, + "pattern":"^\\/([^\\/]+\\/){0,2}[^\\/]*$" + }, + "DbPaths":{ + "type":"list", + "member":{"shape":"DbPath"} + }, + "DeleteEnvironmentRequest":{ + "type":"structure", + "required":["environmentId"], + "members":{ + "environmentId":{ + "shape":"IdType", + "documentation":"

      The identifier for the FinSpace environment.

      ", + "location":"uri", + "locationName":"environmentId" + } + } + }, + "DeleteEnvironmentResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteKxClusterRequest":{ + "type":"structure", + "required":[ + "environmentId", + "clusterName" + ], + "members":{ + "environmentId":{ + "shape":"KxEnvironmentId", + "documentation":"

      A unique identifier for the kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" + }, + "clusterName":{ + "shape":"KxClusterName", + "documentation":"

      The name of the cluster that you want to delete.

      ", + "location":"uri", + "locationName":"clusterName" + }, + "clientToken":{ + "shape":"ClientTokenString", + "documentation":"

      A token that ensures idempotency. This token expires in 10 minutes.

      ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "DeleteKxClusterResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteKxDatabaseRequest":{ + "type":"structure", + "required":[ + "environmentId", + "databaseName", + "clientToken" + ], + "members":{ + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

      A unique identifier for the kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" + }, + "databaseName":{ + "shape":"DatabaseName", + "documentation":"

      The name of the kdb database that you want to delete.

      ", + "location":"uri", + "locationName":"databaseName" + }, + "clientToken":{ + "shape":"ClientTokenString", + "documentation":"

      A token that ensures idempotency. This token expires in 10 minutes.

      ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "DeleteKxDatabaseResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteKxEnvironmentRequest":{ + "type":"structure", + "required":["environmentId"], + "members":{ + "environmentId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" + } + } + }, + "DeleteKxEnvironmentResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteKxUserRequest":{ + "type":"structure", + "required":[ + "userName", + "environmentId" + ], + "members":{ + "userName":{ + "shape":"KxUserNameString", + "documentation":"

      A unique identifier for the user that you want to delete.

      ", + "location":"uri", + "locationName":"userName" + }, + "environmentId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" + } + } + }, + "DeleteKxUserResponse":{ + "type":"structure", + "members":{ + } + }, + "Description":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"^[a-zA-Z0-9. ]{1,1000}$" + }, + "EmailId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[A-Z0-9a-z._%+-]+@[A-Za-z0-9.-]+[.]+[A-Za-z]+", + "sensitive":true + }, + "Environment":{ + "type":"structure", + "members":{ + "name":{ + "shape":"EnvironmentName", + "documentation":"

      The name of the FinSpace environment.

      " + }, + "environmentId":{ + "shape":"IdType", + "documentation":"

      The identifier of the FinSpace environment.

      " + }, + "awsAccountId":{ + "shape":"IdType", + "documentation":"

      The ID of the AWS account in which the FinSpace environment is created.

      " + }, + "status":{ + "shape":"EnvironmentStatus", + "documentation":"

      The current status of creation of the FinSpace environment.

      " + }, + "environmentUrl":{ + "shape":"url", + "documentation":"

      The sign-in URL for the web application of your FinSpace environment.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      The description of the FinSpace environment.

      " + }, + "environmentArn":{ + "shape":"EnvironmentArn", + "documentation":"

      The Amazon Resource Name (ARN) of your FinSpace environment.

      " + }, + "sageMakerStudioDomainUrl":{ + "shape":"SmsDomainUrl", + "documentation":"

      The URL of the integrated FinSpace notebook environment in your web application.

      " + }, + "kmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

      The KMS key id used to encrypt in the FinSpace environment.

      " + }, + "dedicatedServiceAccountId":{ + "shape":"IdType", + "documentation":"

      The AWS account ID of the dedicated service account associated with your FinSpace environment.

      " + }, + "federationMode":{ + "shape":"FederationMode", + "documentation":"

      The authentication mode for the environment.

      " + }, + "federationParameters":{ + "shape":"FederationParameters", + "documentation":"

      Configuration information when authentication mode is FEDERATED.

      " + } + }, + "documentation":"

      Represents a FinSpace environment.

      " + }, + "EnvironmentArn":{ + "type":"string", + "max":2048, + "min":20, "pattern":"^arn:aws:finspace:[A-Za-z0-9_/.-]{0,63}:\\d+:environment/[0-9A-Za-z_-]{1,128}$" }, - "EnvironmentList":{ - "type":"list", - "member":{"shape":"Environment"} + "EnvironmentErrorMessage":{ + "type":"string", + "max":1000, + "min":0, + "pattern":"^[a-zA-Z0-9. ]{1,1000}$" + }, + "EnvironmentId":{ + "type":"string", + "max":32, + "min":1, + "pattern":".*\\S.*" + }, + "EnvironmentList":{ + "type":"list", + "member":{"shape":"Environment"} + }, + "EnvironmentName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]$" + }, + "EnvironmentStatus":{ + "type":"string", + "enum":[ + "CREATE_REQUESTED", + "CREATING", + "CREATED", + "DELETE_REQUESTED", + "DELETING", + "DELETED", + "FAILED_CREATION", + "RETRY_DELETION", + "FAILED_DELETION", + "UPDATE_NETWORK_REQUESTED", + "UPDATING_NETWORK", + "FAILED_UPDATING_NETWORK", + "SUSPENDED" + ] + }, + "ErrorDetails":{ + "type":"string", + "enum":[ + "The inputs to this request are invalid.", + "Service limits have been exceeded.", + "Missing required permission to perform this request.", + "One or more inputs to this request were not found.", + "The system temporarily lacks sufficient resources to process the request.", + "An internal error has occurred.", + "Cancelled", + "A user recoverable error has occurred" + ] + }, + "ErrorInfo":{ + "type":"structure", + "members":{ + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"

      Specifies the error message that appears if a flow fails.

      " + }, + "errorType":{ + "shape":"ErrorDetails", + "documentation":"

      Specifies the type of error.

      " + } + }, + "documentation":"

      Provides details in the event of a failed flow, including the error type and the related error message.

      " + }, + "ErrorMessage":{ + "type":"string", + "max":1000 + }, + "ExecutionRoleArn":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^arn:aws[a-z0-9-]*:iam::\\d{12}:role\\/[\\w-\\/.@+=,]{1,1017}$" + }, + "FederationAttributeKey":{ + "type":"string", + "max":32, + "min":1, + "pattern":".*" + }, + "FederationAttributeValue":{ + "type":"string", + "max":1000, + "min":1, + "pattern":".*" + }, + "FederationMode":{ + "type":"string", + "enum":[ + "FEDERATED", + "LOCAL" + ] + }, + "FederationParameters":{ + "type":"structure", + "members":{ + "samlMetadataDocument":{ + "shape":"SamlMetadataDocument", + "documentation":"

      SAML 2.0 Metadata document from identity provider (IdP).

      " + }, + "samlMetadataURL":{ + "shape":"url", + "documentation":"

      Provide the metadata URL from your SAML 2.0 compliant identity provider (IdP).

      " + }, + "applicationCallBackURL":{ + "shape":"url", + "documentation":"

      The redirect or sign-in URL that should be entered into the SAML 2.0 compliant identity provider configuration (IdP).

      " + }, + "federationURN":{ + "shape":"urn", + "documentation":"

      The Uniform Resource Name (URN). Also referred as Service Provider URN or Audience URI or Service Provider Entity ID.

      " + }, + "federationProviderName":{ + "shape":"FederationProviderName", + "documentation":"

      Name of the identity provider (IdP).

      " + }, + "attributeMap":{ + "shape":"AttributeMap", + "documentation":"

      SAML attribute name and value. The name must always be Email and the value should be set to the attribute definition in which user email is set. For example, name would be Email and value http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress. Please check your SAML 2.0 compliant identity provider (IdP) documentation for details.

      " + } + }, + "documentation":"

      Configuration information when authentication mode is FEDERATED.

      " + }, + "FederationProviderName":{ + "type":"string", + "max":32, + "min":1, + "pattern":"[^_\\p{Z}][\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}][^_\\p{Z}]+" + }, + "FinSpaceTaggableArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:aws:finspace:[A-Za-z0-9_/.-]{0,63}:\\d+:(environment|kxEnvironment)/[0-9A-Za-z_-]{1,128}(/(kxDatabase|kxCluster|kxUser)/[a-zA-Z0-9_-]{1,255})?$" + }, + "GetEnvironmentRequest":{ + "type":"structure", + "required":["environmentId"], + "members":{ + "environmentId":{ + "shape":"IdType", + "documentation":"

      The identifier of the FinSpace environment.

      ", + "location":"uri", + "locationName":"environmentId" + } + } + }, + "GetEnvironmentResponse":{ + "type":"structure", + "members":{ + "environment":{ + "shape":"Environment", + "documentation":"

      The name of the FinSpace environment.

      " + } + } + }, + "GetKxChangesetRequest":{ + "type":"structure", + "required":[ + "environmentId", + "databaseName", + "changesetId" + ], + "members":{ + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

      A unique identifier for the kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" + }, + "databaseName":{ + "shape":"DatabaseName", + "documentation":"

      The name of the kdb database.

      ", + "location":"uri", + "locationName":"databaseName" + }, + "changesetId":{ + "shape":"ChangesetId", + "documentation":"

      A unique identifier of the changeset for which you want to retrieve data.

      ", + "location":"uri", + "locationName":"changesetId" + } + } + }, + "GetKxChangesetResponse":{ + "type":"structure", + "members":{ + "changesetId":{ + "shape":"ChangesetId", + "documentation":"

      A unique identifier for the changeset.

      " + }, + "databaseName":{ + "shape":"DatabaseName", + "documentation":"

      The name of the kdb database.

      " + }, + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

      A unique identifier for the kdb environment.

      " + }, + "changeRequests":{ + "shape":"ChangeRequests", + "documentation":"

      A list of change request objects that are run in order.

      " + }, + "createdTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the changeset was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " + }, + "activeFromTimestamp":{ + "shape":"Timestamp", + "documentation":"

      Beginning time from which the changeset is active. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " + }, + "lastModifiedTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the changeset was updated in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " + }, + "status":{ + "shape":"ChangesetStatus", + "documentation":"

      Status of the changeset creation process.

      • Pending – Changeset creation is pending.

      • Processing – Changeset creation is running.

      • Failed – Changeset creation has failed.

      • Complete – Changeset creation has succeeded.

      " + }, + "errorInfo":{ + "shape":"ErrorInfo", + "documentation":"

      Provides details in the event of a failed flow, including the error type and the related error message.

      " + } + } + }, + "GetKxClusterRequest":{ + "type":"structure", + "required":[ + "environmentId", + "clusterName" + ], + "members":{ + "environmentId":{ + "shape":"KxEnvironmentId", + "documentation":"

      A unique identifier for the kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" + }, + "clusterName":{ + "shape":"KxClusterName", + "documentation":"

      The name of the cluster that you want to retrieve.

      ", + "location":"uri", + "locationName":"clusterName" + } + } + }, + "GetKxClusterResponse":{ + "type":"structure", + "members":{ + "status":{ + "shape":"KxClusterStatus", + "documentation":"

      The status of cluster creation.

      • PENDING – The cluster is pending creation.

      • CREATING – The cluster creation process is in progress.

      • CREATE_FAILED – The cluster creation process has failed.

      • RUNNING – The cluster creation process is running.

      • UPDATING – The cluster is in the process of being updated.

      • DELETING – The cluster is in the process of being deleted.

      • DELETED – The cluster has been deleted.

      • DELETE_FAILED – The cluster failed to delete.

      " + }, + "statusReason":{ + "shape":"KxClusterStatusReason", + "documentation":"

      The error message when a failed state occurs.

      " + }, + "clusterName":{ + "shape":"KxClusterName", + "documentation":"

      A unique name for the cluster.

      " + }, + "clusterType":{ + "shape":"KxClusterType", + "documentation":"

      Specifies the type of KDB database that is being created. The following types are available:

      • HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.

      • RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter.

      • GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.

      " + }, + "databases":{ + "shape":"KxDatabaseConfigurations", + "documentation":"

      A list of databases mounted on the cluster.

      " + }, + "cacheStorageConfigurations":{ + "shape":"KxCacheStorageConfigurations", + "documentation":"

      The configurations for a read only cache storage associated with a cluster. This cache will be stored as an FSx Lustre that reads from the S3 store.

      " + }, + "autoScalingConfiguration":{ + "shape":"AutoScalingConfiguration", + "documentation":"

      The configuration based on which FinSpace will scale in or scale out nodes in your cluster.

      " + }, + "clusterDescription":{ + "shape":"KxClusterDescription", + "documentation":"

      A description of the cluster.

      " + }, + "capacityConfiguration":{ + "shape":"CapacityConfiguration", + "documentation":"

      A structure for the metadata of a cluster. It includes information like the CPUs needed, memory of instances, number of instances, and the port used while establishing a connection.

      " + }, + "releaseLabel":{ + "shape":"ReleaseLabel", + "documentation":"

      The version of FinSpace managed kdb to run.

      " + }, + "vpcConfiguration":{ + "shape":"VpcConfiguration", + "documentation":"

      Configuration details about the network where the Privatelink endpoint of the cluster resides.

      " + }, + "initializationScript":{ + "shape":"InitializationScriptFilePath", + "documentation":"

      Specifies a Q program that will be run at launch of a cluster. It is a relative path within .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q.

      " + }, + "commandLineArguments":{ + "shape":"KxCommandLineArguments", + "documentation":"

      Defines key-value pairs to make them available inside the cluster.

      " + }, + "code":{ + "shape":"CodeConfiguration", + "documentation":"

      The details of the custom code that you want to use inside a cluster when analyzing a data. It consists of the S3 source bucket, location, S3 object version, and the relative path from where the custom code is loaded into the cluster.

      " + }, + "executionRole":{ + "shape":"ExecutionRoleArn", + "documentation":"

      An IAM role that defines a set of permissions associated with a cluster. These permissions are assumed when a cluster attempts to access another cluster.

      " + }, + "lastModifiedTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The last time that the cluster was modified. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " + }, + "savedownStorageConfiguration":{ + "shape":"KxSavedownStorageConfiguration", + "documentation":"

      The size and type of the temporary storage that is used to hold data during the savedown process. This parameter is required when you choose clusterType as RDB. All the data written to this storage space is lost when the cluster node is restarted.

      " + }, + "azMode":{ + "shape":"KxAzMode", + "documentation":"

      The number of availability zones you want to assign per cluster. This can be one of the following:

      • SINGLE – Assigns one availability zone per cluster.

      • MULTI – Assigns all the availability zones per cluster.

      " + }, + "availabilityZoneId":{ + "shape":"AvailabilityZoneId", + "documentation":"

      The availability zone identifiers for the requested regions.

      " + }, + "createdTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the cluster was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " + } + } + }, + "GetKxConnectionStringRequest":{ + "type":"structure", + "required":[ + "userArn", + "environmentId", + "clusterName" + ], + "members":{ + "userArn":{ + "shape":"KxUserArn", + "documentation":"

      The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

      ", + "location":"querystring", + "locationName":"userArn" + }, + "environmentId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" + }, + "clusterName":{ + "shape":"KxClusterName", + "documentation":"

      A name of the kdb cluster.

      ", + "location":"querystring", + "locationName":"clusterName" + } + } + }, + "GetKxConnectionStringResponse":{ + "type":"structure", + "members":{ + "signedConnectionString":{ + "shape":"SignedKxConnectionString", + "documentation":"

      The signed connection string that you can use to connect to clusters.

      " + } + } + }, + "GetKxDatabaseRequest":{ + "type":"structure", + "required":[ + "environmentId", + "databaseName" + ], + "members":{ + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

      A unique identifier for the kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" + }, + "databaseName":{ + "shape":"DatabaseName", + "documentation":"

      The name of the kdb database.

      ", + "location":"uri", + "locationName":"databaseName" + } + } + }, + "GetKxDatabaseResponse":{ + "type":"structure", + "members":{ + "databaseName":{ + "shape":"DatabaseName", + "documentation":"

      The name of the kdb database for which the information is retrieved.

      " + }, + "databaseArn":{ + "shape":"DatabaseArn", + "documentation":"

      The ARN identifier of the database.

      " + }, + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

      A unique identifier for the kdb environment.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      A description of the database.

      " + }, + "createdTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the database is created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " + }, + "lastModifiedTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The last time that the database was modified. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " + }, + "lastCompletedChangesetId":{ + "shape":"ChangesetId", + "documentation":"

      A unique identifier for the changeset.

      " + }, + "numBytes":{ + "shape":"numBytes", + "documentation":"

      The total number of bytes in the database.

      " + }, + "numChangesets":{ + "shape":"numChangesets", + "documentation":"

      The total number of changesets in the database.

      " + }, + "numFiles":{ + "shape":"numFiles", + "documentation":"

      The total number of files in the database.

      " + } + } + }, + "GetKxEnvironmentRequest":{ + "type":"structure", + "required":["environmentId"], + "members":{ + "environmentId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" + } + } }, - "EnvironmentName":{ + "GetKxEnvironmentResponse":{ + "type":"structure", + "members":{ + "name":{ + "shape":"KxEnvironmentName", + "documentation":"

      The name of the kdb environment.

      " + }, + "environmentId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the kdb environment.

      " + }, + "awsAccountId":{ + "shape":"IdType", + "documentation":"

      The unique identifier of the AWS account that is used to create the kdb environment.

      " + }, + "status":{ + "shape":"EnvironmentStatus", + "documentation":"

      The status of the kdb environment.

      " + }, + "tgwStatus":{ + "shape":"tgwStatus", + "documentation":"

      The status of the network configuration.

      " + }, + "dnsStatus":{ + "shape":"dnsStatus", + "documentation":"

      The status of DNS configuration.

      " + }, + "errorMessage":{ + "shape":"EnvironmentErrorMessage", + "documentation":"

      Specifies the error message that appears if a flow fails.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      A description for the kdb environment.

      " + }, + "environmentArn":{ + "shape":"EnvironmentArn", + "documentation":"

      The ARN identifier of the environment.

      " + }, + "kmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

      The KMS key ID to encrypt your data in the FinSpace environment.

      " + }, + "dedicatedServiceAccountId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the AWS environment infrastructure account.

      " + }, + "transitGatewayConfiguration":{"shape":"TransitGatewayConfiguration"}, + "customDNSConfiguration":{ + "shape":"CustomDNSConfiguration", + "documentation":"

      A list of DNS server name and server IP. This is used to set up Route-53 outbound resolvers.

      " + }, + "creationTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the kdb environment was created in FinSpace.

      " + }, + "updateTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the kdb environment was updated.

      " + }, + "availabilityZoneIds":{ + "shape":"AvailabilityZoneIds", + "documentation":"

      The identifier of the availability zones where subnets for the environment are created.

      " + }, + "certificateAuthorityArn":{ + "shape":"stringValueLength1to255", + "documentation":"

      The Amazon Resource Name (ARN) of the certificate authority of the kdb environment.

      " + } + } + }, + "GetKxUserRequest":{ + "type":"structure", + "required":[ + "userName", + "environmentId" + ], + "members":{ + "userName":{ + "shape":"KxUserNameString", + "documentation":"

      A unique identifier for the user.

      ", + "location":"uri", + "locationName":"userName" + }, + "environmentId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" + } + } + }, + "GetKxUserResponse":{ + "type":"structure", + "members":{ + "userName":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the user.

      " + }, + "userArn":{ + "shape":"KxUserArn", + "documentation":"

      The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

      " + }, + "environmentId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the kdb environment.

      " + }, + "iamRole":{ + "shape":"RoleArn", + "documentation":"

      The IAM role ARN that is associated with the user.

      " + } + } + }, + "IPAddressType":{ + "type":"string", + "enum":["IP_V4"] + }, + "IdType":{ + "type":"string", + "max":26, + "min":1, + "pattern":"^[a-zA-Z0-9]{1,26}$" + }, + "InitializationScriptFilePath":{ "type":"string", "max":255, "min":1, - "pattern":"^[a-zA-Z0-9]+[a-zA-Z0-9-]*[a-zA-Z0-9]$" + "pattern":"^[a-zA-Z0-9\\_\\-\\.\\/\\\\]+$" }, - "EnvironmentStatus":{ + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

      The request processing has failed because of an unknown error, exception or failure.

      ", + "error":{"httpStatusCode":500}, + "exception":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

      The request is invalid. Something is wrong with the input to the request.

      ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "KmsKeyARN":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"^arn:aws:kms:.*:\\d+:.*$" + }, + "KmsKeyId":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"^[a-zA-Z-0-9-:\\/]*$" + }, + "KxAzMode":{ "type":"string", "enum":[ - "CREATE_REQUESTED", + "SINGLE", + "MULTI" + ] + }, + "KxCacheStorageConfiguration":{ + "type":"structure", + "required":[ + "type", + "size" + ], + "members":{ + "type":{ + "shape":"KxCacheStorageType", + "documentation":"

      The type of cache storage. The valid values are:

      • CACHE_1000 – This type provides at least 1000 MB/s disk access throughput.

      " + }, + "size":{ + "shape":"KxCacheStorageSize", + "documentation":"

      The size of cache in Gigabytes.

      " + } + }, + "documentation":"

      The configuration for read only disk cache associated with a cluster.

      " + }, + "KxCacheStorageConfigurations":{ + "type":"list", + "member":{"shape":"KxCacheStorageConfiguration"} + }, + "KxCacheStorageSize":{ + "type":"integer", + "max":33600, + "min":1200 + }, + "KxCacheStorageType":{ + "type":"string", + "max":10, + "min":8 + }, + "KxChangesetListEntry":{ + "type":"structure", + "members":{ + "changesetId":{ + "shape":"ChangesetId", + "documentation":"

      A unique identifier for the changeset.

      " + }, + "createdTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the changeset was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " + }, + "activeFromTimestamp":{ + "shape":"Timestamp", + "documentation":"

      Beginning time from which the changeset is active. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " + }, + "lastModifiedTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the changeset was modified. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " + }, + "status":{ + "shape":"ChangesetStatus", + "documentation":"

      Status of the changeset.

      • Pending – Changeset creation is pending.

      • Processing – Changeset creation is running.

      • Failed – Changeset creation has failed.

      • Complete – Changeset creation has succeeded.

      " + } + }, + "documentation":"

      Details of changeset.

      " + }, + "KxChangesets":{ + "type":"list", + "member":{"shape":"KxChangesetListEntry"} + }, + "KxCluster":{ + "type":"structure", + "members":{ + "status":{ + "shape":"KxClusterStatus", + "documentation":"

      The status of a cluster.

      • PENDING – The cluster is pending creation.

      • CREATING – The cluster creation process is in progress.

      • CREATE_FAILED – The cluster creation process has failed.

      • RUNNING – The cluster creation process is running.

      • UPDATING – The cluster is in the process of being updated.

      • DELETING – The cluster is in the process of being deleted.

      • DELETED – The cluster has been deleted.

      • DELETE_FAILED – The cluster failed to delete.

      " + }, + "statusReason":{ + "shape":"KxClusterStatusReason", + "documentation":"

      The error message when a failed state occurs.

      " + }, + "clusterName":{ + "shape":"KxClusterName", + "documentation":"

      A unique name for the cluster.

      " + }, + "clusterType":{ + "shape":"KxClusterType", + "documentation":"

      Specifies the type of KDB database that is being created. The following types are available:

      • HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.

      • RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter.

      • GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.

      " + }, + "clusterDescription":{ + "shape":"KxClusterDescription", + "documentation":"

      A description of the cluster.

      " + }, + "releaseLabel":{ + "shape":"ReleaseLabel", + "documentation":"

      A version of the FinSpace managed kdb to run.

      " + }, + "initializationScript":{ + "shape":"InitializationScriptFilePath", + "documentation":"

      Specifies a Q program that will be run at launch of a cluster. It is a relative path within .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q.

      " + }, + "executionRole":{ + "shape":"ExecutionRoleArn", + "documentation":"

      An IAM role that defines a set of permissions associated with a cluster. These permissions are assumed when a cluster attempts to access another cluster.

      " + }, + "azMode":{ + "shape":"KxAzMode", + "documentation":"

      The number of availability zones assigned per cluster. This can be one of the following:

      • SINGLE – Assigns one availability zone per cluster.

      • MULTI – Assigns all the availability zones per cluster.

      " + }, + "availabilityZoneId":{ + "shape":"AvailabilityZoneId", + "documentation":"

      The availability zone identifiers for the requested regions.

      " + }, + "lastModifiedTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The last time that the cluster was modified. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " + }, + "createdTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the cluster was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " + } + }, + "documentation":"

      The details of a kdb cluster.

      " + }, + "KxClusterDescription":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"^[a-zA-Z0-9\\_\\-\\.\\s]+$" + }, + "KxClusterName":{ + "type":"string", + "max":63, + "min":3, + "pattern":"^[a-zA-Z0-9][a-zA-Z0-9-_]*[a-zA-Z0-9]$" + }, + "KxClusterNodeIdString":{ + "type":"string", + "max":40, + "min":1 + }, + "KxClusterStatus":{ + "type":"string", + "enum":[ + "PENDING", "CREATING", - "CREATED", - "DELETE_REQUESTED", + "CREATE_FAILED", + "RUNNING", + "UPDATING", "DELETING", "DELETED", - "FAILED_CREATION", - "RETRY_DELETION", - "FAILED_DELETION", - "SUSPENDED" + "DELETE_FAILED" ] }, - "FederationAttributeKey":{ + "KxClusterStatusReason":{ "type":"string", - "max":32, + "max":250, "min":1, - "pattern":".*" + "pattern":"^[a-zA-Z0-9\\_\\-\\.\\s]+$" }, - "FederationMode":{ + "KxClusterType":{ "type":"string", "enum":[ - "FEDERATED", - "LOCAL" + "HDB", + "RDB", + "GATEWAY" ] }, - "FederationParameters":{ + "KxClusters":{ + "type":"list", + "member":{"shape":"KxCluster"} + }, + "KxCommandLineArgument":{ "type":"structure", "members":{ - "samlMetadataDocument":{ - "shape":"SamlMetadataDocument", - "documentation":"

      SAML 2.0 Metadata document from identity provider (IdP).

      " + "key":{ + "shape":"KxCommandLineArgumentKey", + "documentation":"

      The name of the key.

      " }, - "samlMetadataURL":{ - "shape":"url", - "documentation":"

      Provide the metadata URL from your SAML 2.0 compliant identity provider (IdP).

      " + "value":{ + "shape":"KxCommandLineArgumentValue", + "documentation":"

      The value of the key.

      " + } + }, + "documentation":"

      Defines the key-value pairs to make them available inside the cluster.

      " + }, + "KxCommandLineArgumentKey":{ + "type":"string", + "max":50, + "min":1, + "pattern":"^(?![Aa][Ww][Ss])(s|([a-zA-Z][a-zA-Z0-9_]+))" + }, + "KxCommandLineArgumentValue":{ + "type":"string", + "max":50, + "min":1, + "pattern":"^[a-zA-Z0-9][a-zA-Z0-9_:.]*" + }, + "KxCommandLineArguments":{ + "type":"list", + "member":{"shape":"KxCommandLineArgument"} + }, + "KxDatabaseCacheConfiguration":{ + "type":"structure", + "required":[ + "cacheType", + "dbPaths" + ], + "members":{ + "cacheType":{ + "shape":"KxCacheStorageType", + "documentation":"

      The type of disk cache. This parameter is used to map the database path to cache storage. The valid values are:

      • CACHE_1000 – This type provides at least 1000 MB/s disk access throughput.

      " + }, + "dbPaths":{ + "shape":"DbPaths", + "documentation":"

      Specifies the portions of database that will be loaded into the cache for access.

      " + } + }, + "documentation":"

      The structure of database cache configuration that is used for mapping database paths to cache types in clusters.

      " + }, + "KxDatabaseCacheConfigurations":{ + "type":"list", + "member":{"shape":"KxDatabaseCacheConfiguration"} + }, + "KxDatabaseConfiguration":{ + "type":"structure", + "required":["databaseName"], + "members":{ + "databaseName":{ + "shape":"DatabaseName", + "documentation":"

      The name of the kdb database. When this parameter is specified in the structure, S3 with the whole database is included by default.

      " + }, + "cacheConfigurations":{ + "shape":"KxDatabaseCacheConfigurations", + "documentation":"

      Configuration details for the disk cache used to increase performance reading from a kdb database mounted to the cluster.

      " + }, + "changesetId":{ + "shape":"ChangesetId", + "documentation":"

      A unique identifier of the changeset that is associated with the cluster.

      " + } + }, + "documentation":"

      The configuration of data that is available for querying from this database.

      " + }, + "KxDatabaseConfigurations":{ + "type":"list", + "member":{"shape":"KxDatabaseConfiguration"} + }, + "KxDatabaseListEntry":{ + "type":"structure", + "members":{ + "databaseName":{ + "shape":"DatabaseName", + "documentation":"

      The name of the kdb database.

      " + }, + "createdTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the database was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " + }, + "lastModifiedTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The last time that the database was modified. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " + } + }, + "documentation":"

      Details about a FinSpace managed kdb database.

      " + }, + "KxDatabases":{ + "type":"list", + "member":{"shape":"KxDatabaseListEntry"} + }, + "KxEnvironment":{ + "type":"structure", + "members":{ + "name":{ + "shape":"KxEnvironmentName", + "documentation":"

      The name of the kdb environment.

      " + }, + "environmentId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the kdb environment.

      " + }, + "awsAccountId":{ + "shape":"IdType", + "documentation":"

      The unique identifier of the AWS account in which you create the kdb environment.

      " + }, + "status":{ + "shape":"EnvironmentStatus", + "documentation":"

      The status of the environment creation.

      • CREATE_REQUESTED – Environment creation has been requested.

      • CREATING – Environment is in the process of being created.

      • FAILED_CREATION – Environment creation has failed.

      • CREATED – Environment is successfully created and is currently active.

      • DELETE_REQUESTED – Environment deletion has been requested.

      • DELETING – Environment is in the process of being deleted.

      • RETRY_DELETION – Initial environment deletion failed, system is reattempting delete.

      • DELETED – Environment has been deleted.

      • FAILED_DELETION – Environment deletion has failed.

      " + }, + "tgwStatus":{ + "shape":"tgwStatus", + "documentation":"

      The status of the network configuration.

      " + }, + "dnsStatus":{ + "shape":"dnsStatus", + "documentation":"

      The status of DNS configuration.

      " + }, + "errorMessage":{ + "shape":"EnvironmentErrorMessage", + "documentation":"

      Specifies the error message that appears if a flow fails.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      A description of the kdb environment.

      " + }, + "environmentArn":{ + "shape":"EnvironmentArn", + "documentation":"

      The Amazon Resource Name (ARN) of your kdb environment.

      " + }, + "kmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

      The unique identifier of the KMS key.

      " + }, + "dedicatedServiceAccountId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the AWS environment infrastructure account.

      " + }, + "transitGatewayConfiguration":{ + "shape":"TransitGatewayConfiguration", + "documentation":"

      Specifies the transit gateway and network configuration to connect the kdb environment to an internal network.

      " }, - "applicationCallBackURL":{ - "shape":"url", - "documentation":"

      The redirect or sign-in URL that should be entered into the SAML 2.0 compliant identity provider configuration (IdP).

      " + "customDNSConfiguration":{ + "shape":"CustomDNSConfiguration", + "documentation":"

      A list of DNS server name and server IP. This is used to set up Route-53 outbound resolvers.

      " }, - "federationURN":{ - "shape":"urn", - "documentation":"

      The Uniform Resource Name (URN). Also referred as Service Provider URN or Audience URI or Service Provider Entity ID.

      " + "creationTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the kdb environment was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " }, - "federationProviderName":{ - "shape":"FederationProviderName", - "documentation":"

      Name of the identity provider (IdP).

      " + "updateTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the kdb environment was modified in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " }, - "attributeMap":{ - "shape":"AttributeMap", - "documentation":"

      SAML attribute name and value. The name must always be Email and the value should be set to the attribute definition in which user email is set. For example, name would be Email and value http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress. Please check your SAML 2.0 compliant identity provider (IdP) documentation for details.

      " + "availabilityZoneIds":{ + "shape":"AvailabilityZoneIds", + "documentation":"

      The identifier of the availability zones where subnets for the environment are created.

      " + }, + "certificateAuthorityArn":{ + "shape":"stringValueLength1to255", + "documentation":"

      The Amazon Resource Name (ARN) of the certificate authority of the kdb environment.

      " } }, - "documentation":"

      Configuration information when authentication mode is FEDERATED.

      " + "documentation":"

      The details of a kdb environment.

      " }, - "FederationProviderName":{ + "KxEnvironmentId":{ "type":"string", "max":32, "min":1, - "pattern":"[^_\\p{Z}][\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}][^_\\p{Z}]+" + "pattern":"^[a-z0-9]+$" }, - "GetEnvironmentRequest":{ + "KxEnvironmentList":{ + "type":"list", + "member":{"shape":"KxEnvironment"} + }, + "KxEnvironmentName":{ + "type":"string", + "max":63, + "min":3, + "pattern":"^[a-zA-Z0-9][a-zA-Z0-9-_]*[a-zA-Z0-9]$" + }, + "KxNode":{ "type":"structure", - "required":["environmentId"], "members":{ - "environmentId":{ - "shape":"IdType", - "documentation":"

      The identifier of the FinSpace environment.

      ", - "location":"uri", - "locationName":"environmentId" + "nodeId":{ + "shape":"KxClusterNodeIdString", + "documentation":"

      A unique identifier for the node.

      " + }, + "availabilityZoneId":{ + "shape":"AvailabilityZoneId", + "documentation":"

      The identifier of the availability zones where subnets for the environment are created.

      " + }, + "launchTime":{ + "shape":"Timestamp", + "documentation":"

      The time when a particular node is started. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " } - } + }, + "documentation":"

      A structure that stores metadata for a kdb node.

      " }, - "GetEnvironmentResponse":{ + "KxNodeSummaries":{ + "type":"list", + "member":{"shape":"KxNode"} + }, + "KxSavedownStorageConfiguration":{ "type":"structure", + "required":[ + "type", + "size" + ], "members":{ - "environment":{ - "shape":"Environment", - "documentation":"

      The name of the FinSpace environment.

      " + "type":{ + "shape":"KxSavedownStorageType", + "documentation":"

      The type of writeable storage space for temporarily storing your savedown data. The valid values are:

      • SDS01 – This type represents 3000 IOPS and io2 ebs volume type.

      " + }, + "size":{ + "shape":"KxSavedownStorageSize", + "documentation":"

      The size of temporary storage in bytes.

      " } - } + }, + "documentation":"

      The size and type of temporary storage that is used to hold data during the savedown process. All the data written to this storage space is lost when the cluster node is restarted.

      " }, - "IdType":{ + "KxSavedownStorageSize":{ + "type":"integer", + "max":16000, + "min":4 + }, + "KxSavedownStorageType":{ "type":"string", - "max":26, - "min":1, - "pattern":"^[a-zA-Z0-9]{1,26}$" + "enum":["SDS01"] }, - "InternalServerException":{ + "KxUser":{ "type":"structure", "members":{ - "message":{"shape":"errorMessage"} + "userArn":{ + "shape":"KxUserArn", + "documentation":"

      The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

      " + }, + "userName":{ + "shape":"KxUserNameString", + "documentation":"

      A unique identifier for the user.

      " + }, + "iamRole":{ + "shape":"RoleArn", + "documentation":"

      The IAM role ARN that is associated with the user.

      " + }, + "createTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the kdb user was created.

      " + }, + "updateTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the kdb user was updated.

      " + } }, - "documentation":"

      The request processing has failed because of an unknown error, exception or failure.

      ", - "error":{"httpStatusCode":500}, - "exception":true + "documentation":"

      A structure that stores metadata for a kdb user.

      " }, - "InvalidRequestException":{ - "type":"structure", - "members":{ - "message":{"shape":"errorMessage"} - }, - "documentation":"

      The request is invalid. Something is wrong with the input to the request.

      ", - "error":{"httpStatusCode":400}, - "exception":true + "KxUserArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:aws:finspace:[A-Za-z0-9_/.-]{0,63}:\\d+:kxEnvironment/[0-9A-Za-z_-]{1,128}/kxUser/[0-9A-Za-z_-]{1,128}$" }, - "KmsKeyId":{ + "KxUserList":{ + "type":"list", + "member":{"shape":"KxUser"} + }, + "KxUserNameString":{ "type":"string", - "max":1000, + "max":50, "min":1, - "pattern":"^[a-zA-Z-0-9-:\\/]*$" + "pattern":"^[0-9A-Za-z_-]{1,50}$" }, "LimitExceededException":{ "type":"structure", @@ -450,7 +2581,7 @@ "members":{ "nextToken":{ "shape":"PaginationToken", - "documentation":"

      A token generated by FinSpace that specifies where to continue pagination if a previous request was truncated. To get the next set of pages, pass in the nextToken value from the response object of the previous page call.

      ", + "documentation":"

      A token generated by FinSpace that specifies where to continue pagination if a previous request was truncated. To get the next set of pages, pass in the nextToken value from the response object of the previous page call.

      ", "location":"querystring", "locationName":"nextToken" }, @@ -475,12 +2606,251 @@ } } }, + "ListKxChangesetsRequest":{ + "type":"structure", + "required":[ + "environmentId", + "databaseName" + ], + "members":{ + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

      A unique identifier for the kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" + }, + "databaseName":{ + "shape":"DatabaseName", + "documentation":"

      The name of the kdb database.

      ", + "location":"uri", + "locationName":"databaseName" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

      A token that indicates where a results page should begin.

      ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

      The maximum number of results to return in this request.

      ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListKxChangesetsResponse":{ + "type":"structure", + "members":{ + "kxChangesets":{ + "shape":"KxChangesets", + "documentation":"

      A list of changesets for a database.

      " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

      A token that indicates where a results page should begin.

      " + } + } + }, + "ListKxClusterNodesRequest":{ + "type":"structure", + "required":[ + "clusterName", + "environmentId" + ], + "members":{ + "environmentId":{ + "shape":"KxEnvironmentId", + "documentation":"

      A unique identifier for the kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" + }, + "clusterName":{ + "shape":"KxClusterName", + "documentation":"

      A unique name for the cluster.

      ", + "location":"uri", + "locationName":"clusterName" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

      A token that indicates where a results page should begin.

      ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"ResultLimit", + "documentation":"

      The maximum number of results to return in this request.

      ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListKxClusterNodesResponse":{ + "type":"structure", + "members":{ + "nodes":{ + "shape":"KxNodeSummaries", + "documentation":"

      A list of nodes associated with the cluster.

      " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

      A token that indicates where a results page should begin.

      " + } + } + }, + "ListKxClustersRequest":{ + "type":"structure", + "required":["environmentId"], + "members":{ + "environmentId":{ + "shape":"KxEnvironmentId", + "documentation":"

      A unique identifier for the kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" + }, + "clusterType":{ + "shape":"KxClusterType", + "documentation":"

      Specifies the type of KDB database that is being created. The following types are available:

      • HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.

      • RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter.

      • GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.

      ", + "location":"querystring", + "locationName":"clusterType" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

      The maximum number of results to return in this request.

      ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

      A token that indicates where a results page should begin.

      ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListKxClustersResponse":{ + "type":"structure", + "members":{ + "kxClusterSummaries":{ + "shape":"KxClusters", + "documentation":"

      Lists the cluster details.

      " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

      A token that indicates where a results page should begin.

      " + } + } + }, + "ListKxDatabasesRequest":{ + "type":"structure", + "required":["environmentId"], + "members":{ + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

      A unique identifier for the kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

      A token that indicates where a results page should begin.

      ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

      The maximum number of results to return in this request.

      ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListKxDatabasesResponse":{ + "type":"structure", + "members":{ + "kxDatabases":{ + "shape":"KxDatabases", + "documentation":"

      A list of databases in the kdb environment.

      " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

      A token that indicates where a results page should begin.

      " + } + } + }, + "ListKxEnvironmentsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

      A token that indicates where a results page should begin.

      ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"BoxedInteger", + "documentation":"

      The maximum number of results to return in this request.

      ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListKxEnvironmentsResponse":{ + "type":"structure", + "members":{ + "environments":{ + "shape":"KxEnvironmentList", + "documentation":"

      A list of environments in an account.

      " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

      A token that indicates where a results page should begin.

      " + } + } + }, + "ListKxUsersRequest":{ + "type":"structure", + "required":["environmentId"], + "members":{ + "environmentId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

      A token that indicates where a results page should begin.

      ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"ResultLimit", + "documentation":"

      The maximum number of results to return in this request.

      ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListKxUsersResponse":{ + "type":"structure", + "members":{ + "users":{ + "shape":"KxUserList", + "documentation":"

      A list of users in a kdb environment.

      " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

      A token that indicates where a results page should begin.

      " + } + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["resourceArn"], "members":{ "resourceArn":{ - "shape":"EnvironmentArn", + "shape":"FinSpaceTaggableArn", "documentation":"

      The Amazon Resource Name of the resource.

      ", "location":"uri", "locationName":"resourceArn" @@ -496,11 +2866,27 @@ } } }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":0 + }, "NameString":{ "type":"string", "max":50, "min":1, - "pattern":"^[a-zA-Z0-9]{1,50}$" + "pattern":"^[a-zA-Z0-9]{1,50}$" + }, + "NodeCount":{ + "type":"integer", + "max":5, + "min":1 + }, + "NodeType":{ + "type":"string", + "max":32, + "min":1, + "pattern":"^[a-zA-Z0-9._]+" }, "PaginationToken":{ "type":"string", @@ -508,13 +2894,28 @@ "min":1, "pattern":".*" }, + "ReleaseLabel":{ + "type":"string", + "max":16, + "min":1, + "pattern":"^[a-zA-Z0-9._-]+" + }, + "ResourceAlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

      The specified resource group already exists.

      ", + "error":{"httpStatusCode":409}, + "exception":true + }, "ResourceNotFoundException":{ "type":"structure", "members":{ "message":{"shape":"errorMessage"} }, "documentation":"

      One or more resources can't be found.

      ", - "error":{"httpStatusCode":400}, + "error":{"httpStatusCode":404}, "exception":true }, "ResultLimit":{ @@ -522,12 +2923,51 @@ "max":100, "min":0 }, + "RoleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:aws[a-z\\-]*:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$" + }, + "S3Bucket":{ + "type":"string", + "max":255, + "min":3, + "pattern":"^[a-z0-9][a-z0-9\\.\\-]*[a-z0-9]$" + }, + "S3Key":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^[a-zA-Z0-9\\/\\!\\-_\\.\\*'\\(\\)]+$" + }, + "S3ObjectVersion":{ + "type":"string", + "max":1000, + "min":1 + }, + "S3Path":{ + "type":"string", + "max":1093, + "min":9, + "pattern":"^s3:\\/\\/[a-z0-9][a-z0-9-]{1,61}[a-z0-9]\\/([^\\/]+\\/)*[^\\/]*$" + }, "SamlMetadataDocument":{ "type":"string", "max":10000000, "min":1000, "pattern":".*" }, + "SecurityGroupIdList":{ + "type":"list", + "member":{"shape":"SecurityGroupIdString"} + }, + "SecurityGroupIdString":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^sg-([a-z0-9]{8}$|[a-z0-9]{17}$)" + }, "ServiceQuotaExceededException":{ "type":"structure", "members":{ @@ -537,12 +2977,29 @@ "error":{"httpStatusCode":402}, "exception":true }, + "SignedKxConnectionString":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^(:|:tcps:\\/\\/)[a-zA-Z0-9-\\.\\_]+:\\d+:[a-zA-Z0-9-\\.\\_]+:\\S+$", + "sensitive":true + }, "SmsDomainUrl":{ "type":"string", "max":1000, "min":1, "pattern":"^[a-zA-Z-0-9-:\\/.]*$" }, + "SubnetIdList":{ + "type":"list", + "member":{"shape":"SubnetIdString"} + }, + "SubnetIdString":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^subnet-([a-z0-9]{8}$|[a-z0-9]{17}$)" + }, "SuperuserParameters":{ "type":"structure", "required":[ @@ -593,7 +3050,7 @@ ], "members":{ "resourceArn":{ - "shape":"EnvironmentArn", + "shape":"FinSpaceTaggableArn", "documentation":"

      The Amazon Resource Name (ARN) for the resource.

      ", "location":"uri", "locationName":"resourceArn" @@ -623,6 +3080,30 @@ "error":{"httpStatusCode":429}, "exception":true }, + "Timestamp":{"type":"timestamp"}, + "TransitGatewayConfiguration":{ + "type":"structure", + "required":[ + "transitGatewayID", + "routableCIDRSpace" + ], + "members":{ + "transitGatewayID":{ + "shape":"TransitGatewayID", + "documentation":"

      The identifier of the transit gateway created by the customer to connect outbound traffic from the kdb network to your internal network.

      " + }, + "routableCIDRSpace":{ + "shape":"ValidCIDRSpace", + "documentation":"

      The routing CIDR on behalf of the kdb environment. It could be any /26 range in the 100.64.0.0 CIDR space. After it is provided, it will be added to the customer's transit gateway routing table so that the traffic can be routed to the kdb network.

      " + } + }, + "documentation":"

      The structure of the transit gateway and network configuration that is used to connect the kdb environment to an internal network.

      " + }, + "TransitGatewayID":{ + "type":"string", + "max":32, + "min":1 + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -631,7 +3112,7 @@ ], "members":{ "resourceArn":{ - "shape":"EnvironmentArn", + "shape":"FinSpaceTaggableArn", "documentation":"

      A FinSpace resource from which you want to remove a tag or tags. The value for this parameter is an Amazon Resource Name (ARN).

      ", "location":"uri", "locationName":"resourceArn" @@ -683,6 +3164,338 @@ } } }, + "UpdateKxClusterDatabasesRequest":{ + "type":"structure", + "required":[ + "environmentId", + "clusterName", + "databases" + ], + "members":{ + "environmentId":{ + "shape":"KxEnvironmentId", + "documentation":"

      The unique identifier of a kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" + }, + "clusterName":{ + "shape":"KxClusterName", + "documentation":"

      A unique name for the cluster that you want to modify.

      ", + "location":"uri", + "locationName":"clusterName" + }, + "clientToken":{ + "shape":"ClientTokenString", + "documentation":"

      A token that ensures idempotency. This token expires in 10 minutes.

      " + }, + "databases":{ + "shape":"KxDatabaseConfigurations", + "documentation":"

      The structure of databases mounted on the cluster.

      " + } + } + }, + "UpdateKxClusterDatabasesResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateKxDatabaseRequest":{ + "type":"structure", + "required":[ + "environmentId", + "databaseName", + "clientToken" + ], + "members":{ + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

      A unique identifier for the kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" + }, + "databaseName":{ + "shape":"DatabaseName", + "documentation":"

      The name of the kdb database.

      ", + "location":"uri", + "locationName":"databaseName" + }, + "description":{ + "shape":"Description", + "documentation":"

      A description of the database.

      " + }, + "clientToken":{ + "shape":"ClientTokenString", + "documentation":"

      A token that ensures idempotency. This token expires in 10 minutes.

      ", + "idempotencyToken":true + } + } + }, + "UpdateKxDatabaseResponse":{ + "type":"structure", + "members":{ + "databaseName":{ + "shape":"DatabaseName", + "documentation":"

      The name of the kdb database.

      " + }, + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

      A unique identifier for the kdb environment.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      A description of the database.

      " + }, + "lastModifiedTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The last time that the database was modified. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.

      " + } + } + }, + "UpdateKxEnvironmentNetworkRequest":{ + "type":"structure", + "required":["environmentId"], + "members":{ + "environmentId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" + }, + "transitGatewayConfiguration":{ + "shape":"TransitGatewayConfiguration", + "documentation":"

      Specifies the transit gateway and network configuration to connect the kdb environment to an internal network.

      " + }, + "customDNSConfiguration":{ + "shape":"CustomDNSConfiguration", + "documentation":"

      A list of DNS server name and server IP. This is used to set up Route-53 outbound resolvers.

      " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

      A token that ensures idempotency. This token expires in 10 minutes.

      " + } + } + }, + "UpdateKxEnvironmentNetworkResponse":{ + "type":"structure", + "members":{ + "name":{ + "shape":"KxEnvironmentName", + "documentation":"

      The name of the kdb environment.

      " + }, + "environmentId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the kdb environment.

      " + }, + "awsAccountId":{ + "shape":"IdType", + "documentation":"

      The unique identifier of the AWS account that is used to create the kdb environment.

      " + }, + "status":{ + "shape":"EnvironmentStatus", + "documentation":"

      The status of the kdb environment.

      " + }, + "tgwStatus":{ + "shape":"tgwStatus", + "documentation":"

      The status of the network configuration.

      " + }, + "dnsStatus":{ + "shape":"dnsStatus", + "documentation":"

      The status of DNS configuration.

      " + }, + "errorMessage":{ + "shape":"EnvironmentErrorMessage", + "documentation":"

      Specifies the error message that appears if a flow fails.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      The description of the environment.

      " + }, + "environmentArn":{ + "shape":"EnvironmentArn", + "documentation":"

      The ARN identifier of the environment.

      " + }, + "kmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

      The KMS key ID to encrypt your data in the FinSpace environment.

      " + }, + "dedicatedServiceAccountId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the AWS environment infrastructure account.

      " + }, + "transitGatewayConfiguration":{"shape":"TransitGatewayConfiguration"}, + "customDNSConfiguration":{ + "shape":"CustomDNSConfiguration", + "documentation":"

      A list of DNS server name and server IP. This is used to set up Route-53 outbound resolvers.

      " + }, + "creationTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the kdb environment was created in FinSpace.

      " + }, + "updateTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the kdb environment was updated.

      " + }, + "availabilityZoneIds":{ + "shape":"AvailabilityZoneIds", + "documentation":"

      The identifier of the availability zones where subnets for the environment are created.

      " + } + } + }, + "UpdateKxEnvironmentRequest":{ + "type":"structure", + "required":["environmentId"], + "members":{ + "environmentId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" + }, + "name":{ + "shape":"KxEnvironmentName", + "documentation":"

      The name of the kdb environment.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      A description of the kdb environment.

      " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

      A token that ensures idempotency. This token expires in 10 minutes.

      " + } + } + }, + "UpdateKxEnvironmentResponse":{ + "type":"structure", + "members":{ + "name":{ + "shape":"KxEnvironmentName", + "documentation":"

      The name of the kdb environment.

      " + }, + "environmentId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the kdb environment.

      " + }, + "awsAccountId":{ + "shape":"IdType", + "documentation":"

      The unique identifier of the AWS account that is used to create the kdb environment.

      " + }, + "status":{ + "shape":"EnvironmentStatus", + "documentation":"

      The status of the kdb environment.

      " + }, + "tgwStatus":{ + "shape":"tgwStatus", + "documentation":"

      The status of the network configuration.

      " + }, + "dnsStatus":{ + "shape":"dnsStatus", + "documentation":"

      The status of DNS configuration.

      " + }, + "errorMessage":{ + "shape":"EnvironmentErrorMessage", + "documentation":"

      Specifies the error message that appears if a flow fails.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      The description of the environment.

      " + }, + "environmentArn":{ + "shape":"EnvironmentArn", + "documentation":"

      The ARN identifier of the environment.

      " + }, + "kmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

      The KMS key ID to encrypt your data in the FinSpace environment.

      " + }, + "dedicatedServiceAccountId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the AWS environment infrastructure account.

      " + }, + "transitGatewayConfiguration":{"shape":"TransitGatewayConfiguration"}, + "customDNSConfiguration":{ + "shape":"CustomDNSConfiguration", + "documentation":"

      A list of DNS server name and server IP. This is used to set up Route-53 outbound resolvers.

      " + }, + "creationTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the kdb environment was created in FinSpace.

      " + }, + "updateTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp at which the kdb environment was updated.

      " + }, + "availabilityZoneIds":{ + "shape":"AvailabilityZoneIds", + "documentation":"

      The identifier of the availability zones where subnets for the environment are created.

      " + } + } + }, + "UpdateKxUserRequest":{ + "type":"structure", + "required":[ + "environmentId", + "userName", + "iamRole" + ], + "members":{ + "environmentId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the kdb environment.

      ", + "location":"uri", + "locationName":"environmentId" + }, + "userName":{ + "shape":"KxUserNameString", + "documentation":"

      A unique identifier for the user.

      ", + "location":"uri", + "locationName":"userName" + }, + "iamRole":{ + "shape":"RoleArn", + "documentation":"

      The IAM role ARN that is associated with the user.

      " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

      A token that ensures idempotency. This token expires in 10 minutes.

      " + } + } + }, + "UpdateKxUserResponse":{ + "type":"structure", + "members":{ + "userName":{ + "shape":"KxUserNameString", + "documentation":"

      A unique identifier for the user.

      " + }, + "userArn":{ + "shape":"KxUserArn", + "documentation":"

      The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

      " + }, + "environmentId":{ + "shape":"IdType", + "documentation":"

      A unique identifier for the kdb environment.

      " + }, + "iamRole":{ + "shape":"RoleArn", + "documentation":"

      The IAM role ARN that is associated with the user.

      " + } + } + }, + "ValidCIDRSpace":{ + "type":"string", + "pattern":"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\/26$" + }, + "ValidHostname":{ + "type":"string", + "max":255, + "min":3, + "pattern":"^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])(\\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9]))*$" + }, + "ValidIPAddress":{ + "type":"string", + "pattern":"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$" + }, "ValidationException":{ "type":"structure", "members":{ @@ -692,7 +3505,63 @@ "error":{"httpStatusCode":400}, "exception":true }, + "VpcConfiguration":{ + "type":"structure", + "members":{ + "vpcId":{ + "shape":"VpcIdString", + "documentation":"

      The identifier of the VPC endpoint.

      " + }, + "securityGroupIds":{ + "shape":"SecurityGroupIdList", + "documentation":"

      The unique identifier of the VPC security group applied to the VPC endpoint ENI for the cluster.

      " + }, + "subnetIds":{ + "shape":"SubnetIdList", + "documentation":"

      The identifier of the subnet that the Privatelink VPC endpoint uses to connect to the cluster.

      " + }, + "ipAddressType":{ + "shape":"IPAddressType", + "documentation":"

      The IP address type for cluster network configuration parameters. The following type is available:

      • IP_V4 – IP address version 4

      " + } + }, + "documentation":"

      Configuration details about the network where the Privatelink endpoint of the cluster resides.

      " + }, + "VpcIdString":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^vpc-([a-z0-9]{8}$|[a-z0-9]{17}$)" + }, + "dnsStatus":{ + "type":"string", + "enum":[ + "NONE", + "UPDATE_REQUESTED", + "UPDATING", + "FAILED_UPDATE", + "SUCCESSFULLY_UPDATED" + ] + }, "errorMessage":{"type":"string"}, + "numBytes":{"type":"long"}, + "numChangesets":{"type":"integer"}, + "numFiles":{"type":"integer"}, + "stringValueLength1to255":{ + "type":"string", + "max":255, + "min":1 + }, + "tgwStatus":{ + "type":"string", + "enum":[ + "NONE", + "UPDATE_REQUESTED", + "UPDATING", + "FAILED_UPDATE", + "SUCCESSFULLY_UPDATED" + ] + }, "url":{ "type":"string", "max":1000, diff --git a/services/finspacedata/pom.xml b/services/finspacedata/pom.xml index fe80b55621c3..09558de03f50 100644 --- a/services/finspacedata/pom.xml +++ b/services/finspacedata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT finspacedata AWS Java SDK :: Services :: Finspace Data diff --git a/services/firehose/pom.xml b/services/firehose/pom.xml index 4f70e9eb4700..575cb3d55e2c 100644 --- a/services/firehose/pom.xml +++ b/services/firehose/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT firehose AWS Java SDK :: Services :: Amazon Kinesis Firehose diff --git a/services/fis/pom.xml b/services/fis/pom.xml index 8045b8655207..f6e03b9972c2 100644 --- a/services/fis/pom.xml +++ b/services/fis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT fis AWS Java SDK :: Services :: Fis diff --git a/services/fms/pom.xml b/services/fms/pom.xml index a34681a0046a..07337aa82794 100644 --- a/services/fms/pom.xml +++ b/services/fms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT fms AWS Java SDK :: Services :: FMS diff --git a/services/fms/src/main/resources/codegen-resources/service-2.json 
b/services/fms/src/main/resources/codegen-resources/service-2.json index 28e3069018bd..4dfbe8d02aa2 100644 --- a/services/fms/src/main/resources/codegen-resources/service-2.json +++ b/services/fms/src/main/resources/codegen-resources/service-2.json @@ -208,7 +208,8 @@ {"shape":"InvalidOperationException"}, {"shape":"InvalidInputException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"InternalErrorException"} + {"shape":"InternalErrorException"}, + {"shape":"LimitExceededException"} ], "documentation":"

      Returns information about the specified account's administrative scope. The administrative scope defines the resources that a Firewall Manager administrator can manage.

      " }, @@ -362,7 +363,8 @@ "errors":[ {"shape":"InvalidOperationException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"InternalErrorException"} + {"shape":"InternalErrorException"}, + {"shape":"LimitExceededException"} ], "documentation":"

      Returns an AdminAccounts object that lists the Firewall Manager administrators within the organization that are onboarded to Firewall Manager by AssociateAdminAccount.

      This operation can be called only from the organization's management account.

      " }, diff --git a/services/forecast/pom.xml b/services/forecast/pom.xml index e61367a4d031..6f481713bfd2 100644 --- a/services/forecast/pom.xml +++ b/services/forecast/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT forecast AWS Java SDK :: Services :: Forecast diff --git a/services/forecastquery/pom.xml b/services/forecastquery/pom.xml index 460baa458adf..5b164dd56d1c 100644 --- a/services/forecastquery/pom.xml +++ b/services/forecastquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT forecastquery AWS Java SDK :: Services :: Forecastquery diff --git a/services/frauddetector/pom.xml b/services/frauddetector/pom.xml index 6f364775fced..447e10b9dfab 100644 --- a/services/frauddetector/pom.xml +++ b/services/frauddetector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT frauddetector AWS Java SDK :: Services :: FraudDetector diff --git a/services/frauddetector/src/main/resources/codegen-resources/endpoint-tests.json b/services/frauddetector/src/main/resources/codegen-resources/endpoint-tests.json index 5b98ea5ac793..9c96f885c7eb 100644 --- a/services/frauddetector/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/frauddetector/src/main/resources/codegen-resources/endpoint-tests.json @@ -9,8 +9,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -22,8 +22,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -35,8 +35,8 @@ }, "params": { "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -48,8 +48,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,8 +61,8 
@@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -74,8 +74,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -87,8 +87,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -100,8 +100,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -113,8 +113,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -126,8 +126,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -139,8 +139,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -152,8 +152,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -165,8 +165,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -178,8 +178,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -191,8 +191,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -204,8 +204,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -217,8 +217,19 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For 
region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -230,8 +241,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -243,8 +265,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -256,8 +289,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -269,8 +313,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -282,8 +326,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -295,8 +339,8 @@ } }, "params": { - "UseDualStack": false, 
"UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -307,8 +351,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -319,10 +363,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/frauddetector/src/main/resources/codegen-resources/service-2.json b/services/frauddetector/src/main/resources/codegen-resources/service-2.json index ea033f2c024e..8f8a28fd9de8 100644 --- a/services/frauddetector/src/main/resources/codegen-resources/service-2.json +++ b/services/frauddetector/src/main/resources/codegen-resources/service-2.json @@ -308,7 +308,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

      Deletes the specified event.

      When you delete an event, Amazon Fraud Detector permanently deletes that event and the event data is no longer stored in Amazon Fraud Detector.

      " + "documentation":"

      Deletes the specified event.

      When you delete an event, Amazon Fraud Detector permanently deletes that event and the event data is no longer stored in Amazon Fraud Detector. If deleteAuditHistory is True, event data is available through search for up to 30 seconds after the delete operation is completed.

      " }, "DeleteEventType":{ "name":"DeleteEventType", @@ -2003,7 +2003,7 @@ }, "dataType":{ "shape":"DataType", - "documentation":"

      The data type.

      " + "documentation":"

      The data type of the variable.

      " }, "dataSource":{ "shape":"DataSource", @@ -2051,7 +2051,8 @@ "STRING", "INTEGER", "FLOAT", - "BOOLEAN" + "BOOLEAN", + "DATETIME" ] }, "DataValidationMetrics":{ @@ -2168,7 +2169,7 @@ }, "deleteAuditHistory":{ "shape":"DeleteAuditHistory", - "documentation":"

      Specifies whether or not to delete any predictions associated with the event.

      " + "documentation":"

      Specifies whether or not to delete any predictions associated with the event. If set to True, event data is available through search for up to 30 seconds after the delete operation is completed.

      " } } }, @@ -2694,6 +2695,17 @@ "DISABLED" ] }, + "EventOrchestration":{ + "type":"structure", + "required":["eventBridgeEnabled"], + "members":{ + "eventBridgeEnabled":{ + "shape":"Boolean", + "documentation":"

      Specifies if event orchestration is enabled through Amazon EventBridge.

      " + } + }, + "documentation":"

      The event orchestration status.

      " + }, "EventPredictionSummary":{ "type":"structure", "members":{ @@ -2772,6 +2784,10 @@ "arn":{ "shape":"fraudDetectorArn", "documentation":"

      The entity type ARN.

      " + }, + "eventOrchestration":{ + "shape":"EventOrchestration", + "documentation":"

      The event orchestration status.

      " } }, "documentation":"

      The event type details.

      ", @@ -3836,7 +3852,7 @@ }, "unlabeledEventsTreatment":{ "shape":"UnlabeledEventsTreatment", - "documentation":"

      The action to take for unlabeled events.

      • Use IGNORE if you want the unlabeled events to be ignored. This is recommended when the majority of the events in the dataset are labeled.

      • Use FRAUD if you want to categorize all unlabeled events as “Fraud”. This is recommended when most of the events in your dataset are fraudulent.

      • Use LEGIT f you want to categorize all unlabeled events as “Legit”. This is recommended when most of the events in your dataset are legitimate.

      • Use AUTO if you want Amazon Fraud Detector to decide how to use the unlabeled data. This is recommended when there is significant unlabeled events in the dataset.

      By default, Amazon Fraud Detector ignores the unlabeled data.

      " + "documentation":"

      The action to take for unlabeled events.

      • Use IGNORE if you want the unlabeled events to be ignored. This is recommended when the majority of the events in the dataset are labeled.

      • Use FRAUD if you want to categorize all unlabeled events as “Fraud”. This is recommended when most of the events in your dataset are fraudulent.

      • Use LEGIT if you want to categorize all unlabeled events as “Legit”. This is recommended when most of the events in your dataset are legitimate.

      • Use AUTO if you want Amazon Fraud Detector to decide how to use the unlabeled data. This is recommended when there is significant unlabeled events in the dataset.

      By default, Amazon Fraud Detector ignores the unlabeled data.

      " } }, "documentation":"

      The label schema.

      " @@ -4519,11 +4535,15 @@ }, "eventIngestion":{ "shape":"EventIngestion", - "documentation":"

      Specifies if ingenstion is enabled or disabled.

      " + "documentation":"

      Specifies if ingestion is enabled or disabled.

      " }, "tags":{ "shape":"tagList", "documentation":"

      A collection of key and value pairs.

      " + }, + "eventOrchestration":{ + "shape":"EventOrchestration", + "documentation":"

      Enables or disables event orchestration. If enabled, you can send event predictions to select AWS services for downstream processing of the events.

      " } } }, @@ -4607,7 +4627,7 @@ }, "tags":{ "shape":"tagList", - "documentation":"

      " + "documentation":"

      A collection of key and value pairs.

      " } } }, @@ -5012,7 +5032,7 @@ }, "upperBoundValue":{ "shape":"float", - "documentation":"

      The lower bound value of the area under curve (auc).

      " + "documentation":"

      The upper bound value of the area under curve (auc).

      " } }, "documentation":"

      Range of area under curve (auc) expected from the model. A range greater than 0.1 indicates higher model uncertainty. A range is the difference between upper and lower bound of auc.

      " @@ -5785,5 +5805,5 @@ "pattern":"^([1-9][0-9]*)$" } }, - "documentation":"

      This is the Amazon Fraud Detector API Reference. This guide is for developers who need detailed information about Amazon Fraud Detector API actions, data types, and errors. For more information about Amazon Fraud Detector features, see the Amazon Fraud Detector User Guide.

      We provide the Query API as well as AWS software development kits (SDK) for Amazon Fraud Detector in Java and Python programming languages.

      The Amazon Fraud Detector Query API provides HTTPS requests that use the HTTP verb GET or POST and a Query parameter Action. AWS SDK provides libraries, sample code, tutorials, and other resources for software developers who prefer to build applications using language-specific APIs instead of submitting a request over HTTP or HTTPS. These libraries provide basic functions that automatically take care of tasks such as cryptographically signing your requests, retrying requests, and handling error responses, so that it is easier for you to get started. For more information about the AWS SDKs, see Tools to build on AWS.

      " + "documentation":"

      This is the Amazon Fraud Detector API Reference. This guide is for developers who need detailed information about Amazon Fraud Detector API actions, data types, and errors. For more information about Amazon Fraud Detector features, see the Amazon Fraud Detector User Guide.

      We provide the Query API as well as AWS software development kits (SDK) for Amazon Fraud Detector in Java and Python programming languages.

      The Amazon Fraud Detector Query API provides HTTPS requests that use the HTTP verb GET or POST and a Query parameter Action. AWS SDK provides libraries, sample code, tutorials, and other resources for software developers who prefer to build applications using language-specific APIs instead of submitting a request over HTTP or HTTPS. These libraries provide basic functions that automatically take care of tasks such as cryptographically signing your requests, retrying requests, and handling error responses, so that it is easier for you to get started. For more information about the AWS SDKs, go to Tools to build on AWS page, scroll down to the SDK section, and choose plus (+) sign to expand the section.

      " } diff --git a/services/fsx/pom.xml b/services/fsx/pom.xml index e16e8c2cbbcc..e740def4929a 100644 --- a/services/fsx/pom.xml +++ b/services/fsx/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT fsx AWS Java SDK :: Services :: FSx diff --git a/services/fsx/src/main/resources/codegen-resources/endpoint-tests.json b/services/fsx/src/main/resources/codegen-resources/endpoint-tests.json index 283b29959239..ee67cf2679d9 100644 --- a/services/fsx/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/fsx/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,8 +8,8 @@ } }, "params": { - "UseFIPS": false, "Region": "af-south-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -21,8 +21,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -34,8 +34,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-northeast-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -47,8 +47,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-northeast-2", + "UseFIPS": false, "UseDualStack": false } }, @@ -60,8 +60,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-northeast-3", + "UseFIPS": false, "UseDualStack": false } }, @@ -73,8 +73,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-south-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -86,8 +86,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-southeast-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -99,8 +99,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-southeast-2", + "UseFIPS": false, "UseDualStack": false } }, @@ -112,8 +112,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ca-central-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -125,8 +125,8 @@ } }, "params": { - "UseFIPS": true, "Region": "ca-central-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -138,8 +138,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-central-1", + 
"UseFIPS": false, "UseDualStack": false } }, @@ -151,8 +151,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-north-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -164,8 +164,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-south-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -177,8 +177,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-west-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -190,8 +190,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-west-2", + "UseFIPS": false, "UseDualStack": false } }, @@ -203,8 +203,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-west-3", + "UseFIPS": false, "UseDualStack": false } }, @@ -216,8 +216,8 @@ } }, "params": { - "UseFIPS": false, "Region": "me-south-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -229,8 +229,8 @@ } }, "params": { - "UseFIPS": false, "Region": "sa-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -242,8 +242,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -255,8 +255,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -268,8 +268,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-2", + "UseFIPS": false, "UseDualStack": false } }, @@ -281,8 +281,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-2", + "UseFIPS": true, "UseDualStack": false } }, @@ -294,8 +294,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-west-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -307,8 +307,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-west-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -320,8 +320,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-west-2", + "UseFIPS": false, "UseDualStack": false } }, @@ -333,8 +333,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-west-2", + "UseFIPS": true, "UseDualStack": false } }, @@ -346,8 +346,8 @@ } }, "params": { - "UseFIPS": 
true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -359,8 +359,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -372,8 +372,8 @@ } }, "params": { - "UseFIPS": false, "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -385,8 +385,8 @@ } }, "params": { - "UseFIPS": false, "Region": "cn-northwest-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -398,8 +398,8 @@ } }, "params": { - "UseFIPS": true, "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -411,8 +411,8 @@ } }, "params": { - "UseFIPS": true, "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -424,8 +424,8 @@ } }, "params": { - "UseFIPS": false, "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -437,8 +437,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-gov-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -450,8 +450,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-gov-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -463,8 +463,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-gov-west-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -476,8 +476,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-gov-west-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -489,8 +489,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-gov-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -502,8 +502,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-gov-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -513,8 +513,8 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "Region": "us-iso-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -526,8 +526,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-iso-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -537,8 +537,8 @@ "error": 
"DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "Region": "us-iso-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -550,8 +550,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-iso-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -561,8 +561,8 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -574,8 +574,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -585,8 +585,8 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -598,8 +598,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -611,8 +611,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -636,8 +636,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -648,8 +648,8 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } diff --git a/services/fsx/src/main/resources/codegen-resources/service-2.json b/services/fsx/src/main/resources/codegen-resources/service-2.json index bb23540564b6..bf179cda8275 100644 --- a/services/fsx/src/main/resources/codegen-resources/service-2.json +++ b/services/fsx/src/main/resources/codegen-resources/service-2.json @@ -107,7 +107,7 @@ 
{"shape":"ServiceLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

      Creates an Amazon FSx for Lustre data repository association (DRA). A data repository association is a link between a directory on the file system and an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository associations on a file system. Data repository associations are supported for all file systems except for Scratch_1 deployment type.

      Each data repository association must have a unique Amazon FSx file system directory and a unique S3 bucket or prefix associated with it. You can configure a data repository association for automatic import only, for automatic export only, or for both. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

      CreateDataRepositoryAssociation isn't supported on Amazon File Cache resources. To create a DRA on Amazon File Cache, use the CreateFileCache operation.

      ", + "documentation":"

      Creates an Amazon FSx for Lustre data repository association (DRA). A data repository association is a link between a directory on the file system and an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository associations on a file system. Data repository associations are supported on all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.

      Each data repository association must have a unique Amazon FSx file system directory and a unique S3 bucket or prefix associated with it. You can configure a data repository association for automatic import only, for automatic export only, or for both. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

      CreateDataRepositoryAssociation isn't supported on Amazon File Cache resources. To create a DRA on Amazon File Cache, use the CreateFileCache operation.

      ", "idempotent":true }, "CreateDataRepositoryTask":{ @@ -304,7 +304,7 @@ {"shape":"ServiceLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

      Deletes a data repository association on an Amazon FSx for Lustre file system. Deleting the data repository association unlinks the file system from the Amazon S3 bucket. When deleting a data repository association, you have the option of deleting the data in the file system that corresponds to the data repository association. Data repository associations are supported for all file systems except for Scratch_1 deployment type.

      ", + "documentation":"

      Deletes a data repository association on an Amazon FSx for Lustre file system. Deleting the data repository association unlinks the file system from the Amazon S3 bucket. When deleting a data repository association, you have the option of deleting the data in the file system that corresponds to the data repository association. Data repository associations are supported on all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.

      ", "idempotent":true }, "DeleteFileCache":{ @@ -387,7 +387,8 @@ {"shape":"BadRequest"}, {"shape":"IncompatibleParameterError"}, {"shape":"InternalServerError"}, - {"shape":"VolumeNotFound"} + {"shape":"VolumeNotFound"}, + {"shape":"ServiceLimitExceeded"} ], "documentation":"

      Deletes an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume.

      " }, @@ -423,7 +424,7 @@ {"shape":"InvalidDataRepositoryType"}, {"shape":"InternalServerError"} ], - "documentation":"

      Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository associations, if one or more AssociationIds values are provided in the request, or if filters are used in the request. Data repository associations are supported on Amazon File Cache resources and all Amazon FSx for Lustre file systems excluding Scratch_1 deployment types.

      You can use filters to narrow the response to include just data repository associations for specific file systems (use the file-system-id filter with the ID of the file system) or caches (use the file-cache-id filter with the ID of the cache), or data repository associations for a specific repository type (use the data-repository-type filter with a value of S3 or NFS). If you don't use filters, the response returns all data repository associations owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.

      When retrieving all data repository associations, you can paginate the response by using the optional MaxResults parameter to limit the number of data repository associations returned in a response. If more data repository associations remain, a NextToken value is returned in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

      ", + "documentation":"

      Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository associations, if one or more AssociationIds values are provided in the request, or if filters are used in the request. Data repository associations are supported on Amazon File Cache resources and all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.

      You can use filters to narrow the response to include just data repository associations for specific file systems (use the file-system-id filter with the ID of the file system) or caches (use the file-cache-id filter with the ID of the cache), or data repository associations for a specific repository type (use the data-repository-type filter with a value of S3 or NFS). If you don't use filters, the response returns all data repository associations owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.

      When retrieving all data repository associations, you can paginate the response by using the optional MaxResults parameter to limit the number of data repository associations returned in a response. If more data repository associations remain, a NextToken value is returned in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

      ", "idempotent":true }, "DescribeDataRepositoryTasks":{ @@ -650,7 +651,7 @@ {"shape":"ServiceLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

      Updates the configuration of an existing data repository association on an Amazon FSx for Lustre file system. Data repository associations are supported for all file systems except for Scratch_1 deployment type.

      ", + "documentation":"

      Updates the configuration of an existing data repository association on an Amazon FSx for Lustre file system. Data repository associations are supported on all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.

      ", "idempotent":true }, "UpdateFileCache":{ @@ -724,7 +725,7 @@ {"shape":"StorageVirtualMachineNotFound"}, {"shape":"UnsupportedOperation"} ], - "documentation":"

      Updates an Amazon FSx for ONTAP storage virtual machine (SVM).

      " + "documentation":"

      Updates an FSx for ONTAP storage virtual machine (SVM).

      " }, "UpdateVolume":{ "name":"UpdateVolume", @@ -972,7 +973,7 @@ }, "AutomaticBackupRetentionDays":{ "type":"integer", - "documentation":"

      The number of days to retain automatic backups. Setting this property to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is 0.

      ", + "documentation":"

      The number of days to retain automatic backups. Setting this property to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is 30.

      ", "max":90, "min":0 }, @@ -1531,7 +1532,10 @@ "documentation":"

      Required with PERSISTENT_1 and PERSISTENT_2 deployment types, provisions the amount of read and write throughput for each 1 tebibyte (TiB) of file system storage capacity, in MB/s/TiB. File system throughput capacity is calculated by multiplying file system storage capacity (TiB) by the PerUnitStorageThroughput (MB/s/TiB). For a 2.4-TiB file system, provisioning 50 MB/s/TiB of PerUnitStorageThroughput yields 120 MB/s of file system throughput. You pay for the amount of throughput that you provision.

      Valid values:

      • For PERSISTENT_1 SSD storage: 50, 100, 200 MB/s/TiB.

      • For PERSISTENT_1 HDD storage: 12, 40 MB/s/TiB.

      • For PERSISTENT_2 SSD storage: 125, 250, 500, 1000 MB/s/TiB.

      " }, "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, - "AutomaticBackupRetentionDays":{"shape":"AutomaticBackupRetentionDays"}, + "AutomaticBackupRetentionDays":{ + "shape":"AutomaticBackupRetentionDays", + "documentation":"

      The number of days to retain automatic backups. Setting this property to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is 0.

      " + }, "CopyTagsToBackups":{ "shape":"Flag", "documentation":"

      (Optional) Not available for use with file systems that are linked to a data repository. A boolean flag indicating whether tags for the file system should be copied to backups. The default value is false. If CopyTagsToBackups is set to true, all file system tags are copied to all automatic and user-initiated backups when the user doesn't specify any backup-specific tags. If CopyTagsToBackups is set to true and you specify one or more backup tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.

      (Default = false)

      For more information, see Working with backups in the Amazon FSx for Lustre User Guide.

      " @@ -1615,11 +1619,11 @@ "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "DeploymentType":{ "shape":"OpenZFSDeploymentType", - "documentation":"

      Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region . Valid values are the following:

      • SINGLE_AZ_1- (Default) Creates file systems with throughput capacities of 64 - 4,096 MB/s. Single_AZ_1 is available in all Amazon Web Services Regions where Amazon FSx for OpenZFS is available, except US West (Oregon).

      • SINGLE_AZ_2- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. Single_AZ_2 is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), and Europe (Ireland) Amazon Web Services Regions.

      For more information, see: Deployment type availability and File system performance in the Amazon FSx for OpenZFS User Guide.

      " + "documentation":"

      Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region . Valid values are the following:

      • SINGLE_AZ_1- (Default) Creates file systems with throughput capacities of 64 - 4,096 MBps. Single_AZ_1 is available in all Amazon Web Services Regions where Amazon FSx for OpenZFS is available.

      • SINGLE_AZ_2- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. Single_AZ_2 is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), and Europe (Ireland) Amazon Web Services Regions.

      For more information, see: Deployment type availability and File system performance in the Amazon FSx for OpenZFS User Guide.

      " }, "ThroughputCapacity":{ "shape":"MegabytesPerSecond", - "documentation":"

      Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MB/s). Valid values depend on the DeploymentType you choose, as follows:

      • For SINGLE_AZ_1, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s.

      • For SINGLE_AZ_2, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MB/s.

      You pay for additional throughput capacity that you provision.

      " + "documentation":"

      Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MBps). Valid values depend on the DeploymentType you choose, as follows:

      • For SINGLE_AZ_1, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MBps.

      • For SINGLE_AZ_2, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MBps.

      You pay for additional throughput capacity that you provision.

      " }, "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"}, "DiskIopsConfiguration":{"shape":"DiskIopsConfiguration"}, @@ -1726,7 +1730,7 @@ }, "AutomaticBackupRetentionDays":{ "shape":"AutomaticBackupRetentionDays", - "documentation":"

      The number of days to retain automatic backups. The default is to retain backups for 7 days. Setting this value to 0 disables the creation of automatic backups. The maximum retention period for backups is 90 days.

      " + "documentation":"

      The number of days to retain automatic backups. Setting this property to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is 30.

      " }, "CopyTagsToBackups":{ "shape":"Flag", @@ -2008,7 +2012,7 @@ }, "DNSName":{ "type":"string", - "documentation":"

      The Domain Name Service (DNS) name for the file system. You can mount your file system using its DNS name.

      ", + "documentation":"

      The file system's DNS name. You can mount your file system using its DNS name.

      ", "max":275, "min":16, "pattern":"^((fs|fc)i?-[0-9a-f]{8,}\\..{4,253})$" @@ -2080,7 +2084,7 @@ "documentation":"

      The configuration for an NFS data repository linked to an Amazon File Cache resource with a data repository association.

      " } }, - "documentation":"

      The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket or an Amazon File Cache resource to an Amazon S3 bucket or an NFS file system. The data repository association configuration object is returned in the response of the following operations:

      • CreateDataRepositoryAssociation

      • UpdateDataRepositoryAssociation

      • DescribeDataRepositoryAssociations

      Data repository associations are supported on Amazon File Cache resources and all Amazon FSx for Lustre file systems excluding Scratch_1 deployment types.

      " + "documentation":"

      The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket or an Amazon File Cache resource to an Amazon S3 bucket or an NFS file system. The data repository association configuration object is returned in the response of the following operations:

      • CreateDataRepositoryAssociation

      • UpdateDataRepositoryAssociation

      • DescribeDataRepositoryAssociations

      Data repository associations are supported on Amazon File Cache resources and all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.

      " }, "DataRepositoryAssociationId":{ "type":"string", @@ -3007,14 +3011,14 @@ "members":{ "Mode":{ "shape":"DiskIopsConfigurationMode", - "documentation":"

      Specifies whether the number of IOPS for the file system is using the system default (AUTOMATIC) or was provisioned by the customer (USER_PROVISIONED).

      " + "documentation":"

      Specifies whether the file system is using the AUTOMATIC setting of SSD IOPS of 3 IOPS per GB of storage capacity, or if it is using a USER_PROVISIONED value.

      " }, "Iops":{ "shape":"Iops", "documentation":"

      The total number of SSD IOPS provisioned for the file system.

      " } }, - "documentation":"

      The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of the total number of provisioned SSD IOPS and how the amount was provisioned (by the customer or by the system).

      " + "documentation":"

      The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or FSx for OpenZFS file system. By default, Amazon FSx automatically provisions 3 IOPS per GB of storage capacity. You can provision additional IOPS per GB of storage. The configuration consists of the total number of provisioned SSD IOPS and how it was provisioned, or the mode (by the customer or by Amazon FSx).

      " }, "DiskIopsConfigurationMode":{ "type":"string", @@ -4044,7 +4048,11 @@ "documentation":"

      (Multi-AZ only) The VPC route tables in which your file system's endpoints are created.

      " }, "ThroughputCapacity":{"shape":"MegabytesPerSecond"}, - "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"} + "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"}, + "FsxAdminPassword":{ + "shape":"AdminPassword", + "documentation":"

      You can use the fsxadmin user account to access the NetApp ONTAP CLI and REST API. The password value is always redacted in the response.

      " + } }, "documentation":"

      Configuration for the FSx for NetApp ONTAP file system.

      " }, @@ -4375,7 +4383,7 @@ }, "ProgressPercent":{ "type":"integer", - "documentation":"

      The current percent of progress of an asynchronous task.

      ", + "documentation":"

      Displays the current percent of progress of an asynchronous task.

      ", "max":100, "min":0 }, @@ -4618,25 +4626,37 @@ "documentation":"

      A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.

      " } }, - "documentation":"

      The configuration that Amazon FSx uses to join a FSx for Windows File Server file system or an ONTAP storage virtual machine (SVM) to a self-managed (including on-premises) Microsoft Active Directory (AD) directory. For more information, see Using Amazon FSx with your self-managed Microsoft Active Directory or Managing SVMs.

      " + "documentation":"

      The configuration that Amazon FSx uses to join a FSx for Windows File Server file system or an FSx for ONTAP storage virtual machine (SVM) to a self-managed (including on-premises) Microsoft Active Directory (AD) directory. For more information, see Using Amazon FSx for Windows with your self-managed Microsoft Active Directory or Managing FSx for ONTAP SVMs.

      " }, "SelfManagedActiveDirectoryConfigurationUpdates":{ "type":"structure", "members":{ "UserName":{ "shape":"DirectoryUserName", - "documentation":"

      The user name for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. This account must have the permission to join computers to the domain in the organizational unit provided in OrganizationalUnitDistinguishedName.

      " + "documentation":"

      Specifies the updated user name for the service account on your self-managed AD domain. Amazon FSx uses this account to join to your self-managed AD domain.

      This account must have the permissions required to join computers to the domain in the organizational unit provided in OrganizationalUnitDistinguishedName.

      " }, "Password":{ "shape":"DirectoryPassword", - "documentation":"

      The password for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain.

      " + "documentation":"

      Specifies the updated password for the service account on your self-managed AD domain. Amazon FSx uses this account to join to your self-managed AD domain.

      " }, "DnsIps":{ "shape":"DnsIps", - "documentation":"

      A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.

      " + "documentation":"

      A list of up to three DNS server or domain controller IP addresses in your self-managed AD domain.

      " + }, + "DomainName":{ + "shape":"ActiveDirectoryFullyQualifiedName", + "documentation":"

      Specifies an updated fully qualified domain name of your self-managed AD configuration.

      " + }, + "OrganizationalUnitDistinguishedName":{ + "shape":"OrganizationalUnitDistinguishedName", + "documentation":"

      Specifies an updated fully qualified distinguished name of the organization unit within your self-managed AD.

      " + }, + "FileSystemAdministratorsGroup":{ + "shape":"FileSystemAdministratorsGroupName", + "documentation":"

      Specifies the updated name of the self-managed AD domain group whose members are granted administrative privileges for the Amazon FSx resource.

      " } }, - "documentation":"

      The configuration that Amazon FSx uses to join the Windows File Server instance to a self-managed Microsoft Active Directory (AD) directory.

      " + "documentation":"

      Specifies changes you are making to the self-managed Microsoft Active Directory (AD) configuration to which an FSx for Windows File Server file system or an FSx for ONTAP SVM is joined.

      " }, "ServiceLimit":{ "type":"string", @@ -4806,13 +4826,13 @@ }, "StorageCapacity":{ "type":"integer", - "documentation":"

      The storage capacity for your Amazon FSx file system, in gibibytes.

      ", + "documentation":"

      Specifies the file system's storage capacity, in gibibytes (GiB).

      ", "max":2147483647, "min":0 }, "StorageType":{ "type":"string", - "documentation":"

      The storage type for your Amazon FSx file system.

      ", + "documentation":"

      Specifies the file system's storage type.

      ", "enum":[ "SSD", "HDD" @@ -4980,11 +5000,11 @@ "members":{ "NetBiosName":{ "shape":"NetBiosAlias", - "documentation":"

      The NetBIOS name of the Active Directory computer object that is joined to your SVM.

      " + "documentation":"

      The NetBIOS name of the AD computer object to which the SVM is joined.

      " }, "SelfManagedActiveDirectoryConfiguration":{"shape":"SelfManagedActiveDirectoryAttributes"} }, - "documentation":"

      Describes the configuration of the Microsoft Active Directory (AD) directory to which the Amazon FSx for ONTAP storage virtual machine (SVM) is joined. Pleae note, account credentials are not returned in the response payload.

      " + "documentation":"

      Describes the Microsoft Active Directory (AD) directory configuration to which the FSx for ONTAP storage virtual machine (SVM) is joined. Note that account credentials are not returned in the response payload.

      " }, "SvmEndpoint":{ "type":"structure", @@ -5234,7 +5254,10 @@ "documentation":"

      (Optional) The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.

      " }, "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, - "AutomaticBackupRetentionDays":{"shape":"AutomaticBackupRetentionDays"}, + "AutomaticBackupRetentionDays":{ + "shape":"AutomaticBackupRetentionDays", + "documentation":"

      The number of days to retain automatic backups. Setting this property to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is 0.

      " + }, "AutoImportPolicy":{ "shape":"AutoImportPolicyType", "documentation":"

      (Optional) When you create your file system, your existing S3 objects appear as file and directory listings. Use this property to choose how Amazon FSx keeps your file and directory listing up to date as you add or modify objects in your linked S3 bucket. AutoImportPolicy can have the following values:

      • NONE - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the linked S3 bucket when the file system is created. FSx does not update the file and directory listing for any new or changed objects after choosing this option.

      • NEW - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that do not currently exist in the FSx file system.

      • NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose this option.

      • NEW_CHANGED_DELETED - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any objects that were deleted in the S3 bucket.

      This parameter is not supported for file systems with a data repository association.

      " @@ -5261,16 +5284,16 @@ "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "FsxAdminPassword":{ "shape":"AdminPassword", - "documentation":"

      The ONTAP administrative password for the fsxadmin user.

      " + "documentation":"

      Update the password for the fsxadmin user by entering a new password. You use the fsxadmin user to access the NetApp ONTAP CLI and REST API to manage your file system resources. For more information, see Managing resources using NetApp Application.

      " }, "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"}, "DiskIopsConfiguration":{ "shape":"DiskIopsConfiguration", - "documentation":"

      The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of an IOPS mode (AUTOMATIC or USER_PROVISIONED), and in the case of USER_PROVISIONED IOPS, the total number of SSD IOPS provisioned.

      " + "documentation":"

      The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of an IOPS mode (AUTOMATIC or USER_PROVISIONED), and in the case of USER_PROVISIONED IOPS, the total number of SSD IOPS provisioned. For more information, see Updating SSD storage capacity and IOPS.

      " }, "ThroughputCapacity":{ "shape":"MegabytesPerSecond", - "documentation":"

      Specifies the throughput of an FSx for NetApp ONTAP file system, measured in megabytes per second (MBps). Valid values are 128, 256, 512, 1024, 2048, and 4096 MBps.

      " + "documentation":"

      Enter a new value to change the amount of throughput capacity for the file system. Throughput capacity is measured in megabytes per second (MBps). Valid values are 128, 256, 512, 1024, 2048, and 4096 MBps. For more information, see Managing throughput capacity in the FSx for ONTAP User Guide.

      " }, "AddRouteTableIds":{ "shape":"RouteTableIds", @@ -5320,7 +5343,7 @@ }, "StorageCapacity":{ "shape":"StorageCapacity", - "documentation":"

      Use this parameter to increase the storage capacity of an FSx for Windows File Server, FSx for Lustre, FSx for OpenZFS, or FSx for ONTAP file system. Specifies the storage capacity target value, in GiB, to increase the storage capacity for the file system that you're updating.

      You can't make a storage capacity increase request if there is an existing storage capacity increase request in progress.

      For Lustre file systems, the storage capacity target value can be the following:

      • For SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 SSD deployment types, valid values are in multiples of 2400 GiB. The value must be greater than the current storage capacity.

      • For PERSISTENT HDD file systems, valid values are multiples of 6000 GiB for 12-MBps throughput per TiB file systems and multiples of 1800 GiB for 40-MBps throughput per TiB file systems. The values must be greater than the current storage capacity.

      • For SCRATCH_1 file systems, you can't increase the storage capacity.

      For more information, see Managing storage and throughput capacity in the FSx for Lustre User Guide.

      For FSx for OpenZFS file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. For more information, see Managing storage capacity in the FSx for OpenZFS User Guide.

      For Windows file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. To increase storage capacity, the file system must have at least 16 MBps of throughput capacity. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide.

      For ONTAP file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. For more information, see Managing storage capacity and provisioned IOPS in the Amazon FSx for NetApp ONTAP User Guide.

      " + "documentation":"

      Use this parameter to increase the storage capacity of an FSx for Windows File Server, FSx for Lustre, FSx for OpenZFS, or FSx for ONTAP file system. Specifies the storage capacity target value, in GiB, to increase the storage capacity for the file system that you're updating.

      You can't make a storage capacity increase request if there is an existing storage capacity increase request in progress.

      For Lustre file systems, the storage capacity target value can be the following:

      • For SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 SSD deployment types, valid values are in multiples of 2400 GiB. The value must be greater than the current storage capacity.

      • For PERSISTENT HDD file systems, valid values are multiples of 6000 GiB for 12-MBps throughput per TiB file systems and multiples of 1800 GiB for 40-MBps throughput per TiB file systems. The values must be greater than the current storage capacity.

      • For SCRATCH_1 file systems, you can't increase the storage capacity.

      For more information, see Managing storage and throughput capacity in the FSx for Lustre User Guide.

      For FSx for OpenZFS file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. For more information, see Managing storage capacity in the FSx for OpenZFS User Guide.

      For Windows file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. To increase storage capacity, the file system must have at least 16 MBps of throughput capacity. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide.

      For ONTAP file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. For more information, see Managing storage capacity and provisioned IOPS in the Amazon FSx for NetApp ONTAP User Guide.

      " }, "WindowsConfiguration":{ "shape":"UpdateFileSystemWindowsConfiguration", @@ -5330,7 +5353,7 @@ "OntapConfiguration":{"shape":"UpdateFileSystemOntapConfiguration"}, "OpenZFSConfiguration":{ "shape":"UpdateFileSystemOpenZFSConfiguration", - "documentation":"

      The configuration updates for an Amazon FSx for OpenZFS file system.

      " + "documentation":"

      The configuration updates for an FSx for OpenZFS file system.

      " } }, "documentation":"

      The request object for the UpdateFileSystem operation.

      " @@ -5358,7 +5381,7 @@ }, "AutomaticBackupRetentionDays":{ "shape":"AutomaticBackupRetentionDays", - "documentation":"

      The number of days to retain automatic daily backups. Setting this to zero (0) disables automatic daily backups. You can retain automatic daily backups for a maximum of 90 days. For more information, see Working with Automatic Daily Backups.

      " + "documentation":"

      The number of days to retain automatic backups. Setting this property to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is 30. For more information, see Working with Automatic Daily Backups.

      " }, "ThroughputCapacity":{ "shape":"MegabytesPerSecond", @@ -5479,7 +5502,7 @@ "members":{ "ActiveDirectoryConfiguration":{ "shape":"UpdateSvmActiveDirectoryConfiguration", - "documentation":"

      Updates the Microsoft Active Directory (AD) configuration for an SVM that is joined to an AD.

      " + "documentation":"

      Specifies updates to an SVM's Microsoft Active Directory (AD) configuration.

      " }, "ClientRequestToken":{ "shape":"ClientRequestToken", @@ -5491,7 +5514,7 @@ }, "SvmAdminPassword":{ "shape":"AdminPassword", - "documentation":"

      Enter a new SvmAdminPassword if you are updating it.

      " + "documentation":"

      Specifies a new SvmAdminPassword.

      " } } }, @@ -5504,9 +5527,13 @@ "UpdateSvmActiveDirectoryConfiguration":{ "type":"structure", "members":{ - "SelfManagedActiveDirectoryConfiguration":{"shape":"SelfManagedActiveDirectoryConfigurationUpdates"} + "SelfManagedActiveDirectoryConfiguration":{"shape":"SelfManagedActiveDirectoryConfigurationUpdates"}, + "NetBiosName":{ + "shape":"NetBiosAlias", + "documentation":"

      Specifies an updated NetBIOS name of the AD computer object NetBiosName to which an SVM is joined.

      " + } }, - "documentation":"

      Updates the Microsoft Active Directory (AD) configuration of an SVM joined to an AD. Please note, account credentials are not returned in the response payload.

      " + "documentation":"

      Specifies updates to an FSx for ONTAP storage virtual machine's (SVM) Microsoft Active Directory (AD) configuration. Note that account credentials are not returned in the response payload.

      " }, "UpdateVolumeRequest":{ "type":"structure", diff --git a/services/gamelift/pom.xml b/services/gamelift/pom.xml index 2a6f6c2e40cd..a90bf20876b9 100644 --- a/services/gamelift/pom.xml +++ b/services/gamelift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT gamelift AWS Java SDK :: Services :: AWS GameLift diff --git a/services/gamelift/src/main/resources/codegen-resources/service-2.json b/services/gamelift/src/main/resources/codegen-resources/service-2.json index 8af75a8ee213..f9c6980b7ef5 100644 --- a/services/gamelift/src/main/resources/codegen-resources/service-2.json +++ b/services/gamelift/src/main/resources/codegen-resources/service-2.json @@ -44,7 +44,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

      This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

      Locates an available game server and temporarily reserves it to host gameplay and players. This operation is called from a game client or client service (such as a matchmaker) to request hosting resources for a new game session. In response, Amazon GameLift FleetIQ locates an available game server, places it in CLAIMED status for 60 seconds, and returns connection information that players can use to connect to the game server.

      To claim a game server, identify a game server group. You can also specify a game server ID, although this approach bypasses Amazon GameLift FleetIQ placement optimization. Optionally, include game data to pass to the game server at the start of a game session, such as a game map or player information.

      When a game server is successfully claimed, connection information is returned. A claimed game server's utilization status remains AVAILABLE while the claim status is set to CLAIMED for up to 60 seconds. This time period gives the game server time to update its status to UTILIZED after players join. If the game server's status is not updated within 60 seconds, the game server reverts to unclaimed status and is available to be claimed by another request. The claim time period is a fixed value and is not configurable.

      If you try to claim a specific game server, this request will fail in the following cases:

      • If the game server utilization status is UTILIZED.

      • If the game server claim status is CLAIMED.

      When claiming a specific game server, this request will succeed even if the game server is running on an instance in DRAINING status. To avoid this, first check the instance status by calling DescribeGameServerInstances .

      Learn more

      Amazon GameLift FleetIQ Guide

      " + "documentation":"

      This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

      Locates an available game server and temporarily reserves it to host gameplay and players. This operation is called from a game client or client service (such as a matchmaker) to request hosting resources for a new game session. In response, Amazon GameLift FleetIQ locates an available game server, places it in CLAIMED status for 60 seconds, and returns connection information that players can use to connect to the game server.

      To claim a game server, identify a game server group. You can also specify a game server ID, although this approach bypasses Amazon GameLift FleetIQ placement optimization. Optionally, include game data to pass to the game server at the start of a game session, such as a game map or player information. Filter options may be included to further restrict how a game server is chosen, such as only allowing game servers on ACTIVE instances to be claimed.

      When a game server is successfully claimed, connection information is returned. A claimed game server's utilization status remains AVAILABLE while the claim status is set to CLAIMED for up to 60 seconds. This time period gives the game server time to update its status to UTILIZED after players join. If the game server's status is not updated within 60 seconds, the game server reverts to unclaimed status and is available to be claimed by another request. The claim time period is a fixed value and is not configurable.

      If you try to claim a specific game server, this request will fail in the following cases:

      • If the game server utilization status is UTILIZED.

      • If the game server claim status is CLAIMED.

      • If the game server is running on an instance in DRAINING status and provided filter option does not allow placing on DRAINING instances.

      Learn more

      Amazon GameLift FleetIQ Guide

      " }, "CreateAlias":{ "name":"CreateAlias", @@ -79,7 +79,7 @@ {"shape":"TaggingFailedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

      Creates a new Amazon GameLift build resource for your game server binary files. Combine game server binaries into a zip file for use with Amazon GameLift.

      When setting up a new game build for Amazon GameLift, we recommend using the CLI command upload-build . This helper command combines two tasks: (1) it uploads your build files from a file directory to a Amazon GameLift Amazon S3 location, and (2) it creates a new build resource.

      You can use the operation in the following scenarios:

      • To create a new game build with build files that are in an Amazon S3 location under an Amazon Web Services account that you control. To use this option, you give Amazon GameLift access to the Amazon S3 bucket. With permissions in place, specify a build name, operating system, and the Amazon S3 storage location of your game build.

      • To directly upload your build files to a Amazon GameLift Amazon S3 location. To use this option, specify a build name and operating system. This operation creates a new build resource and also returns an Amazon S3 location with temporary access credentials. Use the credentials to manually upload your build files to the specified Amazon S3 location. For more information, see Uploading Objects in the Amazon S3 Developer Guide. After you upload build files to the Amazon GameLift Amazon S3 location, you can't update them.

      If successful, this operation creates a new build resource with a unique build ID and places it in INITIALIZED status. A build must be in READY status before you can create fleets with it.

      Learn more

      Uploading Your Game

      Create a Build with Files in Amazon S3

      All APIs by task

      " + "documentation":"

      Creates a new Amazon GameLift build resource for your game server binary files. Combine game server binaries into a zip file for use with Amazon GameLift.

      When setting up a new game build for Amazon GameLift, we recommend using the CLI command upload-build . This helper command combines two tasks: (1) it uploads your build files from a file directory to an Amazon GameLift Amazon S3 location, and (2) it creates a new build resource.

      You can use the CreateBuild operation in the following scenarios:

      • Create a new game build with build files that are in an Amazon S3 location under an Amazon Web Services account that you control. To use this option, you give Amazon GameLift access to the Amazon S3 bucket. With permissions in place, specify a build name, operating system, and the Amazon S3 storage location of your game build.

      • Upload your build files to an Amazon GameLift Amazon S3 location. To use this option, specify a build name and operating system. This operation creates a new build resource and also returns an Amazon S3 location with temporary access credentials. Use the credentials to manually upload your build files to the specified Amazon S3 location. For more information, see Uploading Objects in the Amazon S3 Developer Guide. After you upload build files to the Amazon GameLift Amazon S3 location, you can't update them.

      If successful, this operation creates a new build resource with a unique build ID and places it in INITIALIZED status. A build must be in READY status before you can create fleets with it.

      Learn more

      Uploading Your Game

      Create a Build with Files in Amazon S3

      All APIs by task

      " }, "CreateFleet":{ "name":"CreateFleet", @@ -1079,7 +1079,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

      Retrieves the location of stored game session logs for a specified game session. When a game session is terminated, Amazon GameLift automatically stores the logs in Amazon S3 and retains them for 14 days. Use this URL to download the logs.

      See the Amazon Web Services Service Limits page for maximum log file sizes. Log files that exceed this limit are not saved.

      All APIs by task

      " + "documentation":"

      Retrieves the location of stored game session logs for a specified game session on Amazon GameLift managed fleets. When a game session is terminated, Amazon GameLift automatically stores the logs in Amazon S3 and retains them for 14 days. Use this URL to download the logs.

      See the Amazon Web Services Service Limits page for maximum log file sizes. Log files that exceed this limit are not saved.

      All APIs by task

      " }, "GetInstanceAccess":{ "name":"GetInstanceAccess", @@ -1987,6 +1987,16 @@ "GENERATED" ] }, + "ClaimFilterOption":{ + "type":"structure", + "members":{ + "InstanceStatuses":{ + "shape":"FilterInstanceStatuses", + "documentation":"

      List of instance statuses that game servers may be claimed on. If provided, the list must contain the ACTIVE status.

      " + } + }, + "documentation":"

      This data type is used with the Amazon GameLift FleetIQ and game server groups.

      Filters which game servers may be claimed when calling ClaimGameServer.

      " + }, "ClaimGameServerInput":{ "type":"structure", "required":["GameServerGroupName"], @@ -2002,6 +2012,10 @@ "GameServerData":{ "shape":"GameServerData", "documentation":"

      A set of custom game server properties, formatted as a single string value. This data is passed to a game client or service when it requests information on game servers.

      " + }, + "FilterOption":{ + "shape":"ClaimFilterOption", + "documentation":"

      Object that restricts how a claimed game server is chosen.

      " } } }, @@ -2177,7 +2191,7 @@ }, "OperatingSystem":{ "shape":"OperatingSystem", - "documentation":"

      The operating system that you built the game server binaries to run on. This value determines the type of fleet resources that you can use for this build. If your game build contains multiple executables, they all must run on the same operating system. If an operating system isn't specified when creating a build, Amazon GameLift uses the default value (WINDOWS_2012). This value can't be changed later.

      " + "documentation":"

      The operating system that your game server binaries run on. This value determines the type of fleet resources that you use for this build. If your game build contains multiple executables, they all must run on the same operating system. You must specify a valid operating system in this request. There is no default value. You can't change a build's operating system later.

      If you have active fleets using the Windows Server 2012 operating system, you can continue to create new builds using this OS until October 10, 2023, when Microsoft ends its support. All others must use Windows Server 2016 when creating new Windows-based builds.

      " }, "Tags":{ "shape":"TagList", @@ -2478,7 +2492,7 @@ }, "TimeoutInSeconds":{ "shape":"WholeNumber", - "documentation":"

      The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status.

      " + "documentation":"

      The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status. By default, this property is set to 600.

      " }, "PlayerLatencyPolicies":{ "shape":"PlayerLatencyPolicyList", @@ -4207,6 +4221,17 @@ }, "documentation":"

      A list of fleet locations where a game session queue can place new game sessions. You can use a filter to temporarily turn off placements for specific locations. For queues that have multi-location fleets, you can use a filter configuration to allow placement with some, but not all, of these locations.

      " }, + "FilterInstanceStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "DRAINING" + ] + }, + "FilterInstanceStatuses":{ + "type":"list", + "member":{"shape":"FilterInstanceStatus"} + }, "FleetAction":{ "type":"string", "enum":["AUTO_SCALING"] @@ -5083,7 +5108,7 @@ }, "TimeoutInSeconds":{ "shape":"WholeNumber", - "documentation":"

      The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status.

      " + "documentation":"

      The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status. By default, this property is set to 600.

      " }, "PlayerLatencyPolicies":{ "shape":"PlayerLatencyPolicyList", @@ -7788,7 +7813,7 @@ }, "TimeoutInSeconds":{ "shape":"WholeNumber", - "documentation":"

      The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status.

      " + "documentation":"

      The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status. By default, this property is set to 600.

      " }, "PlayerLatencyPolicies":{ "shape":"PlayerLatencyPolicyList", diff --git a/services/gamesparks/pom.xml b/services/gamesparks/pom.xml index 5aeedd9ef3fc..f4242f5d8c39 100644 --- a/services/gamesparks/pom.xml +++ b/services/gamesparks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT gamesparks AWS Java SDK :: Services :: Game Sparks diff --git a/services/glacier/pom.xml b/services/glacier/pom.xml index e64afabe25d5..9788b7459b29 100644 --- a/services/glacier/pom.xml +++ b/services/glacier/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT glacier AWS Java SDK :: Services :: Amazon Glacier diff --git a/services/globalaccelerator/pom.xml b/services/globalaccelerator/pom.xml index 00f6c5038e93..1eb326f58d22 100644 --- a/services/globalaccelerator/pom.xml +++ b/services/globalaccelerator/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT globalaccelerator AWS Java SDK :: Services :: Global Accelerator diff --git a/services/glue/pom.xml b/services/glue/pom.xml index bfbe2bf6e70a..0f5b1e3cffa9 100644 --- a/services/glue/pom.xml +++ b/services/glue/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 glue diff --git a/services/glue/src/main/resources/codegen-resources/service-2.json b/services/glue/src/main/resources/codegen-resources/service-2.json index a2258af6ba04..4e38fc15813b 100644 --- a/services/glue/src/main/resources/codegen-resources/service-2.json +++ b/services/glue/src/main/resources/codegen-resources/service-2.json @@ -3420,6 +3420,10 @@ "type":"list", "member":{"shape":"Action"} }, + "AdditionalOptionKeys":{ + "type":"string", + "enum":["performanceTuning.caching"] + }, "AdditionalOptions":{ "type":"map", "key":{"shape":"EnclosedInStringProperty"}, @@ -3518,14 +3522,14 @@ "members":{ "Key":{ "shape":"GenericString", - "documentation":"

      The key when specifying a key-value pair.

      " + "documentation":"

      The key for the additional connection option.

      " }, "Value":{ "shape":"GenericString", - "documentation":"

      The value when specifying a key-value pair.

      " + "documentation":"

      The value for the additional connection option.

      " } }, - "documentation":"

      Specifies an Amazon Redshift data store.

      " + "documentation":"

      Specifies an optional value when connecting to the Redshift cluster.

      " }, "AmazonRedshiftAdvancedOptions":{ "type":"list", @@ -5369,6 +5373,10 @@ "AmazonRedshiftTarget":{ "shape":"AmazonRedshiftTarget", "documentation":"

      Specifies a target that writes to a data target in Amazon Redshift.

      " + }, + "EvaluateDataQualityMultiFrame":{ + "shape":"EvaluateDataQualityMultiFrame", + "documentation":"

      Specifies your data quality evaluation criteria. Allows multiple input data and returns a collection of Dynamic Frames.

      " } }, "documentation":"

      CodeGenConfigurationNode enumerates all valid Node types. One and only one of its member variables can be populated.

      " @@ -6839,11 +6847,11 @@ }, "DefaultArguments":{ "shape":"GenericMap", - "documentation":"

      The default arguments for this job.

      You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes.

      Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job.

      For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide.

      For information about the key-value pairs that Glue consumes to set up your job, see the Special Parameters Used by Glue topic in the developer guide.

      " + "documentation":"

      The default arguments for every run of this job, specified as name-value pairs.

      You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes.

      Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job.

      For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide.

      For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters Used by Glue topic in the developer guide.

      For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray jobs in the developer guide.

      " }, "NonOverridableArguments":{ "shape":"GenericMap", - "documentation":"

      Non-overridable arguments for this job, specified as name-value pairs.

      " + "documentation":"

      Arguments for this job that are not overridden when providing job arguments in a job run, specified as name-value pairs.

      " }, "Connections":{ "shape":"ConnectionsList", @@ -6865,7 +6873,7 @@ }, "MaxCapacity":{ "shape":"NullableDouble", - "documentation":"

      For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.

      Do not set Max Capacity if using WorkerType and NumberOfWorkers.

      The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:

      • When you specify a Python shell job (JobCommand.Name=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.

      • When you specify an Apache Spark ETL job (JobCommand.Name=\"glueetl\") or Apache Spark streaming ETL job (JobCommand.Name=\"gluestreaming\"), you can allocate a minimum of 2 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.

      For Glue version 2.0 jobs, you cannot instead specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers.

      " + "documentation":"

      For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.

      For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers.

      Do not set MaxCapacity if using WorkerType and NumberOfWorkers.

      The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:

      • When you specify a Python shell job (JobCommand.Name=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.

      • When you specify an Apache Spark ETL job (JobCommand.Name=\"glueetl\") or Apache Spark streaming ETL job (JobCommand.Name=\"gluestreaming\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.

      " }, "SecurityConfiguration":{ "shape":"NameString", @@ -6881,7 +6889,7 @@ }, "GlueVersion":{ "shape":"GlueVersionString", - "documentation":"

      Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark.

      For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.

      Jobs that are created without specifying a Glue version default to Glue 0.9.

      " + "documentation":"

      In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue makes available in a job. The Python version indicates the version supported for jobs of type Spark.

      Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, Python and additional libraries available in your Ray job are determined by the Runtime parameter of the Job command.

      For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.

      Jobs that are created without specifying a Glue version default to Glue 0.9.

      " }, "NumberOfWorkers":{ "shape":"NullableInteger", @@ -6889,7 +6897,7 @@ }, "WorkerType":{ "shape":"WorkerType", - "documentation":"

      The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X.

      • For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.

      • For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

      • For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

      • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

      " + "documentation":"

      The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

      • For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.

      • For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

      • For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

      • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

      • For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPU, 64 GB of memory, 128 GB disk), and provides up to 8 Ray workers based on the autoscaler.

      " }, "CodeGenConfigurationNodes":{ "shape":"CodeGenConfigurationNodes", @@ -7674,6 +7682,16 @@ "min":0, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" }, + "DQAdditionalOptions":{ + "type":"map", + "key":{"shape":"AdditionalOptionKeys"}, + "value":{"shape":"GenericString"} + }, + "DQDLAliases":{ + "type":"map", + "key":{"shape":"NodeName"}, + "value":{"shape":"EnclosedInStringProperty"} + }, "DQDLString":{ "type":"string", "max":65536, @@ -7970,6 +7988,10 @@ "Result":{ "shape":"DataQualityRuleResultStatus", "documentation":"

      A pass or fail status for the rule.

      " + }, + "EvaluatedMetrics":{ + "shape":"EvaluatedMetricsMap", + "documentation":"

      A map of metrics associated with the evaluation of the rule.

      " } }, "documentation":"

      Describes the result of the evaluation of a data quality rule.

      " @@ -8124,6 +8146,10 @@ "DatabaseName":{ "shape":"NameString", "documentation":"

      The name of the database where the Glue table exists.

      " + }, + "CatalogId":{ + "shape":"NameString", + "documentation":"

      The catalog id where the Glue table exists.

      " } }, "documentation":"

      An object representing an Glue table.

      " @@ -8139,6 +8165,11 @@ }, "documentation":"

      A data source (an Glue table) for which you want data quality results.

      " }, + "DataSourceMap":{ + "type":"map", + "key":{"shape":"NameString"}, + "value":{"shape":"DataSource"} + }, "Database":{ "type":"structure", "required":["Name"], @@ -8192,6 +8223,10 @@ "DatabaseName":{ "shape":"NameString", "documentation":"

      The name of the catalog database.

      " + }, + "Region":{ + "shape":"NameString", + "documentation":"

      Region of the target database.

      " } }, "documentation":"

      A structure that describes a target database for resource linking.

      " @@ -9551,6 +9586,50 @@ }, "documentation":"

      Specifies your data quality evaluation criteria.

      " }, + "EvaluateDataQualityMultiFrame":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Ruleset" + ], + "members":{ + "Name":{ + "shape":"NodeName", + "documentation":"

      The name of the data quality evaluation.

      " + }, + "Inputs":{ + "shape":"ManyInputs", + "documentation":"

      The inputs of your data quality evaluation. The first input in this list is the primary data source.

      " + }, + "AdditionalDataSources":{ + "shape":"DQDLAliases", + "documentation":"

      The aliases of all data sources except primary.

      " + }, + "Ruleset":{ + "shape":"DQDLString", + "documentation":"

      The ruleset for your data quality evaluation.

      " + }, + "PublishingOptions":{ + "shape":"DQResultsPublishingOptions", + "documentation":"

      Options to configure how your results are published.

      " + }, + "AdditionalOptions":{ + "shape":"DQAdditionalOptions", + "documentation":"

      Options to configure runtime behavior of the transform.

      " + }, + "StopJobOnFailureOptions":{ + "shape":"DQStopJobOnFailureOptions", + "documentation":"

      Options to configure how your job will stop if your data quality evaluation fails.

      " + } + }, + "documentation":"

      Specifies your data quality evaluation criteria.

      " + }, + "EvaluatedMetricsMap":{ + "type":"map", + "key":{"shape":"NameString"}, + "value":{"shape":"NullableDouble"} + }, "EvaluationMetrics":{ "type":"structure", "required":["TransformType"], @@ -10590,6 +10669,10 @@ "ResultIds":{ "shape":"DataQualityResultIdList", "documentation":"

      A list of result IDs for the data quality results for the run.

      " + }, + "AdditionalDataSources":{ + "shape":"DataSourceMap", + "documentation":"

      A map of reference strings to additional data sources you can specify for an evaluation run.

      " } } }, @@ -12953,11 +13036,11 @@ }, "DefaultArguments":{ "shape":"GenericMap", - "documentation":"

      The default arguments for this job, specified as name-value pairs.

      You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes.

      For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide.

      For information about the key-value pairs that Glue consumes to set up your job, see the Special Parameters Used by Glue topic in the developer guide.

      " + "documentation":"

      The default arguments for every run of this job, specified as name-value pairs.

      You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes.

      Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job.

      For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide.

      For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters Used by Glue topic in the developer guide.

      For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray jobs in the developer guide.

      " }, "NonOverridableArguments":{ "shape":"GenericMap", - "documentation":"

      Non-overridable arguments for this job, specified as name-value pairs.

      " + "documentation":"

      Arguments for this job that are not overridden when providing job arguments in a job run, specified as name-value pairs.

      " }, "Connections":{ "shape":"ConnectionsList", @@ -12979,11 +13062,11 @@ }, "MaxCapacity":{ "shape":"NullableDouble", - "documentation":"

      For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.

      For Glue version 2.0 or later jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers.

      Do not set MaxCapacity if using WorkerType and NumberOfWorkers.

      The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:

      • When you specify a Python shell job (JobCommand.Name=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.

      • When you specify an Apache Spark ETL job (JobCommand.Name=\"glueetl\") or Apache Spark streaming ETL job (JobCommand.Name=\"gluestreaming\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.

      " + "documentation":"

      For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.

      For Glue version 2.0 or later jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers.

      Do not set MaxCapacity if using WorkerType and NumberOfWorkers.

      The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:

      • When you specify a Python shell job (JobCommand.Name=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.

      • When you specify an Apache Spark ETL job (JobCommand.Name=\"glueetl\") or Apache Spark streaming ETL job (JobCommand.Name=\"gluestreaming\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.

      " }, "WorkerType":{ "shape":"WorkerType", - "documentation":"

      The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X.

      • For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.

      • For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

      • For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

      • For the G.4X worker type, each worker maps to 4 DPU (16 vCPU, 64 GB of memory, 256 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later jobs.

      • For the G.8X worker type, each worker maps to 8 DPU (32 vCPU, 128 GB of memory, 512 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later jobs.

      • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

      " + "documentation":"

      The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

      • For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.

      • For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.

      • For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.

      • For the G.4X worker type, each worker maps to 4 DPU (16 vCPU, 64 GB of memory, 256 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

      • For the G.8X worker type, each worker maps to 8 DPU (32 vCPU, 128 GB of memory, 512 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

      • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

      • For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPU, 64 GB of memory, 128 GB disk), and provides a default of 8 Ray workers (1 per vCPU).

      " }, "NumberOfWorkers":{ "shape":"NullableInteger", @@ -12999,7 +13082,7 @@ }, "GlueVersion":{ "shape":"GlueVersionString", - "documentation":"

      Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark.

      For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.

      Jobs that are created without specifying a Glue version default to Glue 0.9.

      " + "documentation":"

      In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue makes available in a job. The Python version indicates the version supported for jobs of type Spark.

      Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, Python and additional libraries available in your Ray job are determined by the Runtime parameter of the Job command.

      For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.

      Jobs that are created without specifying a Glue version default to Glue 0.9.

      " }, "CodeGenConfigurationNodes":{ "shape":"CodeGenConfigurationNodes", @@ -13076,7 +13159,7 @@ "members":{ "Name":{ "shape":"GenericString", - "documentation":"

      The name of the job command. For an Apache Spark ETL job, this must be glueetl. For a Python shell job, it must be pythonshell. For an Apache Spark streaming ETL job, this must be gluestreaming.

      " + "documentation":"

      The name of the job command. For an Apache Spark ETL job, this must be glueetl. For a Python shell job, it must be pythonshell. For an Apache Spark streaming ETL job, this must be gluestreaming. For a Ray job, this must be glueray.

      " }, "ScriptLocation":{ "shape":"ScriptLocationString", @@ -13085,6 +13168,10 @@ "PythonVersion":{ "shape":"PythonVersionString", "documentation":"

      The Python version being used to run a Python shell job. Allowed values are 2 or 3.

      " + }, + "Runtime":{ + "shape":"RuntimeNameString", + "documentation":"

      In Ray jobs, Runtime is used to specify the versions of Ray, Python and additional libraries available in your environment. This field is not used in other job types. For supported runtime environment values, see Working with Ray jobs in the Glue Developer Guide.

      " } }, "documentation":"

      Specifies code that runs when a job is run.

      " @@ -13149,7 +13236,7 @@ }, "Arguments":{ "shape":"GenericMap", - "documentation":"

      The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.

      You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes.

      For information about how to specify and consume your own job arguments, see the Calling Glue APIs in Python topic in the developer guide.

      For information about the key-value pairs that Glue consumes to set up your job, see the Special Parameters Used by Glue topic in the developer guide.

      " + "documentation":"

      The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.

      You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes.

      Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job.

      For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide.

      For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters Used by Glue topic in the developer guide.

      For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray jobs in the developer guide.

      " }, "ErrorMessage":{ "shape":"ErrorString", @@ -13175,11 +13262,11 @@ }, "MaxCapacity":{ "shape":"NullableDouble", - "documentation":"

      The number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.

      Do not set Max Capacity if using WorkerType and NumberOfWorkers.

      The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:

      • When you specify a Python shell job (JobCommand.Name=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.

      • When you specify an Apache Spark ETL job (JobCommand.Name=\"glueetl\"), you can allocate a minimum of 2 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.

      " + "documentation":"

      For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.

      For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers.

      Do not set MaxCapacity if using WorkerType and NumberOfWorkers.

      The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:

      • When you specify a Python shell job (JobCommand.Name=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.

      • When you specify an Apache Spark ETL job (JobCommand.Name=\"glueetl\") or Apache Spark streaming ETL job (JobCommand.Name=\"gluestreaming\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.

      " }, "WorkerType":{ "shape":"WorkerType", - "documentation":"

      The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X.

      • For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.

      • For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.

      • For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.

      • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

      " + "documentation":"

      The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

      • For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.

      • For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

      • For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

      • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

      • For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPU, 64 GB of memory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the autoscaler.

      " }, "NumberOfWorkers":{ "shape":"NullableInteger", @@ -13199,7 +13286,7 @@ }, "GlueVersion":{ "shape":"GlueVersionString", - "documentation":"

      Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark.

      For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.

      Jobs that are created without specifying a Glue version default to Glue 0.9.

      " + "documentation":"

      In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that are available in a job. The Python version indicates the version supported for jobs of type Spark.

      Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, Python and additional libraries available in your Ray job are determined by the Runtime parameter of the Job command.

      For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.

      Jobs that are created without specifying a Glue version default to Glue 0.9.

      " }, "DPUSeconds":{ "shape":"NullableDouble", @@ -13255,11 +13342,11 @@ }, "DefaultArguments":{ "shape":"GenericMap", - "documentation":"

      The default arguments for this job.

      You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes.

      For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide.

      For information about the key-value pairs that Glue consumes to set up your job, see the Special Parameters Used by Glue topic in the developer guide.

      " + "documentation":"

      The default arguments for every run of this job, specified as name-value pairs.

      You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes.

      Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job.

      For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide.

      For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters Used by Glue topic in the developer guide.

      For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray jobs in the developer guide.

      " }, "NonOverridableArguments":{ "shape":"GenericMap", - "documentation":"

      Non-overridable arguments for this job, specified as name-value pairs.

      " + "documentation":"

      Arguments for this job that are not overridden when providing job arguments in a job run, specified as name-value pairs.

      " }, "Connections":{ "shape":"ConnectionsList", @@ -13281,11 +13368,11 @@ }, "MaxCapacity":{ "shape":"NullableDouble", - "documentation":"

      For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.

      Do not set Max Capacity if using WorkerType and NumberOfWorkers.

      The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:

      • When you specify a Python shell job (JobCommand.Name=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.

      • When you specify an Apache Spark ETL job (JobCommand.Name=\"glueetl\") or Apache Spark streaming ETL job (JobCommand.Name=\"gluestreaming\"), you can allocate a minimum of 2 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.

      For Glue version 2.0 jobs, you cannot instead specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers.

      " + "documentation":"

      For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.

      For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers.

      Do not set MaxCapacity if using WorkerType and NumberOfWorkers.

      The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:

      • When you specify a Python shell job (JobCommand.Name=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.

      • When you specify an Apache Spark ETL job (JobCommand.Name=\"glueetl\") or Apache Spark streaming ETL job (JobCommand.Name=\"gluestreaming\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.

      " }, "WorkerType":{ "shape":"WorkerType", - "documentation":"

      The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X.

      • For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.

      • For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

      • For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

      • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

      " + "documentation":"

      The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

      • For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.

      • For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

      • For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

      • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

      • For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPU, 64 GB of memory, 128 GB disk), and provides up to 8 Ray workers based on the autoscaler.

      " }, "NumberOfWorkers":{ "shape":"NullableInteger", @@ -13301,7 +13388,7 @@ }, "GlueVersion":{ "shape":"GlueVersionString", - "documentation":"

      Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark.

      For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.

      " + "documentation":"

      In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that are available in a job. The Python version indicates the version supported for jobs of type Spark.

      Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, Python and additional libraries available in your Ray job are determined by the Runtime parameter of the Job command.

      For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.

      Jobs that are created without specifying a Glue version default to Glue 0.9.

      " }, "CodeGenConfigurationNodes":{ "shape":"CodeGenConfigurationNodes", @@ -16287,6 +16374,11 @@ } } }, + "RuntimeNameString":{ + "type":"string", + "max":64, + "pattern":".*" + }, "S3CatalogDeltaSource":{ "type":"structure", "required":[ @@ -17996,6 +18088,10 @@ "RulesetNames":{ "shape":"RulesetNames", "documentation":"

      A list of ruleset names.

      " + }, + "AdditionalDataSources":{ + "shape":"DataSourceMap", + "documentation":"

      A map of reference strings to additional data sources you can specify for an evaluation run.

      " } } }, @@ -18078,7 +18174,7 @@ }, "Arguments":{ "shape":"GenericMap", - "documentation":"

      The job arguments specifically for this run. For this job run, they replace the default arguments set in the job definition itself.

      You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes.

      Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job.

      For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide.

      For information about the key-value pairs that Glue consumes to set up your job, see the Special Parameters Used by Glue topic in the developer guide.

      " + "documentation":"

      The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.

      You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself consumes.

      Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job.

      For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide.

      For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters Used by Glue topic in the developer guide.

      For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray jobs in the developer guide.

      " }, "AllocatedCapacity":{ "shape":"IntegerValue", @@ -18092,7 +18188,7 @@ }, "MaxCapacity":{ "shape":"NullableDouble", - "documentation":"

      The number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.

      Do not set Max Capacity if using WorkerType and NumberOfWorkers.

      The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, or an Apache Spark ETL job:

      • When you specify a Python shell job (JobCommand.Name=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.

      • When you specify an Apache Spark ETL job (JobCommand.Name=\"glueetl\"), you can allocate a minimum of 2 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.

      " + "documentation":"

      For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.

      For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers.

      Do not set MaxCapacity if using WorkerType and NumberOfWorkers.

      The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:

      • When you specify a Python shell job (JobCommand.Name=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.

      • When you specify an Apache Spark ETL job (JobCommand.Name=\"glueetl\") or Apache Spark streaming ETL job (JobCommand.Name=\"gluestreaming\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.

      " }, "SecurityConfiguration":{ "shape":"NameString", @@ -18104,7 +18200,7 @@ }, "WorkerType":{ "shape":"WorkerType", - "documentation":"

      The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X.

      • For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.

      • For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.

      • For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.

      • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

      " + "documentation":"

      The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

      • For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.

      • For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

      • For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

      • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

      • For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPU, 64 GB of memory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the autoscaler.

      " }, "NumberOfWorkers":{ "shape":"NullableInteger", @@ -18648,6 +18744,10 @@ "Name":{ "shape":"NameString", "documentation":"

      The name of the target table.

      " + }, + "Region":{ + "shape":"NameString", + "documentation":"

      Region of the target table.

      " } }, "documentation":"

      A structure that describes a target table for resource linking.

      " @@ -20394,7 +20494,8 @@ "G.2X", "G.025X", "G.4X", - "G.8X" + "G.8X", + "Z.2X" ] }, "Workflow":{ diff --git a/services/grafana/pom.xml b/services/grafana/pom.xml index 249a4b7eaa23..f2e8c3e4bb1a 100644 --- a/services/grafana/pom.xml +++ b/services/grafana/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT grafana AWS Java SDK :: Services :: Grafana diff --git a/services/greengrass/pom.xml b/services/greengrass/pom.xml index 987255d6bde8..2af7638eb517 100644 --- a/services/greengrass/pom.xml +++ b/services/greengrass/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT greengrass AWS Java SDK :: Services :: AWS Greengrass diff --git a/services/greengrassv2/pom.xml b/services/greengrassv2/pom.xml index ef57a83c09cc..da839c8ec3da 100644 --- a/services/greengrassv2/pom.xml +++ b/services/greengrassv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT greengrassv2 AWS Java SDK :: Services :: Greengrass V2 diff --git a/services/groundstation/pom.xml b/services/groundstation/pom.xml index b206ab48596b..948d2e57df90 100644 --- a/services/groundstation/pom.xml +++ b/services/groundstation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT groundstation AWS Java SDK :: Services :: GroundStation diff --git a/services/groundstation/src/main/resources/codegen-resources/endpoint-tests.json b/services/groundstation/src/main/resources/codegen-resources/endpoint-tests.json index 7869aa5ffc58..b004777ce9af 100644 --- a/services/groundstation/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/groundstation/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { + "Region": "af-south-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "af-south-1" + "UseDualStack": false } }, { @@ -21,9 +21,9 @@ } }, "params": { + "Region": "ap-northeast-2", 
"UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { + "Region": "ap-southeast-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { @@ -47,9 +47,9 @@ } }, "params": { + "Region": "eu-central-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { + "Region": "eu-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-north-1" + "UseDualStack": false } }, { @@ -73,9 +73,9 @@ } }, "params": { + "Region": "eu-west-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { + "Region": "me-south-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "me-south-1" + "UseDualStack": false } }, { @@ -99,9 +99,9 @@ } }, "params": { + "Region": "sa-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "sa-east-1" + "UseDualStack": false } }, { @@ -112,9 +112,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -125,9 +125,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -138,9 +138,9 @@ } }, "params": { + "Region": "us-east-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-2" + "UseDualStack": false } }, { @@ -151,9 +151,9 @@ } }, "params": { + "Region": "us-east-2", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-2" + "UseDualStack": false } }, { @@ -164,9 +164,9 @@ } }, "params": { + "Region": "us-west-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -177,9 +177,9 @@ } }, "params": { + "Region": "us-west-2", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-west-2" + 
"UseDualStack": false } }, { @@ -190,9 +190,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -203,9 +203,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -216,9 +216,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -229,9 +229,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -242,9 +242,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -255,9 +255,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -268,9 +268,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -281,9 +281,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -294,9 +294,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -307,9 +307,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -318,9 +318,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { @@ -331,9 +331,9 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - 
"UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -342,9 +342,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { @@ -355,9 +355,9 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -366,9 +366,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -379,9 +379,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -390,9 +390,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -403,9 +403,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -416,9 +416,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -441,9 +441,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -453,9 +453,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, diff --git 
a/services/groundstation/src/main/resources/codegen-resources/service-2.json b/services/groundstation/src/main/resources/codegen-resources/service-2.json index 2151e4f35af2..e8af0d4a9148 100644 --- a/services/groundstation/src/main/resources/codegen-resources/service-2.json +++ b/services/groundstation/src/main/resources/codegen-resources/service-2.json @@ -256,7 +256,7 @@ {"shape":"DependencyException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

      Returns the number of minutes used by account.

      " + "documentation":"

      Returns the number of reserved minutes used by account.

      " }, "GetMissionProfile":{ "name":"GetMissionProfile", diff --git a/services/guardduty/pom.xml b/services/guardduty/pom.xml index addca5b07b7b..769b269c725b 100644 --- a/services/guardduty/pom.xml +++ b/services/guardduty/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 guardduty diff --git a/services/guardduty/src/main/resources/codegen-resources/service-2.json b/services/guardduty/src/main/resources/codegen-resources/service-2.json index 6746a970260c..44ff57eed1e9 100644 --- a/services/guardduty/src/main/resources/codegen-resources/service-2.json +++ b/services/guardduty/src/main/resources/codegen-resources/service-2.json @@ -117,7 +117,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

      Creates member accounts of the current Amazon Web Services account by specifying a list of Amazon Web Services account IDs. This step is a prerequisite for managing the associated member accounts either by invitation or through an organization.

      When using Create Members as an organizations delegated administrator this action will enable GuardDuty in the added member accounts, with the exception of the organization delegated administrator account, which must enable GuardDuty prior to being added as a member.

      If you are adding accounts by invitation, use this action after GuardDuty has bee enabled in potential member accounts and before using InviteMembers.

      " + "documentation":"

      Creates member accounts of the current Amazon Web Services account by specifying a list of Amazon Web Services account IDs. This step is a prerequisite for managing the associated member accounts either by invitation or through an organization.

      As a delegated administrator, using CreateMembers will enable GuardDuty in the added member accounts, with the exception of the organization delegated administrator account. A delegated administrator must enable GuardDuty prior to being added as a member.

      If you are adding accounts by invitation, before using InviteMembers, use CreateMembers after GuardDuty has been enabled in potential member accounts.

      If you disassociate a member from a GuardDuty delegated administrator, the member account details obtained from this API, including the associated email addresses, will be retained. This is done so that the delegated administrator can invoke the InviteMembers API without the need to invoke the CreateMembers API again. To remove the details associated with a member account, the delegated administrator must invoke the DeleteMembers API.

      " }, "CreatePublishingDestination":{ "name":"CreatePublishingDestination", @@ -357,7 +357,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

      Disassociates the current GuardDuty member account from its administrator account.

      With autoEnableOrganizationMembers configuration for your organization set to ALL, you'll receive an error if you attempt to disable GuardDuty in a member account.

      " + "documentation":"

      Disassociates the current GuardDuty member account from its administrator account.

      When you disassociate an invited member from a GuardDuty delegated administrator, the member account details obtained from the CreateMembers API, including the associated email addresses, are retained. This is done so that the delegated administrator can invoke the InviteMembers API without the need to invoke the CreateMembers API again. To remove the details associated with a member account, the delegated administrator must invoke the DeleteMembers API.

      With autoEnableOrganizationMembers configuration for your organization set to ALL, you'll receive an error if you attempt to disable GuardDuty in a member account.

      " }, "DisassociateFromMasterAccount":{ "name":"DisassociateFromMasterAccount", @@ -372,7 +372,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

      Disassociates the current GuardDuty member account from its administrator account.

      ", + "documentation":"

      Disassociates the current GuardDuty member account from its administrator account.

      When you disassociate an invited member from a GuardDuty delegated administrator, the member account details obtained from the CreateMembers API, including the associated email addresses, are retained. This is done so that the delegated administrator can invoke the InviteMembers API without the need to invoke the CreateMembers API again. To remove the details associated with a member account, the delegated administrator must invoke the DeleteMembers API.

      ", "deprecated":true, "deprecatedMessage":"This operation is deprecated, use DisassociateFromAdministratorAccount instead" }, @@ -389,7 +389,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

      Disassociates GuardDuty member accounts (to the current administrator account) specified by the account IDs.

      With autoEnableOrganizationMembers configuration for your organization set to ALL, you'll receive an error if you attempt to disassociate a member account before removing them from your Amazon Web Services organization.

      " + "documentation":"

      Disassociates GuardDuty member accounts (from the current administrator account) specified by the account IDs.

      When you disassociate an invited member from a GuardDuty delegated administrator, the member account details obtained from the CreateMembers API, including the associated email addresses, are retained. This is done so that the delegated administrator can invoke the InviteMembers API without the need to invoke the CreateMembers API again. To remove the details associated with a member account, the delegated administrator must invoke the DeleteMembers API.

      With autoEnableOrganizationMembers configuration for your organization set to ALL, you'll receive an error if you attempt to disassociate a member account before removing them from your Amazon Web Services organization.

      " }, "EnableOrganizationAdminAccount":{ "name":"EnableOrganizationAdminAccount", @@ -646,7 +646,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

      Invites other Amazon Web Services accounts (created as members of the current Amazon Web Services account by CreateMembers) to enable GuardDuty, and allow the current Amazon Web Services account to view and manage these accounts' findings on their behalf as the GuardDuty administrator account.

      " + "documentation":"

      Invites Amazon Web Services accounts to become members of an organization administered by the Amazon Web Services account that invokes this API. If you are using Amazon Web Services Organizations to manage your GuardDuty environment, this step is not needed. For more information, see Managing accounts with Amazon Web Services Organizations.

      To invite Amazon Web Services accounts, the first step is to ensure that GuardDuty has been enabled in the potential member accounts. You can now invoke this API to add accounts by invitation. The invited accounts can either accept or decline the invitation from their GuardDuty accounts. Each invited Amazon Web Services account can choose to accept the invitation from only one Amazon Web Services account. For more information, see Managing GuardDuty accounts by invitation.

      After the invite has been accepted and you choose to disassociate a member account (by using DisassociateMembers) from your account, the details of the member account obtained by invoking CreateMembers, including the associated email addresses, will be retained. This is done so that you can invoke InviteMembers without the need to invoke CreateMembers again. To remove the details associated with a member account, you must also invoke DeleteMembers.

      " }, "ListCoverage":{ "name":"ListCoverage", @@ -3096,7 +3096,7 @@ "members":{ "Domain":{ "shape":"String", - "documentation":"

      The domain information for the API request.

      ", + "documentation":"

      The domain information for the DNS query.

      ", "locationName":"domain" }, "Protocol":{ diff --git a/services/health/pom.xml b/services/health/pom.xml index 77eb5738eb9b..09b5e6ee950a 100644 --- a/services/health/pom.xml +++ b/services/health/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT health AWS Java SDK :: Services :: AWS Health APIs and Notifications diff --git a/services/healthlake/pom.xml b/services/healthlake/pom.xml index fc44fb9b3bc6..287cab675eca 100644 --- a/services/healthlake/pom.xml +++ b/services/healthlake/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT healthlake AWS Java SDK :: Services :: Health Lake diff --git a/services/healthlake/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/healthlake/src/main/resources/codegen-resources/endpoint-rule-set.json index d21bd84ee0dc..8acf26e8a35b 100644 --- a/services/healthlake/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/healthlake/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": 
"tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://healthlake-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but 
this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://healthlake-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://healthlake-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + 
"url": "https://healthlake.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -231,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://healthlake-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://healthlake.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -240,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://healthlake.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://healthlake.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/healthlake/src/main/resources/codegen-resources/endpoint-tests.json b/services/healthlake/src/main/resources/codegen-resources/endpoint-tests.json index 1aef86cbbe42..a234a21aa71e 100644 --- a/services/healthlake/src/main/resources/codegen-resources/endpoint-tests.json +++ 
b/services/healthlake/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,42 +1,29 @@ { "testCases": [ { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://healthlake-fips.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://healthlake-fips.us-west-2.amazonaws.com" + "url": "https://healthlake.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://healthlake.us-west-2.api.aws" + "url": "https://healthlake.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -47,9 +34,9 @@ } }, "params": { - "UseDualStack": false, "Region": "us-west-2", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -60,9 +47,9 @@ } }, "params": { - "UseDualStack": true, "Region": "us-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -73,9 +60,9 @@ } }, "params": { - "UseDualStack": false, "Region": "us-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -86,87 +73,235 @@ } }, "params": { - "UseDualStack": true, "Region": "us-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack 
disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://healthlake.us-east-1.amazonaws.com" + "url": "https://healthlake-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://healthlake-fips.us-east-2.api.aws" + "url": "https://healthlake-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://healthlake-fips.us-east-2.amazonaws.com" + "url": "https://healthlake.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://healthlake.us-east-2.api.aws" + "url": "https://healthlake.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack 
enabled", "expect": { "endpoint": { - "url": "https://healthlake.us-east-2.amazonaws.com" + "url": "https://healthlake-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://healthlake-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://healthlake.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://healthlake.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://healthlake-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not 
support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://healthlake.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://healthlake-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://healthlake.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": 
{ + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -176,9 +311,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -188,11 +323,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/healthlake/src/main/resources/codegen-resources/service-2.json b/services/healthlake/src/main/resources/codegen-resources/service-2.json index 99777b5cfb48..7d769be96806 100644 --- a/services/healthlake/src/main/resources/codegen-resources/service-2.json +++ b/services/healthlake/src/main/resources/codegen-resources/service-2.json @@ -205,7 +205,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

      Adds a user specifed key and value tag to a Data Store.

      " + "documentation":"

      Adds a user specified key and value tag to a Data Store.

      " }, "UntagResource":{ "name":"UntagResource", @@ -237,6 +237,14 @@ "min":1, "pattern":"^arn:aws((-us-gov)|(-iso)|(-iso-b)|(-cn))?:healthlake:[a-z0-9-]+:\\d{12}:datastore\\/fhir\\/.{32}" }, + "AuthorizationStrategy":{ + "type":"string", + "enum":[ + "SMART_ON_FHIR_V1", + "AWS_AUTH" + ] + }, + "Boolean":{"type":"boolean"}, "BoundedLengthString":{ "type":"string", "max":5000, @@ -256,6 +264,7 @@ "AWS_OWNED_KMS_KEY" ] }, + "ConfigurationMetadata":{"type":"string"}, "ConflictException":{ "type":"structure", "members":{ @@ -292,6 +301,10 @@ "Tags":{ "shape":"TagList", "documentation":"

      Resource tags that are applied to a Data Store when it is created.

      " + }, + "IdentityProviderConfiguration":{ + "shape":"IdentityProviderConfiguration", + "documentation":"

      The configuration of the identity provider that you want to use for your Data Store.

      " } } }, @@ -310,7 +323,7 @@ }, "DatastoreArn":{ "shape":"DatastoreArn", - "documentation":"

      The datastore ARN is generated during the creation of the Data Store and can be found in the output from the initial Data Store creation call.

      " + "documentation":"

      The Data Store ARN is generated during the creation of the Data Store and can be found in the output from the initial Data Store creation call.

      " }, "DatastoreStatus":{ "shape":"DatastoreStatus", @@ -318,7 +331,7 @@ }, "DatastoreEndpoint":{ "shape":"BoundedLengthString", - "documentation":"

      The AWS endpoint for the created Data Store. For preview, only US-east-1 endpoints are supported.

      " + "documentation":"

      The AWS endpoint for the created Data Store.

      " } } }, @@ -405,9 +418,13 @@ "PreloadDataConfig":{ "shape":"PreloadDataConfig", "documentation":"

      The preloaded data configuration for the Data Store. Only data preloaded from Synthea is supported.

      " + }, + "IdentityProviderConfiguration":{ + "shape":"IdentityProviderConfiguration", + "documentation":"

      The identity provider that you selected when you created the Data Store.

      " } }, - "documentation":"

      Displays the properties of the Data Store, including the ID, Arn, name, and the status of the Data Store.

      " + "documentation":"

      Displays the properties of the Data Store, including the ID, ARN, name, and the status of the Data Store.

      " }, "DatastorePropertiesList":{ "type":"list", @@ -424,6 +441,7 @@ }, "DeleteFHIRDatastoreRequest":{ "type":"structure", + "required":["DatastoreId"], "members":{ "DatastoreId":{ "shape":"DatastoreId", @@ -460,10 +478,11 @@ }, "DescribeFHIRDatastoreRequest":{ "type":"structure", + "required":["DatastoreId"], "members":{ "DatastoreId":{ "shape":"DatastoreId", - "documentation":"

      The AWS-generated Data Store id. This is part of the ‘CreateFHIRDatastore’ output.

      " + "documentation":"

      The AWS-generated Data Store ID.

      " } } }, @@ -600,6 +619,29 @@ "min":20, "pattern":"arn:aws(-[^:]+)?:iam::[0-9]{12}:role/.+" }, + "IdentityProviderConfiguration":{ + "type":"structure", + "required":["AuthorizationStrategy"], + "members":{ + "AuthorizationStrategy":{ + "shape":"AuthorizationStrategy", + "documentation":"

      The authorization strategy that you selected when you created the Data Store.

      " + }, + "FineGrainedAuthorizationEnabled":{ + "shape":"Boolean", + "documentation":"

      If you enabled fine-grained authorization when you created the Data Store.

      " + }, + "Metadata":{ + "shape":"ConfigurationMetadata", + "documentation":"

      The JSON metadata elements that you want to use in your identity provider configuration. Required elements are listed based on the launch specification of the SMART application. For more information on all possible elements, see Metadata in SMART's App Launch specification.

      authorization_endpoint: The URL to the OAuth2 authorization endpoint.

      grant_types_supported: An array of grant types that are supported at the token endpoint. You must provide at least one grant type option. Valid options are authorization_code and client_credentials.

      token_endpoint: The URL to the OAuth2 token endpoint.

      capabilities: An array of strings of the SMART capabilities that the authorization server supports.

      code_challenge_methods_supported: An array of strings of supported PKCE code challenge methods. You must include the S256 method in the array of PKCE code challenge methods.

      " + }, + "IdpLambdaArn":{ + "shape":"LambdaArn", + "documentation":"

      The Amazon Resource Name (ARN) of the Lambda function that you want to use to decode the access token created by the authorization server.

      " + } + }, + "documentation":"

      The identity provider configuration that you gave when the Data Store was created.

      " + }, "ImportJobProperties":{ "type":"structure", "required":[ @@ -620,7 +662,7 @@ }, "JobStatus":{ "shape":"JobStatus", - "documentation":"

      The job status for an Import job. Possible statuses are SUBMITTED, IN_PROGRESS, COMPLETED, FAILED.

      " + "documentation":"

      The job status for an Import job. Possible statuses are SUBMITTED, IN_PROGRESS, COMPLETED_WITH_ERRORS, COMPLETED, FAILED.

      " }, "SubmitTime":{ "shape":"Timestamp", @@ -693,7 +735,11 @@ "IN_PROGRESS", "COMPLETED_WITH_ERRORS", "COMPLETED", - "FAILED" + "FAILED", + "CANCEL_SUBMITTED", + "CANCEL_IN_PROGRESS", + "CANCEL_COMPLETED", + "CANCEL_FAILED" ] }, "KmsEncryptionConfig":{ @@ -711,6 +757,12 @@ }, "documentation":"

      The customer-managed-key(CMK) used when creating a Data Store. If a customer owned key is not specified, an AWS owned key will be used for encryption.

      " }, + "LambdaArn":{ + "type":"string", + "max":256, + "min":49, + "pattern":"arn:aws:lambda:[a-z]{2}-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9\\-_\\.]+(:(\\$LATEST|[a-zA-Z0-9\\-_]+))?" + }, "ListFHIRDatastoresRequest":{ "type":"structure", "members":{ @@ -1067,7 +1119,7 @@ }, "Value":{ "shape":"TagValue", - "documentation":"

      The value portion of tag. Tag values are case sensitive.

      " + "documentation":"

      The value portion of a tag. Tag values are case sensitive.

      " } }, "documentation":"

      A tag is a label consisting of a user-defined key and value. The form for tags is {\"Key\", \"Value\"}

      " diff --git a/services/honeycode/pom.xml b/services/honeycode/pom.xml index fc6ec1c12aaa..907e931d3cd5 100644 --- a/services/honeycode/pom.xml +++ b/services/honeycode/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT honeycode AWS Java SDK :: Services :: Honeycode diff --git a/services/iam/pom.xml b/services/iam/pom.xml index bd846fbe38cc..2bbf0858be2f 100644 --- a/services/iam/pom.xml +++ b/services/iam/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT iam AWS Java SDK :: Services :: AWS IAM diff --git a/services/iam/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/iam/src/main/resources/codegen-resources/endpoint-rule-set.json index c1cc7ead31f3..29693995e323 100644 --- a/services/iam/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/iam/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -138,216 +138,91 @@ }, "aws" ] - } - ], - "type": "tree", - "rules": [ + }, { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] + "ref": "UseFIPS" }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://iam-fips.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseDualStack" }, + false + ] + } + ], + "endpoint": { + "url": 
"https://iam.amazonaws.com", + "properties": { + "authSchemes": [ { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "name": "sigv4", + "signingName": "iam", + "signingRegion": "us-east-1" } ] }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [ + "fn": "stringEquals", + "argv": [ { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ { - "ref": "UseFIPS" + "ref": "PartitionResult" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://iam-fips.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "iam", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } + "name" ] }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } + "aws" ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://iam.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseFIPS" }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } + true ] }, { - "conditions": [], - "endpoint": { - "url": "https://iam.amazonaws.com", - "properties": { - "authSchemes": [ - 
{ - "name": "sigv4", - "signingName": "iam", - "signingRegion": "us-east-1" - } - ] + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" }, - "headers": {} - }, - "type": "endpoint" + false + ] } - ] + ], + "endpoint": { + "url": "https://iam-fips.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "iam", + "signingRegion": "us-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [ @@ -365,208 +240,40 @@ }, "aws-cn" ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://iam-fips.{Region}.api.amazonwebservices.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://iam-fips.{Region}.amazonaws.com.cn", - "properties": {}, - "headers": {} - }, - 
"type": "endpoint" - } - ] + "ref": "UseFIPS" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } + false ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://iam.{Region}.api.amazonwebservices.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseDualStack" }, + false + ] + } + ], + "endpoint": { + "url": "https://iam.cn-north-1.amazonaws.com.cn", + "properties": { + "authSchemes": [ { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "name": "sigv4", + "signingName": "iam", + "signingRegion": "cn-north-1" } ] }, - { - "conditions": [], - "endpoint": { - "url": "https://iam.cn-north-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "iam", - "signingRegion": "cn-north-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] + "headers": {} + }, + "type": "endpoint" }, { "conditions": [ @@ -584,216 +291,91 @@ }, "aws-us-gov" ] - } - ], - "type": "tree", - "rules": [ + }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - 
"argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://iam-fips.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseFIPS" }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } + false ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://iam.us-gov.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "iam", - "signingRegion": "us-gov-west-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseDualStack" }, + false + ] + } + ], + "endpoint": { + "url": "https://iam.us-gov.amazonaws.com", + "properties": { + "authSchemes": [ { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "name": "sigv4", + "signingName": "iam", + "signingRegion": "us-gov-west-1" } ] }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "stringEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - 
"url": "https://iam.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" ] }, + "aws-us-gov" + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } + "ref": "UseFIPS" + }, + true ] }, { - "conditions": [], - "endpoint": { - "url": "https://iam.us-gov.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "iam", - "signingRegion": "us-gov-west-1" - } - ] + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" }, - "headers": {} - }, - "type": "endpoint" + false + ] } - ] + ], + "endpoint": { + "url": "https://iam.us-gov.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "iam", + "signingRegion": "us-gov-west-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [ @@ -811,80 +393,40 @@ }, "aws-iso" ] - } - ], - "type": "tree", - "rules": [ + }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://iam-fips.{Region}.c2s.ic.gov", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseFIPS" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } + false ] }, { - "conditions": [], - "endpoint": { - "url": "https://iam.us-iso-east-1.c2s.ic.gov", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "iam", - "signingRegion": "us-iso-east-1" - } 
- ] + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" }, - "headers": {} - }, - "type": "endpoint" + false + ] } - ] + ], + "endpoint": { + "url": "https://iam.us-iso-east-1.c2s.ic.gov", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "iam", + "signingRegion": "us-iso-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [ @@ -902,80 +444,40 @@ }, "aws-iso-b" ] - } - ], - "type": "tree", - "rules": [ + }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://iam-fips.{Region}.sc2s.sgov.gov", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseFIPS" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } + false ] }, { - "conditions": [], - "endpoint": { - "url": "https://iam.us-isob-east-1.sc2s.sgov.gov", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "iam", - "signingRegion": "us-isob-east-1" - } - ] + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" }, - "headers": {} - }, - "type": "endpoint" + false + ] } - ] + ], + "endpoint": { + "url": "https://iam.us-isob-east-1.sc2s.sgov.gov", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "iam", + "signingRegion": "us-isob-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [ @@ -1097,60 +599,6 @@ "conditions": [], "type": "tree", "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-global" - ] - } - ], - "endpoint": { - 
"url": "https://iam-fips.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "iam", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-us-gov-global" - ] - } - ], - "endpoint": { - "url": "https://iam.us-gov.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "iam", - "signingRegion": "us-gov-west-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, { "conditions": [], "endpoint": { @@ -1233,141 +681,6 @@ "conditions": [], "type": "tree", "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-global" - ] - } - ], - "endpoint": { - "url": "https://iam.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "iam", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-cn-global" - ] - } - ], - "endpoint": { - "url": "https://iam.cn-north-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "iam", - "signingRegion": "cn-north-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-us-gov-global" - ] - } - ], - "endpoint": { - "url": "https://iam.us-gov.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "iam", - "signingRegion": "us-gov-west-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-iso-global" - ] - } - ], - "endpoint": { - "url": "https://iam.us-iso-east-1.c2s.ic.gov", - "properties": { - "authSchemes": [ - { - "name": 
"sigv4", - "signingName": "iam", - "signingRegion": "us-iso-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-iso-b-global" - ] - } - ], - "endpoint": { - "url": "https://iam.us-isob-east-1.sc2s.sgov.gov", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "iam", - "signingRegion": "us-isob-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, { "conditions": [], "endpoint": { diff --git a/services/iam/src/main/resources/codegen-resources/endpoint-tests.json b/services/iam/src/main/resources/codegen-resources/endpoint-tests.json index 18079eb05dc6..71dafa82681f 100644 --- a/services/iam/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/iam/src/main/resources/codegen-resources/endpoint-tests.json @@ -17,9 +17,9 @@ } }, "params": { + "Region": "aws-global", "UseFIPS": false, - "UseDualStack": false, - "Region": "aws-global" + "UseDualStack": false } }, { @@ -39,9 +39,9 @@ } }, "params": { + "Region": "aws-global", "UseFIPS": true, - "UseDualStack": false, - "Region": "aws-global" + "UseDualStack": false } }, { @@ -52,9 +52,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -74,9 +74,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -87,9 +87,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -109,9 +109,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -131,9 +131,9 @@ } }, "params": { + "Region": "aws-cn-global", "UseFIPS": false, - "UseDualStack": false, - "Region": "aws-cn-global" + "UseDualStack": false } }, { @@ 
-144,9 +144,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -157,9 +157,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -170,9 +170,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -192,9 +192,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -214,9 +214,9 @@ } }, "params": { + "Region": "aws-us-gov-global", "UseFIPS": false, - "UseDualStack": false, - "Region": "aws-us-gov-global" + "UseDualStack": false } }, { @@ -236,9 +236,9 @@ } }, "params": { + "Region": "aws-us-gov-global", "UseFIPS": true, - "UseDualStack": false, - "Region": "aws-us-gov-global" + "UseDualStack": false } }, { @@ -249,9 +249,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -271,9 +271,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -284,9 +284,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -306,9 +306,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -328,9 +328,20 @@ } }, "params": { + "Region": "aws-iso-global", "UseFIPS": false, - "UseDualStack": false, - "Region": "aws-iso-global" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition 
does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -341,9 +352,20 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -363,9 +385,9 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -385,9 +407,20 @@ } }, "params": { + "Region": "aws-iso-b-global", "UseFIPS": false, - "UseDualStack": false, - "Region": "aws-iso-b-global" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -398,9 +431,20 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -420,9 +464,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -433,9 +477,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": 
"https://example.com" } }, @@ -458,9 +502,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -470,11 +514,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/iam/src/main/resources/codegen-resources/service-2.json b/services/iam/src/main/resources/codegen-resources/service-2.json index b4228759a724..bc883919a5f3 100644 --- a/services/iam/src/main/resources/codegen-resources/service-2.json +++ b/services/iam/src/main/resources/codegen-resources/service-2.json @@ -150,11 +150,12 @@ }, "input":{"shape":"CreateAccountAliasRequest"}, "errors":[ + {"shape":"ConcurrentModificationException"}, {"shape":"EntityAlreadyExistsException"}, {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

      Creates an alias for your Amazon Web Services account. For information about using an Amazon Web Services account alias, see Using an alias for your Amazon Web Services account ID in the IAM User Guide.

      " + "documentation":"

      Creates an alias for your Amazon Web Services account. For information about using an Amazon Web Services account alias, see Creating, deleting, and listing an Amazon Web Services account alias in the Amazon Web Services Sign-In User Guide.

      " }, "CreateGroup":{ "name":"CreateGroup", @@ -406,7 +407,8 @@ {"shape":"EntityTemporarilyUnmodifiableException"}, {"shape":"NoSuchEntityException"}, {"shape":"LimitExceededException"}, - {"shape":"ServiceFailureException"} + {"shape":"ServiceFailureException"}, + {"shape":"ConcurrentModificationException"} ], "documentation":"

      Deactivates the specified MFA device and removes it from association with the user name for which it was originally enabled.

      For more information about creating and working with virtual MFA devices, see Enabling a virtual multi-factor authentication (MFA) device in the IAM User Guide.

      " }, @@ -432,11 +434,12 @@ }, "input":{"shape":"DeleteAccountAliasRequest"}, "errors":[ + {"shape":"ConcurrentModificationException"}, {"shape":"NoSuchEntityException"}, {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

      Deletes the specified Amazon Web Services account alias. For information about using an Amazon Web Services account alias, see Using an alias for your Amazon Web Services account ID in the IAM User Guide.

      " + "documentation":"

      Deletes the specified Amazon Web Services account alias. For information about using an Amazon Web Services account alias, see Creating, deleting, and listing an Amazon Web Services account alias in the Amazon Web Services Sign-In User Guide.

      " }, "DeleteAccountPasswordPolicy":{ "name":"DeleteAccountPasswordPolicy", @@ -684,6 +687,7 @@ "errors":[ {"shape":"NoSuchEntityException"}, {"shape":"LimitExceededException"}, + {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], "documentation":"

      Deletes a signing certificate associated with the specified IAM user.

      If you do not specify a user name, IAM determines the user name implicitly based on the Amazon Web Services access key ID signing the request. This operation works for access keys under the Amazon Web Services account. Consequently, you can use this operation to manage Amazon Web Services account root user credentials even if the Amazon Web Services account has no associated IAM users.

      " @@ -742,7 +746,8 @@ {"shape":"NoSuchEntityException"}, {"shape":"DeleteConflictException"}, {"shape":"LimitExceededException"}, - {"shape":"ServiceFailureException"} + {"shape":"ServiceFailureException"}, + {"shape":"ConcurrentModificationException"} ], "documentation":"

      Deletes a virtual MFA device.

      You must deactivate a user's virtual MFA device before you can delete it. For information about deactivating MFA devices, see DeactivateMFADevice.

      " }, @@ -805,7 +810,8 @@ {"shape":"InvalidAuthenticationCodeException"}, {"shape":"LimitExceededException"}, {"shape":"NoSuchEntityException"}, - {"shape":"ServiceFailureException"} + {"shape":"ServiceFailureException"}, + {"shape":"ConcurrentModificationException"} ], "documentation":"

      Enables the specified MFA device and associates it with the specified IAM user. When enabled, the MFA device is required for every subsequent login by the IAM user associated with the device.

      " }, @@ -1313,7 +1319,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

      Lists the account alias associated with the Amazon Web Services account (Note: you can have only one). For information about using an Amazon Web Services account alias, see Using an alias for your Amazon Web Services account ID in the IAM User Guide.

      " + "documentation":"

      Lists the account alias associated with the Amazon Web Services account (Note: you can have only one). For information about using an Amazon Web Services account alias, see Creating, deleting, and listing an Amazon Web Services account alias in the Amazon Web Services Sign-In User Guide.

      " }, "ListAttachedGroupPolicies":{ "name":"ListAttachedGroupPolicies", @@ -1673,7 +1679,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

      Lists the IAM roles that have the specified path prefix. If there are none, the operation returns an empty list. For more information about roles, see Working with roles.

      IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for a role, see GetRole.

      You can paginate the results using the MaxItems and Marker parameters.

      " + "documentation":"

      Lists the IAM roles that have the specified path prefix. If there are none, the operation returns an empty list. For more information about roles, see Working with roles.

      IAM resource-listing operations return a subset of the available attributes for the resource. This operation does not return the following attributes, even though they are an attribute of the returned object:

      • PermissionsBoundary

      • RoleLastUsed

      • Tags

      To view all of the information for a role, see GetRole.

      You can paginate the results using the MaxItems and Marker parameters.

      " }, "ListSAMLProviderTags":{ "name":"ListSAMLProviderTags", @@ -1840,7 +1846,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

      Lists the IAM users that have the specified path prefix. If no path prefix is specified, the operation returns all users in the Amazon Web Services account. If there are none, the operation returns an empty list.

      IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for a user, see GetUser.

      You can paginate the results using the MaxItems and Marker parameters.

      " + "documentation":"

      Lists the IAM users that have the specified path prefix. If no path prefix is specified, the operation returns all users in the Amazon Web Services account. If there are none, the operation returns an empty list.

      IAM resource-listing operations return a subset of the available attributes for the resource. This operation does not return the following attributes, even though they are an attribute of the returned object:

      • PermissionsBoundary

      • Tags

      To view all of the information for a user, see GetUser.

      You can paginate the results using the MaxItems and Marker parameters.

      " }, "ListVirtualMFADevices":{ "name":"ListVirtualMFADevices", @@ -2002,7 +2008,8 @@ {"shape":"InvalidAuthenticationCodeException"}, {"shape":"NoSuchEntityException"}, {"shape":"LimitExceededException"}, - {"shape":"ServiceFailureException"} + {"shape":"ServiceFailureException"}, + {"shape":"ConcurrentModificationException"} ], "documentation":"

      Synchronizes the specified MFA device with its IAM resource object on the Amazon Web Services servers.

      For more information about creating and working with virtual MFA devices, see Using a virtual MFA device in the IAM User Guide.

      " }, @@ -2589,6 +2596,7 @@ {"shape":"InvalidCertificateException"}, {"shape":"DuplicateCertificateException"}, {"shape":"NoSuchEntityException"}, + {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], "documentation":"

      Uploads an X.509 signing certificate and associates it with the specified IAM user. Some Amazon Web Services services require you to use certificates to validate requests that are signed with a corresponding private key. When you upload the certificate, its default status is Active.

      For information about when you would use an X.509 signing certificate, see Managing server certificates in IAM in the IAM User Guide.

      If the UserName is not specified, the IAM user name is determined implicitly based on the Amazon Web Services access key ID used to sign the request. This operation works for access keys under the Amazon Web Services account. Consequently, you can use this operation to manage Amazon Web Services account root user credentials even if the Amazon Web Services account has no associated users.

      Because the body of an X.509 certificate can be large, you should use POST rather than GET when calling UploadSigningCertificate. For information about setting up signatures and authorization through the API, see Signing Amazon Web Services API requests in the Amazon Web Services General Reference. For general information about using the Query API with IAM, see Making query requests in the IAM User Guide.

      " @@ -8545,7 +8553,7 @@ }, "Base32StringSeed":{ "shape":"BootstrapDatum", - "documentation":"

      The base32 seed defined as specified in RFC3548. The Base32StringSeed is base64-encoded.

      " + "documentation":"

      The base32 seed defined as specified in RFC3548. The Base32StringSeed is base32-encoded.

      " }, "QRCodePNG":{ "shape":"BootstrapDatum", @@ -8589,7 +8597,7 @@ "type":"string", "max":63, "min":3, - "pattern":"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$" + "pattern":"^[a-z0-9]([a-z0-9]|-(?!-)){1,61}[a-z0-9]$" }, "arnType":{ "type":"string", @@ -8827,7 +8835,7 @@ "type":"string", "max":512, "min":1, - "pattern":"(\\u002F)|(\\u002F[\\u0021-\\u007F]+\\u002F)" + "pattern":"(\\u002F)|(\\u002F[\\u0021-\\u007E]+\\u002F)" }, "policyDescriptionType":{ "type":"string", @@ -8931,7 +8939,7 @@ "roleDescriptionType":{ "type":"string", "max":1000, - "pattern":"[\\p{L}\\p{M}\\p{Z}\\p{S}\\p{N}\\p{P}]*" + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u00A1-\\u00FF]*" }, "roleDetailListType":{ "type":"list", diff --git a/services/identitystore/pom.xml b/services/identitystore/pom.xml index 9c5347d5dfd1..4c0bfdab0059 100644 --- a/services/identitystore/pom.xml +++ b/services/identitystore/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT identitystore AWS Java SDK :: Services :: Identitystore diff --git a/services/imagebuilder/pom.xml b/services/imagebuilder/pom.xml index 4aee77e15004..360b98e329af 100644 --- a/services/imagebuilder/pom.xml +++ b/services/imagebuilder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT imagebuilder AWS Java SDK :: Services :: Imagebuilder diff --git a/services/imagebuilder/src/main/resources/codegen-resources/endpoint-tests.json b/services/imagebuilder/src/main/resources/codegen-resources/endpoint-tests.json index ed29f944ecff..bcfa0a4ab2f7 100644 --- a/services/imagebuilder/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/imagebuilder/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -21,9 +21,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", 
"UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -47,9 +47,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -99,9 +99,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -112,9 +112,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -125,9 +125,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -138,9 +138,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-west-1", "UseFIPS": false, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { @@ -151,9 +151,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-west-1", "UseFIPS": true, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { @@ -164,9 +164,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -177,9 +177,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + 
"UseDualStack": true } }, { @@ -188,9 +188,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { @@ -201,9 +201,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -212,9 +212,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { @@ -225,9 +225,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -236,9 +236,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -249,9 +249,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -260,9 +260,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -273,9 +273,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -286,9 +286,9 @@ } }, "params": { - "UseDualStack": false, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -300,8 +300,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, 
"Endpoint": "https://example.com" } }, @@ -311,9 +311,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -323,9 +323,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } }, diff --git a/services/imagebuilder/src/main/resources/codegen-resources/service-2.json b/services/imagebuilder/src/main/resources/codegen-resources/service-2.json index dee0502afd3c..3f1b55a9e8e0 100644 --- a/services/imagebuilder/src/main/resources/codegen-resources/service-2.json +++ b/services/imagebuilder/src/main/resources/codegen-resources/service-2.json @@ -3501,7 +3501,7 @@ }, "dateNextRun":{ "shape":"DateTime", - "documentation":"

      This is no longer supported, and does not return a value.

      " + "documentation":"

      The next date when the pipeline is scheduled to run.

      " }, "tags":{ "shape":"TagMap", diff --git a/services/inspector/pom.xml b/services/inspector/pom.xml index 0470b236ea4e..b0bd74af4170 100644 --- a/services/inspector/pom.xml +++ b/services/inspector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT inspector AWS Java SDK :: Services :: Amazon Inspector Service diff --git a/services/inspector2/pom.xml b/services/inspector2/pom.xml index 792dcf86a1b7..619ca6bc24d5 100644 --- a/services/inspector2/pom.xml +++ b/services/inspector2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT inspector2 AWS Java SDK :: Services :: Inspector2 diff --git a/services/inspector2/src/main/resources/codegen-resources/service-2.json b/services/inspector2/src/main/resources/codegen-resources/service-2.json index d2c6140d4ca4..26d23ea7d676 100644 --- a/services/inspector2/src/main/resources/codegen-resources/service-2.json +++ b/services/inspector2/src/main/resources/codegen-resources/service-2.json @@ -48,6 +48,23 @@ ], "documentation":"

      Retrieves the Amazon Inspector status of multiple Amazon Web Services accounts within your environment.

      " }, + "BatchGetCodeSnippet":{ + "name":"BatchGetCodeSnippet", + "http":{ + "method":"POST", + "requestUri":"/codesnippet/batchget", + "responseCode":200 + }, + "input":{"shape":"BatchGetCodeSnippetRequest"}, + "output":{"shape":"BatchGetCodeSnippetResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Retrieves code snippets from findings that Amazon Inspector detected code vulnerabilities in.

      " + }, "BatchGetFreeTrialInfo":{ "name":"BatchGetFreeTrialInfo", "http":{ @@ -117,6 +134,25 @@ ], "documentation":"

      Cancels the given findings report.

      " }, + "CancelSbomExport":{ + "name":"CancelSbomExport", + "http":{ + "method":"POST", + "requestUri":"/sbomexport/cancel", + "responseCode":200 + }, + "input":{"shape":"CancelSbomExportRequest"}, + "output":{"shape":"CancelSbomExportResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Cancels a software bill of materials (SBOM) report.

      ", + "idempotent":true + }, "CreateFilter":{ "name":"CreateFilter", "http":{ @@ -154,6 +190,25 @@ ], "documentation":"

      Creates a finding report. By default only ACTIVE findings are returned in the report. To see SUPRESSED or CLOSED findings you must specify a value for the findingStatus filter criteria.

      " }, + "CreateSbomExport":{ + "name":"CreateSbomExport", + "http":{ + "method":"POST", + "requestUri":"/sbomexport/create", + "responseCode":200 + }, + "input":{"shape":"CreateSbomExportRequest"}, + "output":{"shape":"CreateSbomExportResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Creates a software bill of materials (SBOM) report.

      ", + "idempotent":true + }, "DeleteFilter":{ "name":"DeleteFilter", "http":{ @@ -331,6 +386,24 @@ ], "documentation":"

      Retrieves the activation status of Amazon Inspector deep inspection and custom paths associated with your account.

      " }, + "GetEncryptionKey":{ + "name":"GetEncryptionKey", + "http":{ + "method":"GET", + "requestUri":"/encryptionkey/get", + "responseCode":200 + }, + "input":{"shape":"GetEncryptionKeyRequest"}, + "output":{"shape":"GetEncryptionKeyResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Gets an encryption key.

      " + }, "GetFindingsReportStatus":{ "name":"GetFindingsReportStatus", "http":{ @@ -367,6 +440,25 @@ ], "documentation":"

      Gets member information for your organization.

      " }, + "GetSbomExport":{ + "name":"GetSbomExport", + "http":{ + "method":"POST", + "requestUri":"/sbomexport/get", + "responseCode":200 + }, + "input":{"shape":"GetSbomExportRequest"}, + "output":{"shape":"GetSbomExportResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Gets details of a software bill of materials (SBOM) report.

      ", + "idempotent":true + }, "ListAccountPermissions":{ "name":"ListAccountPermissions", "http":{ @@ -533,6 +625,25 @@ ], "documentation":"

      Lists the Amazon Inspector usage totals over the last 30 days.

      " }, + "ResetEncryptionKey":{ + "name":"ResetEncryptionKey", + "http":{ + "method":"PUT", + "requestUri":"/encryptionkey/reset", + "responseCode":200 + }, + "input":{"shape":"ResetEncryptionKeyRequest"}, + "output":{"shape":"ResetEncryptionKeyResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Resets an encryption key. After the key is reset your resources will be encrypted by an Amazon Web Services owned key.

      ", + "idempotent":true + }, "SearchVulnerabilities":{ "name":"SearchVulnerabilities", "http":{ @@ -620,6 +731,25 @@ ], "documentation":"

      Activates, deactivates Amazon Inspector deep inspection, or updates custom paths for your account.

      " }, + "UpdateEncryptionKey":{ + "name":"UpdateEncryptionKey", + "http":{ + "method":"PUT", + "requestUri":"/encryptionkey/update", + "responseCode":200 + }, + "input":{"shape":"UpdateEncryptionKeyRequest"}, + "output":{"shape":"UpdateEncryptionKeyResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Updates an encryption key. A ResourceNotFoundException means that an Amazon Web Services owned key is being used for encryption.

      ", + "idempotent":true + }, "UpdateFilter":{ "name":"UpdateFilter", "http":{ @@ -804,7 +934,8 @@ "type":"string", "enum":[ "NETWORK_REACHABILITY", - "PACKAGE_VULNERABILITY" + "PACKAGE_VULNERABILITY", + "CODE_VULNERABILITY" ] }, "AggregationRequest":{ @@ -1069,6 +1200,10 @@ "lambda":{ "shape":"Boolean", "documentation":"

      Represents whether AWS Lambda standard scans are automatically enabled for new members of your Amazon Inspector organization.

      " + }, + "lambdaCode":{ + "shape":"Boolean", + "documentation":"

      Represents whether AWS Lambda code scans are automatically enabled for new members of your Amazon Inspector organization.

       </p> 
      " } }, "documentation":"

      Represents which scan types are automatically enabled for new members of your Amazon Inspector organization.

      " @@ -1328,6 +1463,35 @@ } } }, + "BatchGetCodeSnippetRequest":{ + "type":"structure", + "required":["findingArns"], + "members":{ + "findingArns":{ + "shape":"BatchGetCodeSnippetRequestFindingArnsList", + "documentation":"

      An array of finding ARNs for the findings you want to retrieve code snippets from.

      " + } + } + }, + "BatchGetCodeSnippetRequestFindingArnsList":{ + "type":"list", + "member":{"shape":"FindingArn"}, + "max":10, + "min":1 + }, + "BatchGetCodeSnippetResponse":{ + "type":"structure", + "members":{ + "codeSnippetResults":{ + "shape":"CodeSnippetResultList", + "documentation":"

      The retrieved code snippets associated with the provided finding ARNs.

      " + }, + "errors":{ + "shape":"CodeSnippetErrorList", + "documentation":"

      Any errors Amazon Inspector encountered while trying to retrieve the requested code snippets.

      " + } + } + }, "BatchGetFreeTrialInfoRequest":{ "type":"structure", "required":["accountIds"], @@ -1430,6 +1594,25 @@ } } }, + "CancelSbomExportRequest":{ + "type":"structure", + "required":["reportId"], + "members":{ + "reportId":{ + "shape":"ReportId", + "documentation":"

      The report ID of the SBOM export to cancel.

      " + } + } + }, + "CancelSbomExportResponse":{ + "type":"structure", + "members":{ + "reportId":{ + "shape":"ReportId", + "documentation":"

      The report ID of the canceled SBOM export.

      " + } + } + }, "CisaAction":{ "type":"string", "min":0 @@ -1459,6 +1642,173 @@ "max":64, "min":1 }, + "CodeFilePath":{ + "type":"structure", + "required":[ + "endLine", + "fileName", + "filePath", + "startLine" + ], + "members":{ + "endLine":{ + "shape":"Integer", + "documentation":"

      The line number of the last line of code that a vulnerability was found in.

      " + }, + "fileName":{ + "shape":"NonEmptyString", + "documentation":"

      The name of the file the code vulnerability was found in.

      " + }, + "filePath":{ + "shape":"NonEmptyString", + "documentation":"

      The file path to the code that a vulnerability was found in.

      " + }, + "startLine":{ + "shape":"Integer", + "documentation":"

      The line number of the first line of code that a vulnerability was found in.

      " + } + }, + "documentation":"

      Contains information on where a code vulnerability is located in your Lambda function.

      " + }, + "CodeLine":{ + "type":"structure", + "required":[ + "content", + "lineNumber" + ], + "members":{ + "content":{ + "shape":"CodeLineContentString", + "documentation":"

      The content of a line of code.

      " + }, + "lineNumber":{ + "shape":"Integer", + "documentation":"

      The line number that a section of code is located at.

      " + } + }, + "documentation":"

      Contains information on the lines of code associated with a code snippet.

      " + }, + "CodeLineContentString":{ + "type":"string", + "max":240, + "min":0 + }, + "CodeLineList":{ + "type":"list", + "member":{"shape":"CodeLine"}, + "max":20, + "min":1 + }, + "CodeSnippetError":{ + "type":"structure", + "required":[ + "errorCode", + "errorMessage", + "findingArn" + ], + "members":{ + "errorCode":{ + "shape":"CodeSnippetErrorCode", + "documentation":"

      The error code for the error that prevented a code snippet from being retrieved.

      " + }, + "errorMessage":{ + "shape":"NonEmptyString", + "documentation":"

      The error message received when Amazon Inspector failed to retrieve a code snippet.

      " + }, + "findingArn":{ + "shape":"FindingArn", + "documentation":"

      The ARN of the finding that a code snippet couldn't be retrieved for.

      " + } + }, + "documentation":"

      Contains information about any errors encountered while trying to retrieve a code snippet.

      " + }, + "CodeSnippetErrorCode":{ + "type":"string", + "enum":[ + "INTERNAL_ERROR", + "ACCESS_DENIED", + "CODE_SNIPPET_NOT_FOUND", + "INVALID_INPUT" + ] + }, + "CodeSnippetErrorList":{ + "type":"list", + "member":{"shape":"CodeSnippetError"} + }, + "CodeSnippetResult":{ + "type":"structure", + "members":{ + "codeSnippet":{ + "shape":"CodeLineList", + "documentation":"

      Contains information on the retrieved code snippet.

      " + }, + "endLine":{ + "shape":"Integer", + "documentation":"

      The line number of the last line of a code snippet.

      " + }, + "findingArn":{ + "shape":"FindingArn", + "documentation":"

      The ARN of a finding that the code snippet is associated with.

      " + }, + "startLine":{ + "shape":"Integer", + "documentation":"

      The line number of the first line of a code snippet.

      " + }, + "suggestedFixes":{ + "shape":"SuggestedFixes", + "documentation":"

      Details of a suggested code fix.

      " + } + }, + "documentation":"

      Contains information on a code snippet retrieved by Amazon Inspector from a code vulnerability finding.

      " + }, + "CodeSnippetResultList":{ + "type":"list", + "member":{"shape":"CodeSnippetResult"} + }, + "CodeVulnerabilityDetails":{ + "type":"structure", + "required":[ + "cwes", + "detectorId", + "detectorName", + "filePath" + ], + "members":{ + "cwes":{ + "shape":"CweList", + "documentation":"

      The Common Weakness Enumeration (CWE) item associated with the detected vulnerability.

      " + }, + "detectorId":{ + "shape":"NonEmptyString", + "documentation":"

      The ID for the Amazon CodeGuru detector associated with the finding. For more information on detectors see Amazon CodeGuru Detector Library.

      " + }, + "detectorName":{ + "shape":"NonEmptyString", + "documentation":"

      The name of the detector used to identify the code vulnerability. For more information on detectors see CodeGuru Detector Library.

      " + }, + "detectorTags":{ + "shape":"DetectorTagList", + "documentation":"

      The detector tag associated with the vulnerability. Detector tags group related vulnerabilities by common themes or tactics. For a list of available tags by programming language, see Java tags, or Python tags.

      " + }, + "filePath":{ + "shape":"CodeFilePath", + "documentation":"

      Contains information on where the code vulnerability is located in your code.

      " + }, + "referenceUrls":{ + "shape":"ReferenceUrls", + "documentation":"

      A URL containing supporting documentation about the code vulnerability detected.

      " + }, + "ruleId":{ + "shape":"NonEmptyString", + "documentation":"

      The identifier for a rule that was used to detect the code vulnerability.

      " + }, + "sourceLambdaLayerArn":{ + "shape":"LambdaLayerArn", + "documentation":"

      The Amazon Resource Name (ARN) of the Lambda layer that the code vulnerability was detected in.

      " + } + }, + "documentation":"

      Contains information on the code vulnerability identified in your Lambda function.

      " + }, "Component":{"type":"string"}, "ComponentType":{"type":"string"}, "ConflictException":{ @@ -1506,6 +1856,26 @@ "max":5, "min":1 }, + "CoverageDateFilter":{ + "type":"structure", + "members":{ + "endInclusive":{ + "shape":"DateTimeTimestamp", + "documentation":"

      A timestamp representing the end of the time period to filter results by.

      " + }, + "startInclusive":{ + "shape":"DateTimeTimestamp", + "documentation":"

      A timestamp representing the start of the time period to filter results by.

      " + } + }, + "documentation":"

      Contains details of a coverage date filter.

      " + }, + "CoverageDateFilterList":{ + "type":"list", + "member":{"shape":"CoverageDateFilter"}, + "max":10, + "min":1 + }, "CoverageFilterCriteria":{ "type":"structure", "members":{ @@ -1537,13 +1907,17 @@ "shape":"CoverageMapFilterList", "documentation":"

      Returns coverage statistics for AWS Lambda functions filtered by tag.

      " }, + "lastScannedAt":{ + "shape":"CoverageDateFilterList", + "documentation":"

      Filters Amazon Web Services resources based on whether Amazon Inspector has checked them for vulnerabilities within the specified time range.

      " + }, "resourceId":{ "shape":"CoverageStringFilterList", "documentation":"

      An array of Amazon Web Services resource IDs to return coverage statistics for.

      " }, "resourceType":{ "shape":"CoverageStringFilterList", - "documentation":"

      An array of Amazon Web Services resource types to return coverage statistics for. The values can be AWS_EC2_INSTANCE or AWS_ECR_REPOSITORY.

      " + "documentation":"

      An array of Amazon Web Services resource types to return coverage statistics for. The values can be AWS_EC2_INSTANCE, AWS_LAMBDA_FUNCTION or AWS_ECR_REPOSITORY.

      " }, "scanStatusCode":{ "shape":"CoverageStringFilterList", @@ -1650,6 +2024,10 @@ "shape":"AccountId", "documentation":"

      The Amazon Web Services account ID of the covered resource.

      " }, + "lastScannedAt":{ + "shape":"DateTimeTimestamp", + "documentation":"

      The date and time the resource was last checked for vulnerabilities.

      " + }, "resourceId":{ "shape":"ResourceId", "documentation":"

      The ID of the covered resource.

      " @@ -1751,6 +2129,33 @@ } } }, + "CreateSbomExportRequest":{ + "type":"structure", + "required":[ + "reportFormat", + "s3Destination" + ], + "members":{ + "reportFormat":{ + "shape":"SbomReportFormat", + "documentation":"

      The output format for the software bill of materials (SBOM) report.

      " + }, + "resourceFilterCriteria":{ + "shape":"ResourceFilterCriteria", + "documentation":"

      The resource filter criteria for the software bill of materials (SBOM) report.

      " + }, + "s3Destination":{"shape":"Destination"} + } + }, + "CreateSbomExportResponse":{ + "type":"structure", + "members":{ + "reportId":{ + "shape":"ReportId", + "documentation":"

      The report ID for the software bill of materials (SBOM) report.

      " + } + } + }, "Currency":{ "type":"string", "enum":["USD"] @@ -1887,6 +2292,12 @@ "type":"string", "min":0 }, + "CweList":{ + "type":"list", + "member":{"shape":"NonEmptyString"}, + "max":10, + "min":1 + }, "Cwes":{ "type":"list", "member":{"shape":"Cwe"}, @@ -2020,6 +2431,12 @@ "max":100, "min":0 }, + "DetectorTagList":{ + "type":"list", + "member":{"shape":"NonEmptyString"}, + "max":10, + "min":1 + }, "DisableDelegatedAdminAccountRequest":{ "type":"structure", "required":["delegatedAdminAccountId"], @@ -2367,7 +2784,22 @@ }, "documentation":"

      Details about the Exploit Prediction Scoring System (EPSS) score.

      " }, + "EpssDetails":{ + "type":"structure", + "members":{ + "score":{ + "shape":"EpssScoreValue", + "documentation":"

      The EPSS score.

      " + } + }, + "documentation":"

      Details about the Exploit Prediction Scoring System (EPSS) score for a finding.

      " + }, "EpssScore":{"type":"double"}, + "EpssScoreValue":{ + "type":"double", + "max":1.0, + "min":0.0 + }, "ErrorCode":{ "type":"string", "enum":[ @@ -2577,6 +3009,18 @@ "shape":"StringFilterList", "documentation":"

      Details of the Amazon Web Services account IDs used to filter findings.

      " }, + "codeVulnerabilityDetectorName":{ + "shape":"StringFilterList", + "documentation":"

      The name of the detector used to identify a code vulnerability in a Lambda function used to filter findings.

      " + }, + "codeVulnerabilityDetectorTags":{ + "shape":"StringFilterList", + "documentation":"

      The detector type tag associated with the vulnerability used to filter findings. Detector tags group related vulnerabilities by common themes or tactics. For a list of available tags by programming language, see Java tags, or Python tags.

      " + }, + "codeVulnerabilityFilePath":{ + "shape":"StringFilterList", + "documentation":"

      The file path to the file in a Lambda function that contains a code vulnerability used to filter findings.

      " + }, "componentId":{ "shape":"StringFilterList", "documentation":"

      Details of the component IDs used to filter findings.

      " @@ -2621,6 +3065,10 @@ "shape":"StringFilterList", "documentation":"

      The tags attached to the Amazon ECR container image.

      " }, + "epssScore":{ + "shape":"NumberFilterList", + "documentation":"

      The EPSS score used to filter findings.

      " + }, "exploitAvailable":{ "shape":"StringFilterList", "documentation":"

      Filters the list of AWS Lambda findings by the availability of exploits.

      " @@ -2766,10 +3214,18 @@ "shape":"AccountId", "documentation":"

      The Amazon Web Services account ID associated with the finding.

      " }, + "codeVulnerabilityDetails":{ + "shape":"CodeVulnerabilityDetails", + "documentation":"

      Details about the code vulnerability identified in a Lambda function used to filter findings.

      " + }, "description":{ "shape":"FindingDescription", "documentation":"

      The description of the finding.

      " }, + "epss":{ + "shape":"EpssDetails", + "documentation":"

      The finding's EPSS score.

      " + }, "exploitAvailable":{ "shape":"ExploitAvailable", "documentation":"

      If a finding discovered in your environment has an exploit available.

      " @@ -2875,7 +3331,8 @@ "type":"string", "enum":[ "NETWORK_REACHABILITY", - "PACKAGE_VULNERABILITY" + "PACKAGE_VULNERABILITY", + "CODE_VULNERABILITY" ] }, "FindingTypeAggregation":{ @@ -3031,7 +3488,8 @@ "enum":[ "EC2", "ECR", - "LAMBDA" + "LAMBDA", + "LAMBDA_CODE" ] }, "FunctionName":{ @@ -3092,6 +3550,37 @@ } } }, + "GetEncryptionKeyRequest":{ + "type":"structure", + "required":[ + "resourceType", + "scanType" + ], + "members":{ + "resourceType":{ + "shape":"ResourceType", + "documentation":"

      The resource type the key encrypts.

      ", + "location":"querystring", + "locationName":"resourceType" + }, + "scanType":{ + "shape":"ScanType", + "documentation":"

      The scan type the key encrypts.

      ", + "location":"querystring", + "locationName":"scanType" + } + } + }, + "GetEncryptionKeyResponse":{ + "type":"structure", + "required":["kmsKeyId"], + "members":{ + "kmsKeyId":{ + "shape":"KmsKeyArn", + "documentation":"

      A KMS key ID.

      " + } + } + }, "GetFindingsReportStatusRequest":{ "type":"structure", "members":{ @@ -3149,6 +3638,46 @@ } } }, + "GetSbomExportRequest":{ + "type":"structure", + "required":["reportId"], + "members":{ + "reportId":{ + "shape":"ReportId", + "documentation":"

      The report ID of the SBOM export to get details for.

      " + } + } + }, + "GetSbomExportResponse":{ + "type":"structure", + "members":{ + "errorCode":{ + "shape":"ReportingErrorCode", + "documentation":"

      An error code.

      " + }, + "errorMessage":{ + "shape":"NonEmptyString", + "documentation":"

      An error message.

      " + }, + "filterCriteria":{ + "shape":"ResourceFilterCriteria", + "documentation":"

      Contains details about the resource filter criteria used for the software bill of materials (SBOM) report.

      " + }, + "format":{ + "shape":"SbomReportFormat", + "documentation":"

      The format of the software bill of materials (SBOM) report.

      " + }, + "reportId":{ + "shape":"ReportId", + "documentation":"

      The report ID of the software bill of materials (SBOM) report.

      " + }, + "s3Destination":{"shape":"Destination"}, + "status":{ + "shape":"ExternalReportStatus", + "documentation":"

      The status of the software bill of materials (SBOM) report.

      " + } + } + }, "GroupKey":{ "type":"string", "enum":[ @@ -3286,6 +3815,10 @@ "type":"list", "member":{"shape":"IpV6Address"} }, + "KmsKeyArn":{ + "type":"string", + "pattern":"^arn:aws(-(us-gov|cn))?:kms:([a-z0-9][-.a-z0-9]{0,62})?:[0-9]{12}?:key/(([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})|(mrk-[0-9a-zA-Z]{32}))$" + }, "LambdaFunctionAggregation":{ "type":"structure", "members":{ @@ -4344,6 +4877,12 @@ }, "documentation":"

      Details about the recommended course of action to remediate the finding.

      " }, + "ReferenceUrls":{ + "type":"list", + "member":{"shape":"NonEmptyString"}, + "max":10, + "min":1 + }, "RelatedVulnerabilities":{ "type":"list", "member":{"shape":"RelatedVulnerability"}, @@ -4453,6 +4992,28 @@ "AFFECTED_IMAGES" ] }, + "ResetEncryptionKeyRequest":{ + "type":"structure", + "required":[ + "resourceType", + "scanType" + ], + "members":{ + "resourceType":{ + "shape":"ResourceType", + "documentation":"

      The resource type the key encrypts.

      " + }, + "scanType":{ + "shape":"ScanType", + "documentation":"

      The scan type the key encrypts.

      " + } + } + }, + "ResetEncryptionKeyResponse":{ + "type":"structure", + "members":{ + } + }, "Resource":{ "type":"structure", "required":[ @@ -4505,6 +5066,44 @@ }, "documentation":"

      Contains details about the resource involved in the finding.

      " }, + "ResourceFilterCriteria":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"ResourceStringFilterList", + "documentation":"

      The account IDs used as resource filter criteria.

      " + }, + "ec2InstanceTags":{ + "shape":"ResourceMapFilterList", + "documentation":"

      The EC2 instance tags used as resource filter criteria.

      " + }, + "ecrImageTags":{ + "shape":"ResourceStringFilterList", + "documentation":"

      The ECR image tags used as resource filter criteria.

      " + }, + "ecrRepositoryName":{ + "shape":"ResourceStringFilterList", + "documentation":"

      The ECR repository names used as resource filter criteria.

      " + }, + "lambdaFunctionName":{ + "shape":"ResourceStringFilterList", + "documentation":"

      The AWS Lambda function name used as resource filter criteria.

      " + }, + "lambdaFunctionTags":{ + "shape":"ResourceMapFilterList", + "documentation":"

      The AWS Lambda function tags used as resource filter criteria.

      " + }, + "resourceId":{ + "shape":"ResourceStringFilterList", + "documentation":"

      The resource IDs used as resource filter criteria.

      " + }, + "resourceType":{ + "shape":"ResourceStringFilterList", + "documentation":"

      The resource types used as resource filter criteria.

      " + } + }, + "documentation":"

      The resource filter criteria for a Software bill of materials (SBOM) report.

      " + }, "ResourceId":{ "type":"string", "max":341, @@ -4517,6 +5116,38 @@ "max":10, "min":1 }, + "ResourceMapComparison":{ + "type":"string", + "enum":["EQUALS"] + }, + "ResourceMapFilter":{ + "type":"structure", + "required":[ + "comparison", + "key" + ], + "members":{ + "comparison":{ + "shape":"ResourceMapComparison", + "documentation":"

      The filter's comparison.

      " + }, + "key":{ + "shape":"NonEmptyString", + "documentation":"

      The filter's key.

      " + }, + "value":{ + "shape":"NonEmptyString", + "documentation":"

      The filter's value.

      " + } + }, + "documentation":"

      A resource map filter for a software bill of material report.

      " + }, + "ResourceMapFilterList":{ + "type":"list", + "member":{"shape":"ResourceMapFilter"}, + "max":10, + "min":1 + }, "ResourceNotFoundException":{ "type":"structure", "required":["message"], @@ -4557,7 +5188,8 @@ "enum":[ "EC2", "ECR", - "LAMBDA" + "LAMBDA", + "LAMBDA_CODE" ] }, "ResourceState":{ @@ -4575,7 +5207,8 @@ "shape":"State", "documentation":"

      An object detailing the state of Amazon Inspector scanning for Amazon ECR resources.

      " }, - "lambda":{"shape":"State"} + "lambda":{"shape":"State"}, + "lambdaCode":{"shape":"State"} }, "documentation":"

      Details the state of Amazon Inspector for each resource type Amazon Inspector scans.

      " }, @@ -4597,10 +5230,50 @@ "lambda":{ "shape":"Status", "documentation":"

      The status of Amazon Inspector scanning for AWS Lambda function.

      " + }, + "lambdaCode":{ + "shape":"Status", + "documentation":"

      The status of Amazon Inspector scanning for custom application code for Amazon Web Services Lambda functions.

      " } }, "documentation":"

      Details the status of Amazon Inspector for each resource type Amazon Inspector scans.

      " }, + "ResourceStringComparison":{ + "type":"string", + "enum":[ + "EQUALS", + "NOT_EQUALS" + ] + }, + "ResourceStringFilter":{ + "type":"structure", + "required":[ + "comparison", + "value" + ], + "members":{ + "comparison":{ + "shape":"ResourceStringComparison", + "documentation":"

      The filter's comparison.

      " + }, + "value":{ + "shape":"ResourceStringInput", + "documentation":"

      The filter's value.

      " + } + }, + "documentation":"

      A resource string filter for a software bill of materials report.

      " + }, + "ResourceStringFilterList":{ + "type":"list", + "member":{"shape":"ResourceStringFilter"}, + "max":10, + "min":1 + }, + "ResourceStringInput":{ + "type":"string", + "max":1024, + "min":1 + }, "ResourceType":{ "type":"string", "enum":[ @@ -4625,7 +5298,16 @@ "PYTHON_3_9", "UNSUPPORTED", "NODEJS_18_X", - "GO_1_X" + "GO_1_X", + "JAVA_17", + "PYTHON_3_10" + ] + }, + "SbomReportFormat":{ + "type":"string", + "enum":[ + "CYCLONEDX_1_4", + "SPDX_2_3" ] }, "ScanStatus":{ @@ -4686,7 +5368,8 @@ "type":"string", "enum":[ "NETWORK", - "PACKAGE" + "PACKAGE", + "CODE" ] }, "SearchVulnerabilitiesFilterCriteria":{ @@ -4835,7 +5518,8 @@ "VULNERABILITY_ID", "VULNERABILITY_SOURCE", "INSPECTOR_SCORE", - "VENDOR_SEVERITY" + "VENDOR_SEVERITY", + "EPSS_SCORE" ] }, "SortOrder":{ @@ -4961,6 +5645,36 @@ "max":16, "min":0 }, + "SuggestedFix":{ + "type":"structure", + "members":{ + "code":{ + "shape":"SuggestedFixCodeString", + "documentation":"

      The fix's code.

      " + }, + "description":{ + "shape":"SuggestedFixDescriptionString", + "documentation":"

      The fix's description.

      " + } + }, + "documentation":"

      A suggested fix for a vulnerability in your Lambda function code.

      " + }, + "SuggestedFixCodeString":{ + "type":"string", + "max":2500, + "min":1 + }, + "SuggestedFixDescriptionString":{ + "type":"string", + "max":1000, + "min":1 + }, + "SuggestedFixes":{ + "type":"list", + "member":{"shape":"SuggestedFix"}, + "max":5, + "min":1 + }, "TagKey":{ "type":"string", "max":128, @@ -5040,6 +5754,10 @@ "TitleAggregation":{ "type":"structure", "members":{ + "findingType":{ + "shape":"AggregationFindingType", + "documentation":"

      The type of finding to aggregate on.

      " + }, "resourceType":{ "shape":"AggregationResourceType", "documentation":"

      The resource type to aggregate on.

      " @@ -5179,6 +5897,33 @@ } } }, + "UpdateEncryptionKeyRequest":{ + "type":"structure", + "required":[ + "kmsKeyId", + "resourceType", + "scanType" + ], + "members":{ + "kmsKeyId":{ + "shape":"KmsKeyArn", + "documentation":"

      A KMS key ID for the encryption key.

      " + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

      The resource type for the encryption key.

      " + }, + "scanType":{ + "shape":"ScanType", + "documentation":"

      The scan type for the encryption key.

      " + } + } + }, + "UpdateEncryptionKeyResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateFilterRequest":{ "type":"structure", "required":["filterArn"], @@ -5314,7 +6059,8 @@ "EC2_INSTANCE_HOURS", "ECR_INITIAL_SCAN", "ECR_RESCAN", - "LAMBDA_FUNCTION_HOURS" + "LAMBDA_FUNCTION_HOURS", + "LAMBDA_FUNCTION_CODE_HOURS" ] }, "UsageValue":{ @@ -5440,7 +6186,7 @@ }, "epss":{ "shape":"Epss", - "documentation":"

      An object that contains the Exploit Prediction Scoring System (EPSS) score.

      " + "documentation":"

      An object that contains the Exploit Prediction Scoring System (EPSS) score for a vulnerability.

      " }, "exploitObserved":{ "shape":"ExploitObserved", diff --git a/services/internetmonitor/pom.xml b/services/internetmonitor/pom.xml index 2858a401d86d..2fffb5ce44f6 100644 --- a/services/internetmonitor/pom.xml +++ b/services/internetmonitor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT internetmonitor AWS Java SDK :: Services :: Internet Monitor diff --git a/services/iot/pom.xml b/services/iot/pom.xml index 20e9b0dd9486..205c7cb619a5 100644 --- a/services/iot/pom.xml +++ b/services/iot/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT iot AWS Java SDK :: Services :: AWS IoT diff --git a/services/iot/src/main/resources/codegen-resources/paginators-1.json b/services/iot/src/main/resources/codegen-resources/paginators-1.json index f4bca55b6b5c..d7b2eb6ab351 100644 --- a/services/iot/src/main/resources/codegen-resources/paginators-1.json +++ b/services/iot/src/main/resources/codegen-resources/paginators-1.json @@ -174,6 +174,18 @@ "output_token": "nextMarker", "result_key": "outgoingCertificates" }, + "ListPackageVersions": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "packageVersionSummaries" + }, + "ListPackages": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "packageSummaries" + }, "ListPolicies": { "input_token": "marker", "limit_key": "pageSize", diff --git a/services/iot/src/main/resources/codegen-resources/service-2.json b/services/iot/src/main/resources/codegen-resources/service-2.json index 2b8382f1c455..bd20791f4bac 100644 --- a/services/iot/src/main/resources/codegen-resources/service-2.json +++ b/services/iot/src/main/resources/codegen-resources/service-2.json @@ -353,7 +353,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"InternalFailureException"} ], - "documentation":"

      Creates an X.509 certificate using the specified certificate signing request.

      Requires permission to access the CreateCertificateFromCsr action.

      The CSR must include a public key that is either an RSA key with a length of at least 2048 bits or an ECC key from NIST P-25 or NIST P-384 curves. For supported certificates, consult Certificate signing algorithms supported by IoT.

      Reusing the same certificate signing request (CSR) results in a distinct certificate.

      You can create multiple certificates in a batch by creating a directory, copying multiple .csr files into that directory, and then specifying that directory on the command line. The following commands show how to create a batch of certificates given a batch of CSRs. In the following commands, we assume that a set of CSRs are located inside of the directory my-csr-directory:

      On Linux and OS X, the command is:

      $ ls my-csr-directory/ | xargs -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{}

      This command lists all of the CSRs in my-csr-directory and pipes each CSR file name to the aws iot create-certificate-from-csr Amazon Web Services CLI command to create a certificate for the corresponding CSR.

      You can also run the aws iot create-certificate-from-csr part of the command in parallel to speed up the certificate creation process:

      $ ls my-csr-directory/ | xargs -P 10 -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{}

      On Windows PowerShell, the command to create certificates for all CSRs in my-csr-directory is:

      > ls -Name my-csr-directory | %{aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/$_}

      On a Windows command prompt, the command to create certificates for all CSRs in my-csr-directory is:

      > forfiles /p my-csr-directory /c \"cmd /c aws iot create-certificate-from-csr --certificate-signing-request file://@path\"

      " + "documentation":"

      Creates an X.509 certificate using the specified certificate signing request.

      Requires permission to access the CreateCertificateFromCsr action.

      The CSR must include a public key that is either an RSA key with a length of at least 2048 bits or an ECC key from NIST P-256 or NIST P-384 curves. For supported certificates, consult Certificate signing algorithms supported by IoT.

      Reusing the same certificate signing request (CSR) results in a distinct certificate.

      You can create multiple certificates in a batch by creating a directory, copying multiple .csr files into that directory, and then specifying that directory on the command line. The following commands show how to create a batch of certificates given a batch of CSRs. In the following commands, we assume that a set of CSRs are located inside of the directory my-csr-directory:

      On Linux and OS X, the command is:

      $ ls my-csr-directory/ | xargs -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{}

      This command lists all of the CSRs in my-csr-directory and pipes each CSR file name to the aws iot create-certificate-from-csr Amazon Web Services CLI command to create a certificate for the corresponding CSR.

      You can also run the aws iot create-certificate-from-csr part of the command in parallel to speed up the certificate creation process:

      $ ls my-csr-directory/ | xargs -P 10 -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{}

      On Windows PowerShell, the command to create certificates for all CSRs in my-csr-directory is:

      > ls -Name my-csr-directory | %{aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/$_}

      On a Windows command prompt, the command to create certificates for all CSRs in my-csr-directory is:

      > forfiles /p my-csr-directory /c \"cmd /c aws iot create-certificate-from-csr --certificate-signing-request file://@path\"

      " }, "CreateCustomMetric":{ "name":"CreateCustomMetric", @@ -541,6 +541,44 @@ ], "documentation":"

      Creates an IoT OTA update on a target group of things or groups.

      Requires permission to access the CreateOTAUpdate action.

      " }, + "CreatePackage":{ + "name":"CreatePackage", + "http":{ + "method":"PUT", + "requestUri":"/packages/{packageName}", + "responseCode":200 + }, + "input":{"shape":"CreatePackageRequest"}, + "output":{"shape":"CreatePackageResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

      Creates an IoT software package that can be deployed to your fleet.

      Requires permission to access the CreatePackage and GetIndexingConfiguration actions.

      ", + "idempotent":true + }, + "CreatePackageVersion":{ + "name":"CreatePackageVersion", + "http":{ + "method":"PUT", + "requestUri":"/packages/{packageName}/versions/{versionName}", + "responseCode":200 + }, + "input":{"shape":"CreatePackageVersionRequest"}, + "output":{"shape":"CreatePackageVersionResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

      Creates a new version for an existing IoT software package.

      Requires permission to access the CreatePackageVersion and GetIndexingConfiguration actions.

      ", + "idempotent":true + }, "CreatePolicy":{ "name":"CreatePolicy", "http":{ @@ -1061,6 +1099,40 @@ ], "documentation":"

      Delete an OTA update.

      Requires permission to access the DeleteOTAUpdate action.

      " }, + "DeletePackage":{ + "name":"DeletePackage", + "http":{ + "method":"DELETE", + "requestUri":"/packages/{packageName}", + "responseCode":200 + }, + "input":{"shape":"DeletePackageRequest"}, + "output":{"shape":"DeletePackageResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Deletes a specific software package.

      Note: All package versions must be deleted before deleting the software package.

      Requires permission to access the DeletePackage action.

      ", + "idempotent":true + }, + "DeletePackageVersion":{ + "name":"DeletePackageVersion", + "http":{ + "method":"DELETE", + "requestUri":"/packages/{packageName}/versions/{versionName}", + "responseCode":200 + }, + "input":{"shape":"DeletePackageVersionRequest"}, + "output":{"shape":"DeletePackageVersionResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Deletes a specific version from a software package.

      Note: If a package version is designated as default, you must remove the designation from the package using the UpdatePackage action.

      ", + "idempotent":true + }, "DeletePolicy":{ "name":"DeletePolicy", "http":{ @@ -2133,6 +2205,55 @@ ], "documentation":"

      Gets an OTA update.

      Requires permission to access the GetOTAUpdate action.

      " }, + "GetPackage":{ + "name":"GetPackage", + "http":{ + "method":"GET", + "requestUri":"/packages/{packageName}", + "responseCode":200 + }, + "input":{"shape":"GetPackageRequest"}, + "output":{"shape":"GetPackageResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Gets information about the specified software package.

      Requires permission to access the GetPackage action.

      " + }, + "GetPackageConfiguration":{ + "name":"GetPackageConfiguration", + "http":{ + "method":"GET", + "requestUri":"/package-configuration", + "responseCode":200 + }, + "input":{"shape":"GetPackageConfigurationRequest"}, + "output":{"shape":"GetPackageConfigurationResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Gets information about the specified software package's configuration.

      Requires permission to access the GetPackageConfiguration action.

      " + }, + "GetPackageVersion":{ + "name":"GetPackageVersion", + "http":{ + "method":"GET", + "requestUri":"/packages/{packageName}/versions/{versionName}", + "responseCode":200 + }, + "input":{"shape":"GetPackageVersionRequest"}, + "output":{"shape":"GetPackageVersionResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Gets information about the specified package version.

      Requires permission to access the GetPackageVersion action.

      " + }, "GetPercentiles":{ "name":"GetPercentiles", "http":{ @@ -2724,6 +2845,38 @@ ], "documentation":"

      Lists certificates that are being transferred but not yet accepted.

      Requires permission to access the ListOutgoingCertificates action.

      " }, + "ListPackageVersions":{ + "name":"ListPackageVersions", + "http":{ + "method":"GET", + "requestUri":"/packages/{packageName}/versions", + "responseCode":200 + }, + "input":{"shape":"ListPackageVersionsRequest"}, + "output":{"shape":"ListPackageVersionsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Lists the software package versions associated with the account.

      Requires permission to access the ListPackageVersions action.

      " + }, + "ListPackages":{ + "name":"ListPackages", + "http":{ + "method":"GET", + "requestUri":"/packages", + "responseCode":200 + }, + "input":{"shape":"ListPackagesRequest"}, + "output":{"shape":"ListPackagesResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Lists the software packages associated with the account.

      Requires permission to access the ListPackages action.

      " + }, "ListPolicies":{ "name":"ListPolicies", "http":{ @@ -3899,6 +4052,59 @@ ], "documentation":"

      Updates the definition for the specified mitigation action.

      Requires permission to access the UpdateMitigationAction action.

      " }, + "UpdatePackage":{ + "name":"UpdatePackage", + "http":{ + "method":"PATCH", + "requestUri":"/packages/{packageName}", + "responseCode":200 + }, + "input":{"shape":"UpdatePackageRequest"}, + "output":{"shape":"UpdatePackageResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Updates the supported fields for a specific package.

      Requires permission to access the UpdatePackage and GetIndexingConfiguration actions.

      ", + "idempotent":true + }, + "UpdatePackageConfiguration":{ + "name":"UpdatePackageConfiguration", + "http":{ + "method":"PATCH", + "requestUri":"/package-configuration", + "responseCode":200 + }, + "input":{"shape":"UpdatePackageConfigurationRequest"}, + "output":{"shape":"UpdatePackageConfigurationResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Updates the package configuration.

      Requires permission to access the UpdatePackageConfiguration and iam:PassRole actions.

      ", + "idempotent":true + }, + "UpdatePackageVersion":{ + "name":"UpdatePackageVersion", + "http":{ + "method":"PATCH", + "requestUri":"/packages/{packageName}/versions/{versionName}", + "responseCode":200 + }, + "input":{"shape":"UpdatePackageVersionRequest"}, + "output":{"shape":"UpdatePackageVersionResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Updates the supported fields for a specific package version.

      Requires permission to access the UpdatePackageVersion and GetIndexingConfiguration actions.

      ", + "idempotent":true + }, "UpdateProvisioningTemplate":{ "name":"UpdateProvisioningTemplate", "http":{ @@ -6081,6 +6287,12 @@ "min":1, "pattern":"^[a-zA-Z0-9-_]+$" }, + "ClientToken":{ + "type":"string", + "max":64, + "min":36, + "pattern":"\\S{36,64}" + }, "CloudwatchAlarmAction":{ "type":"structure", "required":[ @@ -6276,7 +6488,11 @@ "ConflictException":{ "type":"structure", "members":{ - "message":{"shape":"errorMessage"} + "message":{"shape":"errorMessage"}, + "resourceId":{ + "shape":"resourceId", + "documentation":"

      The ID of the resource that caused the conflict.

      " + } }, "documentation":"

      A resource with the same name already exists.

      ", "error":{"httpStatusCode":409}, @@ -6774,7 +6990,7 @@ }, "documentSource":{ "shape":"JobDocumentSource", - "documentation":"

      An S3 link, or S3 object URL, to the job document. The link is an Amazon S3 object URL and is required if you don't specify a value for document.

      For example, --document-source https://s3.region-code.amazonaws.com/example-firmware/device-firmware.1.0.

      For more information, see Methods for accessing a bucket.

      " + "documentation":"

      An S3 link, or S3 object URL, to the job document. The link is an Amazon S3 object URL and is required if you don't specify a value for document.

      For example, --document-source https://s3.region-code.amazonaws.com/example-firmware/device-firmware.1.0

      For more information, see Methods for accessing a bucket.

      " }, "document":{ "shape":"JobDocument", @@ -6827,6 +7043,10 @@ "schedulingConfig":{ "shape":"SchedulingConfig", "documentation":"

      The configuration that allows you to schedule a job for a future date and time in addition to specifying the end behavior for each job execution.

      " + }, + "destinationPackageVersions":{ + "shape":"DestinationPackageVersions", + "documentation":"

      The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes.

      Note: The following Length Constraints relate to a single string. Up to five strings are allowed.

      " } } }, @@ -6891,6 +7111,10 @@ "maintenanceWindows":{ "shape":"MaintenanceWindows", "documentation":"

      Allows you to configure an optional maintenance window for the rollout of a job document to all devices in the target group for a job.

      " + }, + "destinationPackageVersions":{ + "shape":"DestinationPackageVersions", + "documentation":"

      The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes.

      Note: The following Length Constraints relate to a single string. Up to five strings are allowed.

      " } } }, @@ -7072,6 +7296,123 @@ } } }, + "CreatePackageRequest":{ + "type":"structure", + "required":["packageName"], + "members":{ + "packageName":{ + "shape":"PackageName", + "documentation":"

      The name of the new package.

      ", + "location":"uri", + "locationName":"packageName" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

      A summary of the package being created. This can be used to outline the package's contents or purpose.

      " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

      Metadata that can be used to manage the package.

      " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

      A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

      ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "CreatePackageResponse":{ + "type":"structure", + "members":{ + "packageName":{ + "shape":"PackageName", + "documentation":"

      The name of the package.

      " + }, + "packageArn":{ + "shape":"PackageArn", + "documentation":"

      The Amazon Resource Name (ARN) for the package.

      " + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

      The package description.

      " + } + } + }, + "CreatePackageVersionRequest":{ + "type":"structure", + "required":[ + "packageName", + "versionName" + ], + "members":{ + "packageName":{ + "shape":"PackageName", + "documentation":"

      The name of the associated package.

      ", + "location":"uri", + "locationName":"packageName" + }, + "versionName":{ + "shape":"VersionName", + "documentation":"

      The name of the new package version.

      ", + "location":"uri", + "locationName":"versionName" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

      A summary of the package version being created. This can be used to outline the package's contents or purpose.

      " + }, + "attributes":{ + "shape":"ResourceAttributes", + "documentation":"

      Metadata that can be used to define a package version’s configuration. For example, the S3 file location, configuration options that are being sent to the device or fleet.

      The combined size of all the attributes on a package version is limited to 3KB.

      " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

      Metadata that can be used to manage the package version.

      " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

      A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

      ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "CreatePackageVersionResponse":{ + "type":"structure", + "members":{ + "packageVersionArn":{ + "shape":"PackageVersionArn", + "documentation":"

      The Amazon Resource Name (ARN) for the package.

      " + }, + "packageName":{ + "shape":"PackageName", + "documentation":"

      The name of the associated package.

      " + }, + "versionName":{ + "shape":"VersionName", + "documentation":"

      The name of the new package version.

      " + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

      The package version description.

      " + }, + "attributes":{ + "shape":"ResourceAttributes", + "documentation":"

      Metadata that was added to the package version that can be used to define a package version’s configuration.

      " + }, + "status":{ + "shape":"PackageVersionStatus", + "documentation":"

      The status of the package version. For more information, see Package version lifecycle.

      " + }, + "errorReason":{ + "shape":"PackageVersionErrorReason", + "documentation":"

      Error reason for a package version failure during creation or update.

      " + } + } + }, "CreatePolicyRequest":{ "type":"structure", "required":[ @@ -8076,6 +8417,63 @@ "members":{ } }, + "DeletePackageRequest":{ + "type":"structure", + "required":["packageName"], + "members":{ + "packageName":{ + "shape":"PackageName", + "documentation":"

      The name of the target package.

      ", + "location":"uri", + "locationName":"packageName" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

      A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

      ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "DeletePackageResponse":{ + "type":"structure", + "members":{ + } + }, + "DeletePackageVersionRequest":{ + "type":"structure", + "required":[ + "packageName", + "versionName" + ], + "members":{ + "packageName":{ + "shape":"PackageName", + "documentation":"

      The name of the associated package.

      ", + "location":"uri", + "locationName":"packageName" + }, + "versionName":{ + "shape":"VersionName", + "documentation":"

      The name of the target package version.

      ", + "location":"uri", + "locationName":"versionName" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

      A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

      ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "DeletePackageVersionResponse":{ + "type":"structure", + "members":{ + } + }, "DeletePolicyRequest":{ "type":"structure", "required":["policyName"], @@ -9094,6 +9492,10 @@ "maintenanceWindows":{ "shape":"MaintenanceWindows", "documentation":"

      Allows you to configure an optional maintenance window for the rollout of a job document to all devices in the target group for a job.

      " + }, + "destinationPackageVersions":{ + "shape":"DestinationPackageVersions", + "documentation":"

      The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes.

      Note: The following Length Constraints relate to a single string. Up to five strings are allowed.

      " } } }, @@ -9665,6 +10067,10 @@ }, "documentation":"

      Describes the location of the updated firmware.

      " }, + "DestinationPackageVersions":{ + "type":"list", + "member":{"shape":"PackageVersionArn"} + }, "DetachPolicyRequest":{ "type":"structure", "required":[ @@ -10080,7 +10486,7 @@ "DurationInMinutes":{ "type":"integer", "max":1430, - "min":30 + "min":1 }, "DurationSeconds":{"type":"integer"}, "DynamicGroupStatus":{ @@ -10264,6 +10670,10 @@ "documentation":"

      The input for the EnableTopicRuleRequest operation.

      " }, "Enabled":{"type":"boolean"}, + "EnabledBoolean":{ + "type":"boolean", + "box":true + }, "EndpointAddress":{"type":"string"}, "EndpointType":{ "type":"string", @@ -10758,6 +11168,123 @@ } } }, + "GetPackageConfigurationRequest":{ + "type":"structure", + "members":{ + } + }, + "GetPackageConfigurationResponse":{ + "type":"structure", + "members":{ + "versionUpdateByJobsConfig":{ + "shape":"VersionUpdateByJobsConfig", + "documentation":"

      The version that is associated to a specific job.

      " + } + } + }, + "GetPackageRequest":{ + "type":"structure", + "required":["packageName"], + "members":{ + "packageName":{ + "shape":"PackageName", + "documentation":"

      The name of the target package.

      ", + "location":"uri", + "locationName":"packageName" + } + } + }, + "GetPackageResponse":{ + "type":"structure", + "members":{ + "packageName":{ + "shape":"PackageName", + "documentation":"

      The name of the package.

      " + }, + "packageArn":{ + "shape":"PackageArn", + "documentation":"

      The ARN for the package.

      " + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

      The package description.

      " + }, + "defaultVersionName":{ + "shape":"VersionName", + "documentation":"

      The name of the default package version.

      " + }, + "creationDate":{ + "shape":"CreationDate", + "documentation":"

      The date the package was created.

      " + }, + "lastModifiedDate":{ + "shape":"LastModifiedDate", + "documentation":"

      The date when the package was last updated.

      " + } + } + }, + "GetPackageVersionRequest":{ + "type":"structure", + "required":[ + "packageName", + "versionName" + ], + "members":{ + "packageName":{ + "shape":"PackageName", + "documentation":"

      The name of the associated package.

      ", + "location":"uri", + "locationName":"packageName" + }, + "versionName":{ + "shape":"VersionName", + "documentation":"

      The name of the target package version.

      ", + "location":"uri", + "locationName":"versionName" + } + } + }, + "GetPackageVersionResponse":{ + "type":"structure", + "members":{ + "packageVersionArn":{ + "shape":"PackageVersionArn", + "documentation":"

      The ARN for the package version.

      " + }, + "packageName":{ + "shape":"PackageName", + "documentation":"

      The name of the package.

      " + }, + "versionName":{ + "shape":"VersionName", + "documentation":"

      The name of the package version.

      " + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

      The package version description.

      " + }, + "attributes":{ + "shape":"ResourceAttributes", + "documentation":"

      Metadata that was added to the package version that can be used to define a package version’s configuration.

      " + }, + "status":{ + "shape":"PackageVersionStatus", + "documentation":"

      The status associated to the package version. For more information, see Package version lifecycle.

      " + }, + "errorReason":{ + "shape":"PackageVersionErrorReason", + "documentation":"

      Error reason for a package version failure during creation or update.

      " + }, + "creationDate":{ + "shape":"CreationDate", + "documentation":"

      The date when the package version was created.

      " + }, + "lastModifiedDate":{ + "shape":"LastModifiedDate", + "documentation":"

      The date when the package version was last updated.

      " + } + } + }, "GetPercentilesRequest":{ "type":"structure", "required":["queryString"], @@ -11524,6 +12051,10 @@ "scheduledJobRollouts":{ "shape":"ScheduledJobRolloutList", "documentation":"

      Displays the next seven maintenance window occurrences and their start times.

      " + }, + "destinationPackageVersions":{ + "shape":"DestinationPackageVersions", + "documentation":"

      The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes.

      Note: The following Length Constraints relate to a single string. Up to five strings are allowed.

      " } }, "documentation":"

      The Job object contains details about a job.

      " @@ -13213,6 +13744,79 @@ }, "documentation":"

      The output from the ListOutgoingCertificates operation.

      " }, + "ListPackageVersionsRequest":{ + "type":"structure", + "required":["packageName"], + "members":{ + "packageName":{ + "shape":"PackageName", + "documentation":"

      The name of the target package.

      ", + "location":"uri", + "locationName":"packageName" + }, + "status":{ + "shape":"PackageVersionStatus", + "documentation":"

      The status of the package version. For more information, see Package version lifecycle.

      ", + "location":"querystring", + "locationName":"status" + }, + "maxResults":{ + "shape":"PackageCatalogMaxResults", + "documentation":"

      The maximum number of results to return at one time.

      ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

      The token for the next set of results.

      ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListPackageVersionsResponse":{ + "type":"structure", + "members":{ + "packageVersionSummaries":{ + "shape":"PackageVersionSummaryList", + "documentation":"

      Lists the package versions associated with the package.

      " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

      The token for the next set of results.

      " + } + } + }, + "ListPackagesRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"PackageCatalogMaxResults", + "documentation":"

      The maximum number of results returned at one time.

      ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

      The token for the next set of results.

      ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListPackagesResponse":{ + "type":"structure", + "members":{ + "packageSummaries":{ + "shape":"PackageSummaryList", + "documentation":"

      The software package summary.

      " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

      The token for the next set of results.

      " + } + } + }, "ListPoliciesRequest":{ "type":"structure", "members":{ @@ -15187,6 +15791,97 @@ "member":{"shape":"OutgoingCertificate"} }, "OverrideDynamicGroups":{"type":"boolean"}, + "PackageArn":{"type":"string"}, + "PackageCatalogMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "PackageName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9-_.]+" + }, + "PackageSummary":{ + "type":"structure", + "members":{ + "packageName":{ + "shape":"PackageName", + "documentation":"

      The name for the target package.

      " + }, + "defaultVersionName":{ + "shape":"VersionName", + "documentation":"

      The name of the default package version.

      " + }, + "creationDate":{ + "shape":"CreationDate", + "documentation":"

      The date that the package was created.

      " + }, + "lastModifiedDate":{ + "shape":"LastModifiedDate", + "documentation":"

      The date that the package was last updated.

      " + } + }, + "documentation":"

      A summary of information about a software package.

      " + }, + "PackageSummaryList":{ + "type":"list", + "member":{"shape":"PackageSummary"} + }, + "PackageVersionAction":{ + "type":"string", + "enum":[ + "PUBLISH", + "DEPRECATE" + ] + }, + "PackageVersionArn":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"^arn:[!-~]+$" + }, + "PackageVersionErrorReason":{"type":"string"}, + "PackageVersionStatus":{ + "type":"string", + "enum":[ + "DRAFT", + "PUBLISHED", + "DEPRECATED" + ] + }, + "PackageVersionSummary":{ + "type":"structure", + "members":{ + "packageName":{ + "shape":"PackageName", + "documentation":"

      The name of the associated software package.

      " + }, + "versionName":{ + "shape":"VersionName", + "documentation":"

      The name of the target package version.

      " + }, + "status":{ + "shape":"PackageVersionStatus", + "documentation":"

      The status of the package version. For more information, see Package version lifecycle.

      " + }, + "creationDate":{ + "shape":"CreationDate", + "documentation":"

      The date that the package version was created.

      " + }, + "lastModifiedDate":{ + "shape":"LastModifiedDate", + "documentation":"

      The date that the package version was last updated.

      " + } + }, + "documentation":"

      A summary of information about a package version.

      " + }, + "PackageVersionSummaryList":{ + "type":"list", + "member":{"shape":"PackageVersionSummary"} + }, "PageSize":{ "type":"integer", "max":250, @@ -16029,6 +16724,29 @@ "key":{"shape":"ResourceLogicalId"}, "value":{"shape":"ResourceArn"} }, + "ResourceAttributeKey":{ + "type":"string", + "min":1, + "pattern":"[a-zA-Z0-9:_-]+" + }, + "ResourceAttributeValue":{ + "type":"string", + "min":1, + "pattern":"[^\\p{C}]+" + }, + "ResourceAttributes":{ + "type":"map", + "key":{"shape":"ResourceAttributeKey"}, + "value":{"shape":"ResourceAttributeValue"}, + "sensitive":true + }, + "ResourceDescription":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"[^\\p{C}]+", + "sensitive":true + }, "ResourceIdentifier":{ "type":"structure", "members":{ @@ -16568,6 +17286,15 @@ "pattern":"[\\s\\S]*" }, "ServiceName":{"type":"string"}, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

      A limit has been exceeded.

      ", + "error":{"httpStatusCode":402}, + "exception":true + }, "ServiceType":{ "type":"string", "enum":[ @@ -17245,6 +17972,13 @@ "type":"list", "member":{"shape":"Tag"} }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, "TagResourceRequest":{ "type":"structure", "required":[ @@ -18317,6 +19051,10 @@ "exception":true }, "UndoDeprecate":{"type":"boolean"}, + "UnsetDefaultVersion":{ + "type":"boolean", + "box":true + }, "UnsignedLong":{ "type":"long", "min":0 @@ -18886,6 +19624,108 @@ } } }, + "UpdatePackageConfigurationRequest":{ + "type":"structure", + "members":{ + "versionUpdateByJobsConfig":{ + "shape":"VersionUpdateByJobsConfig", + "documentation":"

      Configuration to manage job's package version reporting. This updates the thing's reserved named shadow that the job targets.

      " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

      A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

      ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "UpdatePackageConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdatePackageRequest":{ + "type":"structure", + "required":["packageName"], + "members":{ + "packageName":{ + "shape":"PackageName", + "documentation":"

      The name of the target package.

      ", + "location":"uri", + "locationName":"packageName" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

      The package description.

      " + }, + "defaultVersionName":{ + "shape":"VersionName", + "documentation":"

      The name of the default package version.

      Note: You cannot name a defaultVersion and set unsetDefaultVersion equal to true at the same time.

      " + }, + "unsetDefaultVersion":{ + "shape":"UnsetDefaultVersion", + "documentation":"

      Indicates whether you want to remove the named default package version from the software package. Set as true to remove the default package version.

      Note: You cannot name a defaultVersion and set unsetDefaultVersion equal to true at the same time.

      " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

      A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

      ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "UpdatePackageResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdatePackageVersionRequest":{ + "type":"structure", + "required":[ + "packageName", + "versionName" + ], + "members":{ + "packageName":{ + "shape":"PackageName", + "documentation":"

      The name of the associated software package.

      ", + "location":"uri", + "locationName":"packageName" + }, + "versionName":{ + "shape":"VersionName", + "documentation":"

      The name of the target package version.

      ", + "location":"uri", + "locationName":"versionName" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

      The package version description.

      " + }, + "attributes":{ + "shape":"ResourceAttributes", + "documentation":"

      Metadata that can be used to define a package version’s configuration. For example, the S3 file location, configuration options that are being sent to the device or fleet.

      Note: Attributes can be updated only when the package version is in a draft state.

      The combined size of all the attributes on a package version is limited to 3KB.

      " + }, + "action":{ + "shape":"PackageVersionAction", + "documentation":"

      The status that the package version should be assigned. For more information, see Package version lifecycle.

      " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

      A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

      ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "UpdatePackageVersionResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateProvisioningTemplateRequest":{ "type":"structure", "required":["templateName"], @@ -19333,6 +20173,15 @@ "type":"list", "member":{"shape":"ValidationError"} }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

      The request is not valid.

      ", + "error":{"httpStatusCode":400}, + "exception":true + }, "Value":{ "type":"string", "max":4096, @@ -19367,7 +20216,27 @@ "error":{"httpStatusCode":409}, "exception":true }, + "VersionName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9-_.]+" + }, "VersionNumber":{"type":"long"}, + "VersionUpdateByJobsConfig":{ + "type":"structure", + "members":{ + "enabled":{ + "shape":"EnabledBoolean", + "documentation":"

      Indicates whether the Job is enabled or not.

      " + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

      The Amazon Resource Name (ARN) of the role that grants permission to the IoT jobs service to update the reserved named shadow when the job successfully completes.

      " + } + }, + "documentation":"

      Configuration to manage IoT Job's package version reporting. If configured, Jobs updates the thing's reserved named shadow with the package version information up on successful job completion.

      Note: For each job, the destinationPackageVersions attribute has to be set with the correct data for Jobs to report to the thing shadow.

      " + }, "VersionsLimitExceededException":{ "type":"structure", "members":{ diff --git a/services/iot1clickdevices/pom.xml b/services/iot1clickdevices/pom.xml index c232c8ab224b..7d923ec8133c 100644 --- a/services/iot1clickdevices/pom.xml +++ b/services/iot1clickdevices/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT iot1clickdevices AWS Java SDK :: Services :: IoT 1Click Devices Service diff --git a/services/iot1clickprojects/pom.xml b/services/iot1clickprojects/pom.xml index 1fbcd6f6b293..e20f76a5e3ad 100644 --- a/services/iot1clickprojects/pom.xml +++ b/services/iot1clickprojects/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT iot1clickprojects AWS Java SDK :: Services :: IoT 1Click Projects diff --git a/services/iotanalytics/pom.xml b/services/iotanalytics/pom.xml index bf20f2e14502..3d0203ec3b24 100644 --- a/services/iotanalytics/pom.xml +++ b/services/iotanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT iotanalytics AWS Java SDK :: Services :: IoTAnalytics diff --git a/services/iotdataplane/pom.xml b/services/iotdataplane/pom.xml index e8cf9aa01bc8..a31801a6474e 100644 --- a/services/iotdataplane/pom.xml +++ b/services/iotdataplane/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT iotdataplane AWS Java SDK :: Services :: AWS IoT Data Plane diff --git a/services/iotdataplane/src/main/resources/codegen-resources/endpoint-tests.json b/services/iotdataplane/src/main/resources/codegen-resources/endpoint-tests.json index 6797f07ac82a..76cadc18b9ac 100644 --- a/services/iotdataplane/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/iotdataplane/src/main/resources/codegen-resources/endpoint-tests.json @@ -9,8 +9,8 @@ }, "params": { "Region": "ap-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { 
@@ -22,8 +22,8 @@ }, "params": { "Region": "ap-northeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -35,8 +35,8 @@ }, "params": { "Region": "ap-northeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -48,8 +48,8 @@ }, "params": { "Region": "ap-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,8 +61,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -74,8 +74,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -87,8 +87,8 @@ }, "params": { "Region": "ca-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -100,8 +100,8 @@ }, "params": { "Region": "ca-central-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -113,8 +113,8 @@ }, "params": { "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -126,8 +126,8 @@ }, "params": { "Region": "eu-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -139,8 +139,8 @@ }, "params": { "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -152,8 +152,8 @@ }, "params": { "Region": "eu-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -165,8 +165,8 @@ }, "params": { "Region": "eu-west-3", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -178,8 +178,8 @@ }, "params": { "Region": "me-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false 
} }, { @@ -191,8 +191,8 @@ }, "params": { "Region": "sa-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -204,8 +204,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -217,8 +217,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -230,8 +230,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -243,8 +243,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -256,8 +256,8 @@ }, "params": { "Region": "us-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -269,8 +269,8 @@ }, "params": { "Region": "us-west-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -282,8 +282,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -295,8 +295,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -308,8 +308,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -321,8 +321,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -334,8 +334,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -347,8 +347,8 @@ }, "params": { "Region": "cn-northwest-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -360,8 
+360,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -373,8 +373,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -386,8 +386,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -399,8 +399,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -412,8 +412,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -425,8 +425,8 @@ }, "params": { "Region": "us-gov-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -438,8 +438,8 @@ }, "params": { "Region": "us-gov-west-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -451,8 +451,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -464,8 +464,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -475,8 +475,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -488,8 +488,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -499,8 +499,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -512,8 +512,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ 
-523,8 +523,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -536,8 +536,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -547,8 +547,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -560,8 +560,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -573,8 +573,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -586,8 +586,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -598,8 +598,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -610,8 +610,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } }, diff --git a/services/iotdataplane/src/main/resources/codegen-resources/service-2.json b/services/iotdataplane/src/main/resources/codegen-resources/service-2.json index 1caa60198f80..e71a4611a5f3 100644 --- a/services/iotdataplane/src/main/resources/codegen-resources/service-2.json +++ b/services/iotdataplane/src/main/resources/codegen-resources/service-2.json @@ -535,7 +535,7 @@ "type":"string", "max":64, "min":1, - "pattern":"[a-zA-Z0-9:_-]+" + "pattern":"[$a-zA-Z0-9:_-]+" }, "ThingName":{ "type":"string", diff --git a/services/iotdeviceadvisor/pom.xml b/services/iotdeviceadvisor/pom.xml index 865954c092e8..432d983f5e62 100644 --- a/services/iotdeviceadvisor/pom.xml +++ b/services/iotdeviceadvisor/pom.xml @@ -21,7 +21,7 @@ 
software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT iotdeviceadvisor AWS Java SDK :: Services :: Iot Device Advisor diff --git a/services/iotdeviceadvisor/src/main/resources/codegen-resources/service-2.json b/services/iotdeviceadvisor/src/main/resources/codegen-resources/service-2.json index 610ba7904949..cf68b11e45fd 100644 --- a/services/iotdeviceadvisor/src/main/resources/codegen-resources/service-2.json +++ b/services/iotdeviceadvisor/src/main/resources/codegen-resources/service-2.json @@ -689,7 +689,7 @@ "RootGroup":{ "type":"string", "max":2048, - "min":1 + "min":0 }, "SelectedTestList":{ "type":"list", @@ -821,7 +821,7 @@ }, "rootGroup":{ "shape":"RootGroup", - "documentation":"

      Gets the test suite root group. This is a required parameter.

      " + "documentation":"

      Gets the test suite root group. This is a required parameter. For updating or creating the latest qualification suite, if intendedForQualification is set to true, rootGroup can be an empty string. If intendedForQualification is false, rootGroup cannot be an empty string. If rootGroup is empty, and intendedForQualification is set to true, all the qualification tests are included, and the configuration is default.

      For a qualification suite, the minimum length is 0, and the maximum is 2048. For a non-qualification suite, the minimum length is 1, and the maximum is 2048.

      " }, "devicePermissionRoleArn":{ "shape":"AmazonResourceName", @@ -1081,7 +1081,7 @@ }, "systemMessage":{ "shape":"SystemMessage", - "documentation":"

      " + "documentation":"

      Provides test case scenario system messages if any.

      " } }, "documentation":"

      Provides test case scenario.

      " diff --git a/services/iotevents/pom.xml b/services/iotevents/pom.xml index 724ed2484bc2..5adb030552e5 100644 --- a/services/iotevents/pom.xml +++ b/services/iotevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT iotevents AWS Java SDK :: Services :: IoT Events diff --git a/services/ioteventsdata/pom.xml b/services/ioteventsdata/pom.xml index a777751b307c..c3ac1502b063 100644 --- a/services/ioteventsdata/pom.xml +++ b/services/ioteventsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ioteventsdata AWS Java SDK :: Services :: IoT Events Data diff --git a/services/iotfleethub/pom.xml b/services/iotfleethub/pom.xml index aeb73307fc2b..1e00f06d05bc 100644 --- a/services/iotfleethub/pom.xml +++ b/services/iotfleethub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT iotfleethub AWS Java SDK :: Services :: Io T Fleet Hub diff --git a/services/iotfleetwise/pom.xml b/services/iotfleetwise/pom.xml index 685e148e4611..3022cf9ea949 100644 --- a/services/iotfleetwise/pom.xml +++ b/services/iotfleetwise/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT iotfleetwise AWS Java SDK :: Services :: Io T Fleet Wise diff --git a/services/iotfleetwise/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/iotfleetwise/src/main/resources/codegen-resources/endpoint-rule-set.json index 070d23f3f915..430fc0498bd1 100644 --- a/services/iotfleetwise/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/iotfleetwise/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { 
- "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" + }, + true + ] + } + ], + 
"type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] }, - "supportsFIPS" + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://iotfleetwise-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://iotfleetwise-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://iotfleetwise-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - 
"type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://iotfleetwise.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -222,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://iotfleetwise-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://iotfleetwise.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -231,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://iotfleetwise.{Region}.{PartitionResult#dualStackDnsSuffix}", - 
"properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://iotfleetwise.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/iotfleetwise/src/main/resources/codegen-resources/endpoint-tests.json b/services/iotfleetwise/src/main/resources/codegen-resources/endpoint-tests.json index 5f88f82e3039..85b3dd377b28 100644 --- a/services/iotfleetwise/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/iotfleetwise/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,120 +1,294 @@ { "testCases": [ { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://iotfleetwise-fips.eu-central-1.api.aws" + "url": "https://iotfleetwise.eu-central-1.amazonaws.com" } }, "params": { - "UseDualStack": true, "Region": "eu-central-1", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://iotfleetwise-fips.eu-central-1.amazonaws.com" + "url": "https://iotfleetwise.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { 
- "url": "https://iotfleetwise.eu-central-1.api.aws" + "url": "https://iotfleetwise-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://iotfleetwise.eu-central-1.amazonaws.com" + "url": "https://iotfleetwise-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://iotfleetwise-fips.us-east-1.api.aws" + "url": "https://iotfleetwise.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, "Region": "us-east-1", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://iotfleetwise-fips.us-east-1.amazonaws.com" + "url": "https://iotfleetwise-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://iotfleetwise.us-east-1.api.aws" + "url": "https://iotfleetwise-fips.cn-north-1.amazonaws.com.cn" } }, "params": 
{ - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://iotfleetwise.us-east-1.amazonaws.com" + "url": "https://iotfleetwise.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://iotfleetwise.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://iotfleetwise-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://iotfleetwise-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://iotfleetwise.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://iotfleetwise.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": 
"us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://iotfleetwise-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://iotfleetwise.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://iotfleetwise-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support 
DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://iotfleetwise.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -124,9 +298,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -136,11 +310,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/iotfleetwise/src/main/resources/codegen-resources/service-2.json b/services/iotfleetwise/src/main/resources/codegen-resources/service-2.json index d597cdbf63ba..244fe0b58a2c 100644 --- a/services/iotfleetwise/src/main/resources/codegen-resources/service-2.json +++ 
b/services/iotfleetwise/src/main/resources/codegen-resources/service-2.json @@ -183,7 +183,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

      Creates a vehicle, which is an instance of a vehicle model (model manifest). Vehicles created from the same vehicle model consist of the same signals inherited from the vehicle model.

      If you have an existing Amazon Web Services IoT Thing, you can use Amazon Web Services IoT FleetWise to create a vehicle and collect data from your thing.

      For more information, see Create a vehicle (AWS CLI) in the Amazon Web Services IoT FleetWise Developer Guide.

      ", + "documentation":"

      Creates a vehicle, which is an instance of a vehicle model (model manifest). Vehicles created from the same vehicle model consist of the same signals inherited from the vehicle model.

      If you have an existing Amazon Web Services IoT thing, you can use Amazon Web Services IoT FleetWise to create a vehicle and collect data from your thing.

      For more information, see Create a vehicle (AWS CLI) in the Amazon Web Services IoT FleetWise Developer Guide.

      ", "idempotent":true }, "DeleteCampaign":{ @@ -754,7 +754,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

      Registers your Amazon Web Services account, IAM, and Amazon Timestream resources so Amazon Web Services IoT FleetWise can transfer your vehicle data to the Amazon Web Services Cloud. For more information, including step-by-step procedures, see Setting up Amazon Web Services IoT FleetWise.

      An Amazon Web Services account is not the same thing as a \"user account\". An Amazon Web Services user is an identity that you create using Identity and Access Management (IAM) and takes the form of either an IAM user or an IAM role, both with credentials. A single Amazon Web Services account can, and typically does, contain many users and roles.

      " + "documentation":"

      This API operation contains deprecated parameters. Register your account again without the Timestream resources parameter so that Amazon Web Services IoT FleetWise can remove the Timestream metadata stored. You should then pass the data destination into the CreateCampaign API operation.

      You must delete any existing campaigns that include an empty data destination before you register your account again. For more information, see the DeleteCampaign API operation.

      If you want to delete the Timestream inline policy from the service-linked role, such as to mitigate an overly permissive policy, you must first delete any existing campaigns. Then delete the service-linked role and register your account again to enable CloudWatch metrics. For more information, see DeleteServiceLinkedRole in the Identity and Access Management API Reference.

       <p>Registers your Amazon Web Services account, IAM, and Amazon Timestream resources so Amazon Web Services IoT FleetWise can transfer your vehicle data to the Amazon Web Services Cloud. For more information, including step-by-step procedures, see <a href="https://docs.aws.amazon.com/iot-fleetwise/latest/developerguide/setting-up.html">Setting up Amazon Web Services IoT FleetWise</a>. </p> <note> <p>An Amazon Web Services account is <b>not</b> the same thing as a "user." An <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/introduction_identity-management.html#intro-identity-users">Amazon Web Services user</a> is an identity that you create using Identity and Access Management (IAM) and takes the form of either an <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users.html">IAM user</a> or an <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html">IAM role, both with credentials</a>. A single Amazon Web Services account can, and typically does, contain many users and roles.</p> </note> 
      " }, "TagResource":{ "name":"TagResource", @@ -960,6 +960,14 @@ "documentation":"

      A specified value for the actuator.

      ", "deprecated":true, "deprecatedMessage":"assignedValue is no longer in use" + }, + "deprecationMessage":{ + "shape":"message", + "documentation":"

      The deprecation message for the node or the branch that was moved or deleted.

      " + }, + "comment":{ + "shape":"message", + "documentation":"

      A comment in addition to the description.

      " } }, "documentation":"

      A signal that represents a vehicle device such as the engine, heater, and door locks. Data from an actuator reports the state of a certain vehicle device.

      Updating actuator data can change the state of a device. For example, you can turn on or off the heater by updating its actuator data.

      " @@ -1035,6 +1043,14 @@ "defaultValue":{ "shape":"string", "documentation":"

      The default value of the attribute.

      " + }, + "deprecationMessage":{ + "shape":"message", + "documentation":"

      The deprecation message for the node or the branch that was moved or deleted.

      " + }, + "comment":{ + "shape":"message", + "documentation":"

      A comment in addition to the description.

      " } }, "documentation":"

      A signal that represents static information about the vehicle, such as engine type or manufacturing date.

      " @@ -1096,6 +1112,14 @@ "description":{ "shape":"description", "documentation":"

      A brief description of the branch.

      " + }, + "deprecationMessage":{ + "shape":"message", + "documentation":"

      The deprecation message for the node or the branch that was moved or deleted.

      " + }, + "comment":{ + "shape":"message", + "documentation":"

      A comment in addition to the description.

      " } }, "documentation":"

      A group of signals that are defined in a hierarchical structure.

      " @@ -1223,11 +1247,11 @@ }, "startBit":{ "shape":"nonNegativeInteger", - "documentation":"

      Indicates the beginning of the CAN message.

      " + "documentation":"

      Indicates the beginning of the CAN signal. This should always be the least significant bit (LSB).

      This value might be different from the value in a DBC file. For little endian signals, startBit is the same value as in the DBC file. For big endian signals in a DBC file, the start bit is the most significant bit (MSB). You will have to calculate the LSB instead and pass it as the startBit.

      " }, "offset":{ "shape":"double", - "documentation":"

      Indicates where data appears in the CAN message.

      " + "documentation":"

      The offset used to calculate the signal value. Combined with factor, the calculation is value = raw_value * factor + offset.

      " }, "factor":{ "shape":"double", @@ -1367,7 +1391,7 @@ }, "expiryTime":{ "shape":"timestamp", - "documentation":"

      (Optional) The time the campaign expires, in seconds since epoch (January 1, 1970 at midnight UTC time). Vehicle data won't be collected after the campaign expires.

      Default: 253402214400 (December 31, 9999, 00:00:00 UTC)

      " + "documentation":"

      (Optional) The time the campaign expires, in seconds since epoch (January 1, 1970 at midnight UTC time). Vehicle data isn't collected after the campaign expires.

      Default: 253402214400 (December 31, 9999, 00:00:00 UTC)

      " }, "postTriggerCollectionDuration":{ "shape":"uint32", @@ -1404,6 +1428,10 @@ "tags":{ "shape":"TagList", "documentation":"

      Metadata that can be used to manage the campaign.

      " + }, + "dataDestinationConfigs":{ + "shape":"DataDestinationConfigs", + "documentation":"

      The destination where the campaign sends data. You can choose to send data to be stored in Amazon S3 or Amazon Timestream.

      Amazon S3 optimizes the cost of data storage and provides additional mechanisms to use vehicle data, such as data lakes, centralized data storage, data processing pipelines, and analytics.

      You can use Amazon Timestream to access and analyze time series data, and Timestream to query vehicle data so that you can identify trends and patterns.

      " } } }, @@ -1638,7 +1666,7 @@ }, "attributes":{ "shape":"attributesMap", - "documentation":"

      Static information about a vehicle in a key-value pair. For example: \"engineType\" : \"1.3 L R2\"

      " + "documentation":"

      Static information about a vehicle in a key-value pair. For example: \"engineType\" : \"1.3 L R2\"

      A campaign must include the keys (attribute names) in dataExtraDimensions for them to display in Amazon Timestream.

      " }, "associationBehavior":{ "shape":"VehicleAssociationBehavior", @@ -1720,12 +1748,40 @@ }, "documentation":"

      Information about a created vehicle.

      " }, + "DataDestinationConfig":{ + "type":"structure", + "members":{ + "s3Config":{ + "shape":"S3Config", + "documentation":"

      The Amazon S3 bucket where the Amazon Web Services IoT FleetWise campaign sends data.

      " + }, + "timestreamConfig":{ + "shape":"TimestreamConfig", + "documentation":"

      The Amazon Timestream table where the campaign sends data.

      " + } + }, + "documentation":"

      The destination where the Amazon Web Services IoT FleetWise campaign sends data. You can send data to be stored in Amazon S3 or Amazon Timestream.

      ", + "union":true + }, + "DataDestinationConfigs":{ + "type":"list", + "member":{"shape":"DataDestinationConfig"}, + "max":1, + "min":1 + }, "DataExtraDimensionNodePathList":{ "type":"list", "member":{"shape":"NodePath"}, "max":5, "min":0 }, + "DataFormat":{ + "type":"string", + "enum":[ + "JSON", + "PARQUET" + ] + }, "DecoderManifestSummary":{ "type":"structure", "required":[ @@ -2007,7 +2063,7 @@ "documentation":"

      Provides the VSS in JSON format.

      " } }, - "documentation":"

      Vehicle Signal Specification (VSS) is a precise language used to describe and model signals in vehicle networks. The JSON file collects signal specificiations in a VSS format.

      ", + "documentation":"

      Vehicle Signal Specification (VSS) is a precise language used to describe and model signals in vehicle networks. The JSON file collects signal specifications in a VSS format.

      ", "union":true }, "Fqns":{ @@ -2105,6 +2161,10 @@ "lastModificationTime":{ "shape":"timestamp", "documentation":"

      The last time the campaign was modified.

      " + }, + "dataDestinationConfigs":{ + "shape":"DataDestinationConfigs", + "documentation":"

      The destination where the campaign sends data. You can choose to send data to be stored in Amazon S3 or Amazon Timestream.

      Amazon S3 optimizes the cost of data storage and provides additional mechanisms to use vehicle data, such as data lakes, centralized data storage, data processing pipelines, and analytics.

      You can use Amazon Timestream to access and analyze time series data, and Timestream to query vehicle data so that you can identify trends and patterns.

      " } } }, @@ -2277,7 +2337,6 @@ "required":[ "customerAccountId", "accountStatus", - "timestreamRegistrationResponse", "iamRegistrationResponse", "creationTime", "lastModificationTime" @@ -3355,7 +3414,7 @@ }, "offset":{ "shape":"double", - "documentation":"

      Indicates where data appears in the message.

      " + "documentation":"

      The offset used to calculate the signal value. Combined with scaling, the calculation is value = raw_value * scaling + offset.

      " }, "startByte":{ "shape":"nonNegativeInteger", @@ -3381,6 +3440,12 @@ "max":50, "min":1 }, + "Prefix":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[a-zA-Z0-9-_:./!*'()]+" + }, "ProtocolName":{ "type":"string", "max":50, @@ -3408,9 +3473,12 @@ }, "RegisterAccountRequest":{ "type":"structure", - "required":["timestreamResources"], "members":{ - "timestreamResources":{"shape":"TimestreamResources"}, + "timestreamResources":{ + "shape":"TimestreamResources", + "deprecated":true, + "deprecatedMessage":"Amazon Timestream metadata is now passed in the CreateCampaign API." + }, "iamResources":{ "shape":"IamResources", "documentation":"

      The IAM resource that allows Amazon Web Services IoT FleetWise to send data to Amazon Timestream.

      ", @@ -3423,7 +3491,6 @@ "type":"structure", "required":[ "registerAccountStatus", - "timestreamResources", "iamResources", "creationTime", "lastModificationTime" @@ -3478,6 +3545,35 @@ "exception":true }, "RetryAfterSeconds":{"type":"integer"}, + "S3BucketArn":{ + "type":"string", + "max":100, + "min":16, + "pattern":"arn:(aws[a-zA-Z0-9-]*):s3:::.+" + }, + "S3Config":{ + "type":"structure", + "required":["bucketArn"], + "members":{ + "bucketArn":{ + "shape":"S3BucketArn", + "documentation":"

      The Amazon Resource Name (ARN) of the Amazon S3 bucket.

      " + }, + "dataFormat":{ + "shape":"DataFormat", + "documentation":"

      Specify the format that files are saved in the Amazon S3 bucket. You can save files in an Apache Parquet or JSON format.

      • Parquet - Store data in a columnar storage file format. Parquet is optimal for fast data retrieval and can reduce costs. This option is selected by default.

      • JSON - Store data in a standard text-based JSON file format.

      " + }, + "storageCompressionFormat":{ + "shape":"StorageCompressionFormat", + "documentation":"

      By default, stored data is compressed as a .gzip file. Compressed files have a reduced file size, which can optimize the cost of data storage.

      " + }, + "prefix":{ + "shape":"Prefix", + "documentation":"

      (Optional) Enter an S3 bucket prefix. The prefix is the string of characters after the bucket name and before the object name. You can use the prefix to organize data stored in Amazon S3 buckets. For more information, see Organizing objects using prefixes in the Amazon Simple Storage Service User Guide.

      By default, Amazon Web Services IoT FleetWise sets the prefix processed-data/year=YY/month=MM/date=DD/hour=HH/ (in UTC) to data it delivers to Amazon S3. You can enter a prefix to append it to this default prefix. For example, if you enter the prefix vehicles, the prefix will be vehicles/processed-data/year=YY/month=MM/date=DD/hour=HH/.

      " + } + }, + "documentation":"

      The Amazon S3 bucket where the Amazon Web Services IoT FleetWise campaign sends data. Amazon S3 is an object storage service that stores data as objects within buckets. For more information, see Creating, configuring, and working with Amazon S3 buckets in the Amazon Simple Storage Service User Guide.

      " + }, "Sensor":{ "type":"structure", "required":[ @@ -3512,6 +3608,14 @@ "max":{ "shape":"double", "documentation":"

      The specified possible maximum value of the sensor.

      " + }, + "deprecationMessage":{ + "shape":"message", + "documentation":"

      The deprecation message for the node or the branch that was moved or deleted.

      " + }, + "comment":{ + "shape":"message", + "documentation":"

      A comment in addition to the description.

      " } }, "documentation":"

      An input component that reports the environmental condition of a vehicle.

      You can collect data about fluid levels, temperatures, vibrations, or battery voltage from sensors.

      " @@ -3628,6 +3732,13 @@ "TO_DISK" ] }, + "StorageCompressionFormat":{ + "type":"string", + "enum":[ + "NONE", + "GZIP" + ] + }, "String":{"type":"string"}, "Tag":{ "type":"structure", @@ -3723,6 +3834,24 @@ }, "documentation":"

      Information about a collection scheme that uses a time period to decide how often to collect data.

      " }, + "TimestreamConfig":{ + "type":"structure", + "required":[ + "timestreamTableArn", + "executionRoleArn" + ], + "members":{ + "timestreamTableArn":{ + "shape":"TimestreamTableArn", + "documentation":"

      The Amazon Resource Name (ARN) of the Amazon Timestream table.

      " + }, + "executionRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

      The Amazon Resource Name (ARN) of the task execution role that grants Amazon Web Services IoT FleetWise permission to deliver data to the Amazon Timestream table.

      " + } + }, + "documentation":"

      The Amazon Timestream table where the Amazon Web Services IoT FleetWise campaign sends data. Timestream stores and organizes data to optimize query processing time and to reduce storage costs. For more information, see Data modeling in the Amazon Timestream Developer Guide.

      " + }, "TimestreamDatabaseName":{ "type":"string", "max":255, @@ -3782,6 +3911,12 @@ }, "documentation":"

      The registered Amazon Timestream resources that Amazon Web Services IoT FleetWise edge agent software can transfer your vehicle data to.

      " }, + "TimestreamTableArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:(aws[a-zA-Z0-9-]*):timestream:[a-zA-Z0-9-]+:[0-9]{12}:database/[a-zA-Z0-9_.-]+/table/[a-zA-Z0-9_.-]+" + }, "TimestreamTableName":{ "type":"string", "max":255, @@ -3847,7 +3982,7 @@ }, "action":{ "shape":"UpdateCampaignAction", - "documentation":"

      Specifies how to update a campaign. The action can be one of the following:

      • APPROVE - To approve delivering a data collection scheme to vehicles.

      • SUSPEND - To suspend collecting signal data.

      • RESUME - To resume collecting signal data.

      • UPDATE - To update a campaign.

      " + "documentation":"

      Specifies how to update a campaign. The action can be one of the following:

      • APPROVE - To approve delivering a data collection scheme to vehicles.

      • SUSPEND - To suspend collecting signal data. The campaign is deleted from vehicles and all vehicles in the suspended campaign will stop sending data.

      • RESUME - To reactivate the SUSPEND campaign. The campaign is redeployed to all vehicles and the vehicles will resume sending data.

      • UPDATE - To update a campaign.

      " } } }, @@ -4375,6 +4510,12 @@ "max":4294967295, "min":1 }, + "message":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[^\\u0000-\\u001F\\u007F]+" + }, "modelManifestSummaries":{ "type":"list", "member":{"shape":"ModelManifestSummary"} diff --git a/services/iotjobsdataplane/pom.xml b/services/iotjobsdataplane/pom.xml index 2fef88f81d27..4bda75677f4d 100644 --- a/services/iotjobsdataplane/pom.xml +++ b/services/iotjobsdataplane/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT iotjobsdataplane AWS Java SDK :: Services :: IoT Jobs Data Plane diff --git a/services/iotroborunner/pom.xml b/services/iotroborunner/pom.xml index f9846de09da2..32adf49d69e8 100644 --- a/services/iotroborunner/pom.xml +++ b/services/iotroborunner/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT iotroborunner AWS Java SDK :: Services :: IoT Robo Runner diff --git a/services/iotsecuretunneling/pom.xml b/services/iotsecuretunneling/pom.xml index 387810eb5867..e76d5e1f7825 100644 --- a/services/iotsecuretunneling/pom.xml +++ b/services/iotsecuretunneling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT iotsecuretunneling AWS Java SDK :: Services :: IoTSecureTunneling diff --git a/services/iotsitewise/pom.xml b/services/iotsitewise/pom.xml index 3b0a4a124e5f..b7590a743318 100644 --- a/services/iotsitewise/pom.xml +++ b/services/iotsitewise/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT iotsitewise AWS Java SDK :: Services :: Io T Site Wise diff --git a/services/iotsitewise/src/main/resources/codegen-resources/customization.config b/services/iotsitewise/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..0e729acd0371 --- /dev/null +++ b/services/iotsitewise/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + 
"generateEndpointClientTests": true +} diff --git a/services/iotthingsgraph/pom.xml b/services/iotthingsgraph/pom.xml index cc87b98797fd..3bda3f8a24dd 100644 --- a/services/iotthingsgraph/pom.xml +++ b/services/iotthingsgraph/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT iotthingsgraph AWS Java SDK :: Services :: IoTThingsGraph diff --git a/services/iottwinmaker/pom.xml b/services/iottwinmaker/pom.xml index 4224c35f9514..2254ffd76abb 100644 --- a/services/iottwinmaker/pom.xml +++ b/services/iottwinmaker/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT iottwinmaker AWS Java SDK :: Services :: Io T Twin Maker diff --git a/services/iottwinmaker/src/main/resources/codegen-resources/customization.config b/services/iottwinmaker/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..0e729acd0371 --- /dev/null +++ b/services/iottwinmaker/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "generateEndpointClientTests": true +} diff --git a/services/iotwireless/pom.xml b/services/iotwireless/pom.xml index e4f01bb2278e..5263d1ae2096 100644 --- a/services/iotwireless/pom.xml +++ b/services/iotwireless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT iotwireless AWS Java SDK :: Services :: IoT Wireless diff --git a/services/iotwireless/src/main/resources/codegen-resources/service-2.json b/services/iotwireless/src/main/resources/codegen-resources/service-2.json index 98a3c110d17b..fde87c0f5397 100644 --- a/services/iotwireless/src/main/resources/codegen-resources/service-2.json +++ b/services/iotwireless/src/main/resources/codegen-resources/service-2.json @@ -2756,6 +2756,10 @@ "ClientRequestToken":{ "shape":"ClientRequestToken", "idempotencyToken":true + }, + "MulticastGroups":{ + "shape":"NetworkAnalyzerMulticastGroupList", + "documentation":"

      Multicast Group resources to add to the network analyzer configuration. Provide the MulticastGroupId of the resource to add in the input array.

      " } } }, @@ -3813,12 +3817,12 @@ }, "FragmentIntervalMS":{ "type":"integer", - "documentation":"

      The interval of sending fragments in milliseconds. Currently the interval will be rounded to the nearest second. Note that this interval only controls the timing when the cloud sends the fragments down. The actual delay of receiving fragments at device side depends on the device's class and the communication delay with the cloud.

      ", + "documentation":"

      The interval for sending fragments in milliseconds, rounded to the nearest second.

      This interval only determines the timing for when the Cloud sends down the fragments to yor device. There can be a delay for when your device will receive these fragments. This delay depends on the device's class and the communication delay with the cloud.

      ", "min":1 }, "FragmentSizeBytes":{ "type":"integer", - "documentation":"

      The size of each fragment in bytes. Currently only supported in fuota tasks with multicast groups.

      ", + "documentation":"

      The size of each fragment in bytes. This parameter is supported only for FUOTA tasks with multicast groups.

      ", "min":1 }, "FuotaDeviceStatus":{ @@ -4137,7 +4141,11 @@ "shape":"NetworkAnalyzerConfigurationArn", "documentation":"

      The Amazon Resource Name of the new resource.

      " }, - "Name":{"shape":"NetworkAnalyzerConfigurationName"} + "Name":{"shape":"NetworkAnalyzerConfigurationName"}, + "MulticastGroups":{ + "shape":"NetworkAnalyzerMulticastGroupList", + "documentation":"

      List of multicast group resources that have been added to the network analyzer configuration.

      " + } } }, "GetPartnerAccountRequest":{ @@ -4623,7 +4631,7 @@ }, "LastUplinkReceivedAt":{ "shape":"ISODateTimeString", - "documentation":"

      The date and time when the most recent uplink was received.

      " + "documentation":"

      The date and time when the most recent uplink was received.

      This value is only valid for 3 months.

      " }, "LoRaWAN":{ "shape":"LoRaWANDeviceMetadata", @@ -4756,7 +4764,7 @@ }, "LastUplinkReceivedAt":{ "shape":"ISODateTimeString", - "documentation":"

      The date and time when the most recent uplink was received.

      " + "documentation":"

      The date and time when the most recent uplink was received.

      This value is only valid for 3 months.

      " }, "ConnectionStatus":{ "shape":"ConnectionStatus", @@ -4822,7 +4830,7 @@ }, "LastUplinkReceivedAt":{ "shape":"ISODateTimeString", - "documentation":"

      The date and time when the most recent uplink was received.

      " + "documentation":"

      The date and time when the most recent uplink was received.

      This value is only valid for 3 months.

      " }, "TaskCreatedAt":{ "shape":"ISODateTimeString", @@ -6462,6 +6470,14 @@ "type":"string", "max":256 }, + "MulticastFrameInfo":{ + "type":"string", + "documentation":"

      FrameInfo of your multicast group resources for the trace content. Use FrameInfo to debug the multicast communication between your LoRaWAN end devices and the network server.

      ", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "MulticastGroup":{ "type":"structure", "members":{ @@ -6559,6 +6575,12 @@ }, "documentation":"

      Network analyzer configurations.

      " }, + "NetworkAnalyzerMulticastGroupList":{ + "type":"list", + "member":{"shape":"MulticastGroupId"}, + "max":10, + "min":0 + }, "NetworkId":{ "type":"integer", "max":65535, @@ -6951,7 +6973,7 @@ "RaAllowed":{"type":"boolean"}, "RedundancyPercent":{ "type":"integer", - "documentation":"

      The percentage of added redundant fragments. For example, if firmware file is 100 bytes and fragment size is 10 bytes, with RedundancyPercent set to 50(%), the final number of encoded fragments is (100 / 10) + (100 / 10 * 50%) = 15.

      ", + "documentation":"

      The percentage of the added fragments that are redundant. For example, if the size of the firmware image file is 100 bytes and the fragment size is 10 bytes, with RedundancyPercent set to 50(%), the final number of encoded fragments is (100 / 10) + (100 / 10 * 50%) = 15.

      ", "max":100, "min":0 }, @@ -7954,7 +7976,8 @@ "type":"structure", "members":{ "WirelessDeviceFrameInfo":{"shape":"WirelessDeviceFrameInfo"}, - "LogLevel":{"shape":"LogLevel"} + "LogLevel":{"shape":"LogLevel"}, + "MulticastFrameInfo":{"shape":"MulticastFrameInfo"} }, "documentation":"

      Trace content for your wireless gateway and wireless device resources.

      " }, @@ -8202,7 +8225,15 @@ "shape":"WirelessGatewayList", "documentation":"

      Wireless gateway resources to remove from the network analyzer configuration. Provide the WirelessGatewayId of the resources to remove in the input array.

      " }, - "Description":{"shape":"Description"} + "Description":{"shape":"Description"}, + "MulticastGroupsToAdd":{ + "shape":"NetworkAnalyzerMulticastGroupList", + "documentation":"

      Multicast group resources to add to the network analyzer configuration. Provide the MulticastGroupId of the resource to add in the input array.

      " + }, + "MulticastGroupsToRemove":{ + "shape":"NetworkAnalyzerMulticastGroupList", + "documentation":"

      Multicast group resources to remove from the network analyzer configuration. Provide the MulticastGroupId of the resource to remove in the input array.

      " + } } }, "UpdateNetworkAnalyzerConfigurationResponse":{ @@ -8807,7 +8838,7 @@ }, "LastUplinkReceivedAt":{ "shape":"ISODateTimeString", - "documentation":"

      The date and time when the most recent uplink was received.

      " + "documentation":"

      The date and time when the most recent uplink was received.

      This value is only valid for 3 months.

      " }, "LoRaWAN":{ "shape":"LoRaWANListDevice", @@ -8933,7 +8964,7 @@ }, "LastUplinkReceivedAt":{ "shape":"ISODateTimeString", - "documentation":"

      The date and time when the most recent uplink was received.

      " + "documentation":"

      The date and time when the most recent uplink was received.

      This value is only valid for 3 months.

      " } }, "documentation":"

      Information about a wireless gateway's operation.

      " diff --git a/services/ivs/pom.xml b/services/ivs/pom.xml index 97351cbbb4e8..325ba8da2b48 100644 --- a/services/ivs/pom.xml +++ b/services/ivs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ivs AWS Java SDK :: Services :: Ivs diff --git a/services/ivs/src/main/resources/codegen-resources/endpoint-tests.json b/services/ivs/src/main/resources/codegen-resources/endpoint-tests.json index 893c2009eed2..de653a93c40a 100644 --- a/services/ivs/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/ivs/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { + "Region": "ap-northeast-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { @@ -21,9 +21,9 @@ } }, "params": { + "Region": "ap-northeast-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { + "Region": "ap-south-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-south-1" + "UseDualStack": false } }, { @@ -47,9 +47,9 @@ } }, "params": { + "Region": "eu-central-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { + "Region": "eu-west-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { @@ -73,9 +73,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { + "Region": "us-west-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -99,9 +99,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -112,9 +112,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - 
"UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -125,9 +125,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -138,9 +138,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -151,9 +151,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -164,9 +164,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -177,9 +177,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -190,9 +190,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -203,9 +203,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -216,9 +216,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -229,9 +229,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -240,9 +240,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { @@ -253,9 +253,9 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -264,9 +264,9 @@ "error": 
"DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { @@ -277,9 +277,9 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -288,9 +288,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -301,9 +301,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -312,9 +312,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -325,9 +325,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -338,9 +338,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -363,9 +363,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -375,9 +375,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, diff --git a/services/ivs/src/main/resources/codegen-resources/service-2.json 
b/services/ivs/src/main/resources/codegen-resources/service-2.json index 0f305c5fb55d..1a8e9808d6bc 100644 --- a/services/ivs/src/main/resources/codegen-resources/service-2.json +++ b/services/ivs/src/main/resources/codegen-resources/service-2.json @@ -467,7 +467,7 @@ {"shape":"PendingVerification"}, {"shape":"ConflictException"} ], - "documentation":"

      Updates a channel's configuration. This does not affect an ongoing stream of this channel. You must stop and restart the stream for the changes to take effect.

      " + "documentation":"

      Updates a channel's configuration. Live channels cannot be updated. You must stop the ongoing stream, update the channel, and restart the stream for the changes to take effect.

      " } }, "shapes":{ @@ -608,6 +608,10 @@ "shape":"PlaybackURL", "documentation":"

      Channel playback URL.

      " }, + "preset":{ + "shape":"TranscodePreset", + "documentation":"

      Optional transcode preset for the channel. This is selectable only for ADVANCED_HD and ADVANCED_SD channel types. For those channel types, the default preset is HIGHER_BANDWIDTH_DELIVERY. For other channel types (BASIC and STANDARD), preset is the empty string (\"\").

      " + }, "recordingConfigurationArn":{ "shape":"ChannelRecordingConfigurationArn", "documentation":"

      Recording-configuration ARN. A value other than an empty string indicates that recording is enabled. Default: \"\" (empty string, recording is disabled).

      " @@ -618,7 +622,7 @@ }, "type":{ "shape":"ChannelType", - "documentation":"

      Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable resolution or bitrate, the stream probably will disconnect immediately. Default: STANDARD. Valid values:

      • STANDARD: Video is transcoded: multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Transcoding allows higher playback quality across a range of download speeds. Resolution can be up to 1080p and bitrate can be up to 8.5 Mbps. Audio is transcoded only for renditions 360p and below; above that, audio is passed through. This is the default.

      • BASIC: Video is transmuxed: Amazon IVS delivers the original input to viewers. The viewer’s video-quality choice is limited to the original input. Resolution can be up to 1080p and bitrate can be up to 1.5 Mbps for 480p and up to 3.5 Mbps for resolutions between 480p and 1080p.

      " + "documentation":"

      Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Some types generate multiple qualities (renditions) from the original input; this automatically gives viewers the best experience for their devices and network conditions. Some types provide transcoded video; transcoding allows higher playback quality across a range of download speeds. Default: STANDARD. Valid values:

      • BASIC: Video is transmuxed: Amazon IVS delivers the original input quality to viewers. The viewer’s video-quality choice is limited to the original input. Input resolution can be up to 1080p and bitrate can be up to 1.5 Mbps for 480p and up to 3.5 Mbps for resolutions between 480p and 1080p. Original audio is passed through.

      • STANDARD: Video is transcoded: multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Transcoding allows higher playback quality across a range of download speeds. Resolution can be up to 1080p and bitrate can be up to 8.5 Mbps. Audio is transcoded only for renditions 360p and below; above that, audio is passed through. This is the default when you create a channel.

      • ADVANCED_SD: Video is transcoded; multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Input resolution can be up to 1080p and bitrate can be up to 8.5 Mbps; output is capped at SD quality (480p). You can select an optional transcode preset (see below). Audio for all renditions is transcoded, and an audio-only rendition is available.

      • ADVANCED_HD: Video is transcoded; multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Input resolution can be up to 1080p and bitrate can be up to 8.5 Mbps; output is capped at HD quality (720p). You can select an optional transcode preset (see below). Audio for all renditions is transcoded, and an audio-only rendition is available.

      Optional transcode presets (available for the ADVANCED types) allow you to trade off available download bandwidth and video quality, to optimize the viewing experience. There are two presets:

      • Constrained bandwidth delivery uses a lower bitrate for each quality level. Use it if you have low download bandwidth and/or simple video content (e.g., talking heads).

      • Higher bandwidth delivery uses a higher bitrate for each quality level. Use it if you have high download bandwidth and/or complex video content (e.g., flashes and quick scene changes).

      " } }, "documentation":"

      Object specifying a channel.

      " @@ -696,6 +700,10 @@ "shape":"ChannelName", "documentation":"

      Channel name.

      " }, + "preset":{ + "shape":"TranscodePreset", + "documentation":"

      Optional transcode preset for the channel. This is selectable only for ADVANCED_HD and ADVANCED_SD channel types. For those channel types, the default preset is HIGHER_BANDWIDTH_DELIVERY. For other channel types (BASIC and STANDARD), preset is the empty string (\"\").

      " + }, "recordingConfigurationArn":{ "shape":"ChannelRecordingConfigurationArn", "documentation":"

      Recording-configuration ARN. A value other than an empty string indicates that recording is enabled. Default: \"\" (empty string, recording is disabled).

      " @@ -703,6 +711,10 @@ "tags":{ "shape":"Tags", "documentation":"

      Tags attached to the resource. Array of 1-50 maps, each of the form string:string (key:value). See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no service-specific constraints beyond what is documented there.

      " + }, + "type":{ + "shape":"ChannelType", + "documentation":"

      Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Some types generate multiple qualities (renditions) from the original input; this automatically gives viewers the best experience for their devices and network conditions. Some types provide transcoded video; transcoding allows higher playback quality across a range of download speeds. Default: STANDARD. Valid values:

      • BASIC: Video is transmuxed: Amazon IVS delivers the original input quality to viewers. The viewer’s video-quality choice is limited to the original input. Input resolution can be up to 1080p and bitrate can be up to 1.5 Mbps for 480p and up to 3.5 Mbps for resolutions between 480p and 1080p. Original audio is passed through.

      • STANDARD: Video is transcoded: multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Transcoding allows higher playback quality across a range of download speeds. Resolution can be up to 1080p and bitrate can be up to 8.5 Mbps. Audio is transcoded only for renditions 360p and below; above that, audio is passed through. This is the default when you create a channel.

      • ADVANCED_SD: Video is transcoded; multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Input resolution can be up to 1080p and bitrate can be up to 8.5 Mbps; output is capped at SD quality (480p). You can select an optional transcode preset (see below). Audio for all renditions is transcoded, and an audio-only rendition is available.

      • ADVANCED_HD: Video is transcoded; multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Input resolution can be up to 1080p and bitrate can be up to 8.5 Mbps; output is capped at HD quality (720p). You can select an optional transcode preset (see below). Audio for all renditions is transcoded, and an audio-only rendition is available.

      Optional transcode presets (available for the ADVANCED types) allow you to trade off available download bandwidth and video quality, to optimize the viewing experience. There are two presets:

      • Constrained bandwidth delivery uses a lower bitrate for each quality level. Use it if you have low download bandwidth and/or simple video content (e.g., talking heads).

      • Higher bandwidth delivery uses a higher bitrate for each quality level. Use it if you have high download bandwidth and/or complex video content (e.g., flashes and quick scene changes).

      " } }, "documentation":"

      Summary information about a channel.

      " @@ -711,7 +723,9 @@ "type":"string", "enum":[ "BASIC", - "STANDARD" + "STANDARD", + "ADVANCED_SD", + "ADVANCED_HD" ] }, "Channels":{ @@ -752,6 +766,10 @@ "shape":"ChannelName", "documentation":"

      Channel name.

      " }, + "preset":{ + "shape":"TranscodePreset", + "documentation":"

      Optional transcode preset for the channel. This is selectable only for ADVANCED_HD and ADVANCED_SD channel types. For those channel types, the default preset is HIGHER_BANDWIDTH_DELIVERY. For other channel types (BASIC and STANDARD), preset is the empty string (\"\").

      " + }, "recordingConfigurationArn":{ "shape":"ChannelRecordingConfigurationArn", "documentation":"

      Recording-configuration ARN. Default: \"\" (empty string, recording is disabled).

      " @@ -762,7 +780,7 @@ }, "type":{ "shape":"ChannelType", - "documentation":"

      Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable resolution or bitrate, the stream probably will disconnect immediately. Default: STANDARD. Valid values:

      • STANDARD: Video is transcoded: multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Transcoding allows higher playback quality across a range of download speeds. Resolution can be up to 1080p and bitrate can be up to 8.5 Mbps. Audio is transcoded only for renditions 360p and below; above that, audio is passed through. This is the default.

      • BASIC: Video is transmuxed: Amazon IVS delivers the original input to viewers. The viewer’s video-quality choice is limited to the original input. Resolution can be up to 1080p and bitrate can be up to 1.5 Mbps for 480p and up to 3.5 Mbps for resolutions between 480p and 1080p.

      " + "documentation":"

      Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Some types generate multiple qualities (renditions) from the original input; this automatically gives viewers the best experience for their devices and network conditions. Some types provide transcoded video; transcoding allows higher playback quality across a range of download speeds. Default: STANDARD. Valid values:

      • BASIC: Video is transmuxed: Amazon IVS delivers the original input quality to viewers. The viewer’s video-quality choice is limited to the original input. Input resolution can be up to 1080p and bitrate can be up to 1.5 Mbps for 480p and up to 3.5 Mbps for resolutions between 480p and 1080p. Original audio is passed through.

      • STANDARD: Video is transcoded: multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Transcoding allows higher playback quality across a range of download speeds. Resolution can be up to 1080p and bitrate can be up to 8.5 Mbps. Audio is transcoded only for renditions 360p and below; above that, audio is passed through. This is the default when you create a channel.

      • ADVANCED_SD: Video is transcoded; multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Input resolution can be up to 1080p and bitrate can be up to 8.5 Mbps; output is capped at SD quality (480p). You can select an optional transcode preset (see below). Audio for all renditions is transcoded, and an audio-only rendition is available.

      • ADVANCED_HD: Video is transcoded; multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Input resolution can be up to 1080p and bitrate can be up to 8.5 Mbps; output is capped at HD quality (720p). You can select an optional transcode preset (see below). Audio for all renditions is transcoded, and an audio-only rendition is available.

      Optional transcode presets (available for the ADVANCED types) allow you to trade off available download bandwidth and video quality, to optimize the viewing experience. There are two presets:

      • Constrained bandwidth delivery uses a lower bitrate for each quality level. Use it if you have low download bandwidth and/or simple video content (e.g., talking heads).

      • Higher bandwidth delivery uses a higher bitrate for each quality level. Use it if you have high download bandwidth and/or complex video content (e.g., flashes and quick scene changes).

      " } } }, @@ -1926,6 +1944,13 @@ "type":"timestamp", "timestampFormat":"iso8601" }, + "TranscodePreset":{ + "type":"string", + "enum":[ + "HIGHER_BANDWIDTH_DELIVERY", + "CONSTRAINED_BANDWIDTH_DELIVERY" + ] + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -1976,13 +2001,17 @@ "shape":"ChannelName", "documentation":"

      Channel name.

      " }, + "preset":{ + "shape":"TranscodePreset", + "documentation":"

      Optional transcode preset for the channel. This is selectable only for ADVANCED_HD and ADVANCED_SD channel types. For those channel types, the default preset is HIGHER_BANDWIDTH_DELIVERY. For other channel types (BASIC and STANDARD), preset is the empty string (\"\").

      " + }, "recordingConfigurationArn":{ "shape":"ChannelRecordingConfigurationArn", "documentation":"

      Recording-configuration ARN. If this is set to an empty string, recording is disabled. A value other than an empty string indicates that recording is enabled.

      " }, "type":{ "shape":"ChannelType", - "documentation":"

      Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable resolution or bitrate, the stream probably will disconnect immediately. Valid values:

      • STANDARD: Video is transcoded: multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Transcoding allows higher playback quality across a range of download speeds. Resolution can be up to 1080p and bitrate can be up to 8.5 Mbps. Audio is transcoded only for renditions 360p and below; above that, audio is passed through. This is the default.

      • BASIC: Video is transmuxed: Amazon IVS delivers the original input to viewers. The viewer’s video-quality choice is limited to the original input. Resolution can be up to 1080p and bitrate can be up to 1.5 Mbps for 480p and up to 3.5 Mbps for resolutions between 480p and 1080p.

      " + "documentation":"

      Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Some types generate multiple qualities (renditions) from the original input; this automatically gives viewers the best experience for their devices and network conditions. Some types provide transcoded video; transcoding allows higher playback quality across a range of download speeds. Default: STANDARD. Valid values:

      • BASIC: Video is transmuxed: Amazon IVS delivers the original input quality to viewers. The viewer’s video-quality choice is limited to the original input. Input resolution can be up to 1080p and bitrate can be up to 1.5 Mbps for 480p and up to 3.5 Mbps for resolutions between 480p and 1080p. Original audio is passed through.

      • STANDARD: Video is transcoded: multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Transcoding allows higher playback quality across a range of download speeds. Resolution can be up to 1080p and bitrate can be up to 8.5 Mbps. Audio is transcoded only for renditions 360p and below; above that, audio is passed through. This is the default when you create a channel.

      • ADVANCED_SD: Video is transcoded; multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Input resolution can be up to 1080p and bitrate can be up to 8.5 Mbps; output is capped at SD quality (480p). You can select an optional transcode preset (see below). Audio for all renditions is transcoded, and an audio-only rendition is available.

      • ADVANCED_HD: Video is transcoded; multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Input resolution can be up to 1080p and bitrate can be up to 8.5 Mbps; output is capped at HD quality (720p). You can select an optional transcode preset (see below). Audio for all renditions is transcoded, and an audio-only rendition is available.

      Optional transcode presets (available for the ADVANCED types) allow you to trade off available download bandwidth and video quality, to optimize the viewing experience. There are two presets:

      • Constrained bandwidth delivery uses a lower bitrate for each quality level. Use it if you have low download bandwidth and/or simple video content (e.g., talking heads).

      • Higher bandwidth delivery uses a higher bitrate for each quality level. Use it if you have high download bandwidth and/or complex video content (e.g., flashes and quick scene changes).

      " } } }, diff --git a/services/ivschat/pom.xml b/services/ivschat/pom.xml index 9e186c1d21ee..b04b21f53f4e 100644 --- a/services/ivschat/pom.xml +++ b/services/ivschat/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ivschat AWS Java SDK :: Services :: Ivschat diff --git a/services/ivsrealtime/pom.xml b/services/ivsrealtime/pom.xml index 8682dc226cbc..7ca7b2c66022 100644 --- a/services/ivsrealtime/pom.xml +++ b/services/ivsrealtime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ivsrealtime AWS Java SDK :: Services :: IVS Real Time diff --git a/services/kafka/pom.xml b/services/kafka/pom.xml index 9ab4746bc2ab..6de083508b3f 100644 --- a/services/kafka/pom.xml +++ b/services/kafka/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT kafka AWS Java SDK :: Services :: Kafka diff --git a/services/kafkaconnect/pom.xml b/services/kafkaconnect/pom.xml index 2af6382f26a3..5b0bbe4bacd6 100644 --- a/services/kafkaconnect/pom.xml +++ b/services/kafkaconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT kafkaconnect AWS Java SDK :: Services :: Kafka Connect diff --git a/services/kendra/pom.xml b/services/kendra/pom.xml index 6f4afe47326b..7dbf0d8aed0b 100644 --- a/services/kendra/pom.xml +++ b/services/kendra/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT kendra AWS Java SDK :: Services :: Kendra diff --git a/services/kendra/src/main/resources/codegen-resources/service-2.json b/services/kendra/src/main/resources/codegen-resources/service-2.json index 74d8fd7070d6..429a73cc0fd5 100644 --- a/services/kendra/src/main/resources/codegen-resources/service-2.json +++ b/services/kendra/src/main/resources/codegen-resources/service-2.json @@ -926,7 +926,26 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ], - 
"documentation":"

      Searches an active index. Use this API to search your documents using query. The Query API enables to do faceted search and to filter results based on document attributes.

      It also enables you to provide user context that Amazon Kendra uses to enforce document access control in the search results.

      Amazon Kendra searches your index for text content and question and answer (FAQ) content. By default the response contains three types of results.

      • Relevant passages

      • Matching FAQs

      • Relevant documents

      You can specify that the query return only one type of result using the QueryResultTypeFilter parameter.

      Each query returns the 100 most relevant results.

      " + "documentation":"

      Searches an index given an input query.

      You can configure boosting or relevance tuning at the query level to override boosting at the index level, filter based on document fields/attributes and faceted search, and filter based on the user or their group access to documents. You can also include certain fields in the response that might provide useful additional information.

      A query response contains three types of results.

      • Relevant suggested answers. The answers can be either a text excerpt or table excerpt. The answer can be highlighted in the excerpt.

      • Matching FAQs or questions-answer from your FAQ file.

      • Relevant documents. This result type includes an excerpt of the document with the document title. The searched terms can be highlighted in the excerpt.

      You can specify that the query return only one type of result using the QueryResultTypeFilter parameter. Each query returns the 100 most relevant results. If you filter result type to only question-answers, a maximum of four results are returned. If you filter result type to only answers, a maximum of three results are returned.

      " + }, + "Retrieve":{ + "name":"Retrieve", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RetrieveRequest"}, + "output":{"shape":"RetrieveResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Retrieves relevant passages or text excerpts given an input query.

      This API is similar to the Query API. However, by default, the Query API only returns excerpt passages of up to 100 token words. With the Retrieve API, you can retrieve longer passages of up to 200 token words and up to 100 semantically relevant passages. This doesn't include question-answer or FAQ type responses from your index. The passages are text excerpts that can be semantically extracted from multiple documents and multiple parts of the same document. If in extreme cases your documents produce no relevant passages using the Retrieve API, you can alternatively use the Query API.

      You can also do the following:

      • Override boosting at the index level

      • Filter based on document fields or attributes

      • Filter based on the user or their group access to documents

      You can also include certain fields in the response that might provide useful additional information.

      " }, "StartDataSourceSyncJob":{ "name":"StartDataSourceSyncJob", @@ -1315,7 +1334,7 @@ "documentation":"

      Configuration information for an Amazon Virtual Private Cloud to connect to your Alfresco. For more information, see Configuring a VPC.

      " } }, - "documentation":"

      Provides the configuration information to connect to Alfresco as your data source.

      Alfresco data source connector is currently in preview mode. Basic authentication is currently supported. If you would like to use Alfresco connector in production, contact Support.

      " + "documentation":"

      Provides the configuration information to connect to Alfresco as your data source.

      Support for AlfrescoConfiguration ended May 2023. We recommend migrating to or using the Alfresco data source template schema / TemplateConfiguration API.

      " }, "AlfrescoEntity":{ "type":"string", @@ -2304,6 +2323,7 @@ }, "documentation":"

      Provides the configuration information that's required to connect to a database.

      " }, + "Content":{"type":"string"}, "ContentSourceConfiguration":{ "type":"structure", "members":{ @@ -2562,7 +2582,7 @@ }, "FileFormat":{ "shape":"FaqFileFormat", - "documentation":"

      The format of the FAQ input file. You can choose between a basic CSV format, a CSV format that includes customs attributes in a header, and a JSON format that includes custom attributes.

      The format must match the format of the file stored in the S3 bucket identified in the S3Path parameter.

      For more information, see Adding questions and answers.

      " + "documentation":"

      The format of the FAQ input file. You can choose between a basic CSV format, a CSV format that includes custom attributes in a header, and a JSON format that includes custom attributes.

      The default format is CSV.

      The format must match the format of the file stored in the S3 bucket identified in the S3Path parameter.

      For more information, see Adding questions and answers.

      " }, "ClientToken":{ "shape":"ClientTokenName", @@ -2879,7 +2899,9 @@ }, "AlfrescoConfiguration":{ "shape":"AlfrescoConfiguration", - "documentation":"

      Provides the configuration information to connect to Alfresco as your data source.

      " + "documentation":"

      Provides the configuration information to connect to Alfresco as your data source.

      Support for AlfrescoConfiguration ended May 2023. We recommend migrating to or using the Alfresco data source template schema / TemplateConfiguration API.

      ", + "deprecated":true, + "deprecatedMessage":"Deprecated AlfrescoConfiguration in favor of TemplateConfiguration" }, "TemplateConfiguration":{ "shape":"TemplateConfiguration", @@ -2939,12 +2961,12 @@ "DataSourceInclusionsExclusionsStrings":{ "type":"list", "member":{"shape":"DataSourceInclusionsExclusionsStringsMember"}, - "max":100, + "max":250, "min":0 }, "DataSourceInclusionsExclusionsStringsMember":{ "type":"string", - "max":150, + "max":300, "min":1 }, "DataSourceName":{ @@ -4114,7 +4136,7 @@ }, "ContentType":{ "shape":"ContentType", - "documentation":"

      The file type of the document in the Blob field.

      " + "documentation":"

      The file type of the document in the Blob field.

      If you want to index snippets or subsets of HTML documents instead of the entirety of the HTML documents, you must add the HTML start and closing tags (<HTML>content</HTML>) around the content.

      " }, "AccessControlConfigurationId":{ "shape":"AccessControlConfigurationId", @@ -4374,6 +4396,7 @@ "type":"list", "member":{"shape":"Status"} }, + "DocumentTitle":{"type":"string"}, "DocumentsMetadataConfiguration":{ "type":"structure", "members":{ @@ -6726,7 +6749,7 @@ "members":{ "IndexId":{ "shape":"IndexId", - "documentation":"

      The identifier of the index to search. The identifier is returned in the response from the CreateIndex API.

      " + "documentation":"

      The identifier of the index for the search.

      " }, "QueryText":{ "shape":"QueryText", @@ -6734,23 +6757,23 @@ }, "AttributeFilter":{ "shape":"AttributeFilter", - "documentation":"

      Enables filtered searches based on document attributes. You can only provide one attribute filter; however, the AndAllFilters, NotFilter, and OrAllFilters parameters contain a list of other filters.

      The AttributeFilter parameter enables you to create a set of filtering rules that a document must satisfy to be included in the query results.

      " + "documentation":"

      Filters search results by document fields/attributes. You can only provide one attribute filter; however, the AndAllFilters, NotFilter, and OrAllFilters parameters contain a list of other filters.

      The AttributeFilter parameter means you can create a set of filtering rules that a document must satisfy to be included in the query results.

      " }, "Facets":{ "shape":"FacetList", - "documentation":"

      An array of documents attributes. Amazon Kendra returns a count for each attribute key specified. This helps your users narrow their search.

      " + "documentation":"

      An array of documents fields/attributes for faceted search. Amazon Kendra returns a count for each field key specified. This helps your users narrow their search.

      " }, "RequestedDocumentAttributes":{ "shape":"DocumentAttributeKeyList", - "documentation":"

      An array of document attributes to include in the response. You can limit the response to include certain document attributes. By default all document attributes are included in the response.

      " + "documentation":"

      An array of document fields/attributes to include in the response. You can limit the response to include certain document fields. By default, all document attributes are included in the response.

      " }, "QueryResultTypeFilter":{ "shape":"QueryResultType", - "documentation":"

      Sets the type of query. Only results for the specified query type are returned.

      " + "documentation":"

      Sets the type of query result or response. Only results for the specified type are returned.

      " }, "DocumentRelevanceOverrideConfigurations":{ "shape":"DocumentRelevanceOverrideConfigurationList", - "documentation":"

      Overrides relevance tuning configurations of fields or attributes set at the index level.

      If you use this API to override the relevance tuning configured at the index level, but there is no relevance tuning configured at the index level, then Amazon Kendra does not apply any relevance tuning.

      If there is relevance tuning configured at the index level, but you do not use this API to override any relevance tuning in the index, then Amazon Kendra uses the relevance tuning that is configured at the index level.

      If there is relevance tuning configured for fields at the index level, but you use this API to override only some of these fields, then for the fields you did not override, the importance is set to 1.

      " + "documentation":"

      Overrides relevance tuning configurations of fields/attributes set at the index level.

      If you use this API to override the relevance tuning configured at the index level, but there is no relevance tuning configured at the index level, then Amazon Kendra does not apply any relevance tuning.

      If there is relevance tuning configured for fields at the index level, and you use this API to override only some of these fields, then for the fields you did not override, the importance is set to 1.

      " }, "PageNumber":{ "shape":"Integer", @@ -6783,7 +6806,7 @@ "members":{ "QueryId":{ "shape":"QueryId", - "documentation":"

      The identifier for the search. You use QueryId to identify the search when using the feedback API.

      " + "documentation":"

      The identifier for the search. You also use QueryId to identify the search when using the SubmitFeedback API.

      " }, "ResultItems":{ "shape":"QueryResultItemList", @@ -6791,11 +6814,11 @@ }, "FacetResults":{ "shape":"FacetResultList", - "documentation":"

      Contains the facet results. A FacetResult contains the counts for each attribute key that was specified in the Facets input parameter.

      " + "documentation":"

      Contains the facet results. A FacetResult contains the counts for each field/attribute key that was specified in the Facets input parameter.

      " }, "TotalNumberOfResults":{ "shape":"Integer", - "documentation":"

      The total number of items found by the search; however, you can only retrieve up to 100 items. For example, if the search found 192 items, you can only retrieve the first 100 of the items.

      " + "documentation":"

      The total number of items found by the search. However, you can only retrieve up to 100 items. For example, if the search found 192 items, you can only retrieve the first 100 of the items.

      " }, "Warnings":{ "shape":"WarningList", @@ -6835,7 +6858,7 @@ }, "AdditionalAttributes":{ "shape":"AdditionalResultAttributeList", - "documentation":"

      One or more additional attributes associated with the query result.

      " + "documentation":"

      One or more additional fields/attributes associated with the query result.

      " }, "DocumentId":{ "shape":"DocumentId", @@ -6855,15 +6878,15 @@ }, "DocumentAttributes":{ "shape":"DocumentAttributeList", - "documentation":"

      An array of document attributes assigned to a document in the search results. For example, the document author (_author) or the source URI (_source_uri) of the document.

      " + "documentation":"

      An array of document fields/attributes assigned to a document in the search results. For example, the document author (_author) or the source URI (_source_uri) of the document.

      " }, "ScoreAttributes":{ "shape":"ScoreAttributes", - "documentation":"

      Indicates the confidence that Amazon Kendra has that a result matches the query that you provided. Each result is placed into a bin that indicates the confidence, VERY_HIGH, HIGH, MEDIUM and LOW. You can use the score to determine if a response meets the confidence needed for your application.

      The field is only set to LOW when the Type field is set to DOCUMENT and Amazon Kendra is not confident that the result matches the query.

      " + "documentation":"

      Indicates the confidence level of Amazon Kendra providing a relevant result for the query. Each result is placed into a bin that indicates the confidence, VERY_HIGH, HIGH, MEDIUM and LOW. You can use the score to determine if a response meets the confidence needed for your application.

      The field is only set to LOW when the Type field is set to DOCUMENT and Amazon Kendra is not confident that the result is relevant to the query.

      " }, "FeedbackToken":{ "shape":"FeedbackToken", - "documentation":"

      A token that identifies a particular result from a particular query. Use this token to provide click-through feedback for the result. For more information, see Submitting feedback .

      " + "documentation":"

      A token that identifies a particular result from a particular query. Use this token to provide click-through feedback for the result. For more information, see Submitting feedback.

      " }, "TableExcerpt":{ "shape":"TableExcerpt", @@ -6953,11 +6976,7 @@ "UPDATING" ] }, - "QueryText":{ - "type":"string", - "max":1000, - "min":1 - }, + "QueryText":{"type":"string"}, "QueryTextList":{ "type":"list", "member":{"shape":"QueryText"}, @@ -7131,6 +7150,94 @@ "max":73, "min":1 }, + "RetrieveRequest":{ + "type":"structure", + "required":[ + "IndexId", + "QueryText" + ], + "members":{ + "IndexId":{ + "shape":"IndexId", + "documentation":"

      The identifier of the index to retrieve relevant passages for the search.

      " + }, + "QueryText":{ + "shape":"QueryText", + "documentation":"

      The input query text to retrieve relevant passages for the search. Amazon Kendra truncates queries at 30 token words, which excludes punctuation and stop words. Truncation still applies if you use Boolean or more advanced, complex queries.

      " + }, + "AttributeFilter":{ + "shape":"AttributeFilter", + "documentation":"

      Filters search results by document fields/attributes. You can only provide one attribute filter; however, the AndAllFilters, NotFilter, and OrAllFilters parameters contain a list of other filters.

      The AttributeFilter parameter means you can create a set of filtering rules that a document must satisfy to be included in the query results.

      " + }, + "RequestedDocumentAttributes":{ + "shape":"DocumentAttributeKeyList", + "documentation":"

      A list of document fields/attributes to include in the response. You can limit the response to include certain document fields. By default, all document fields are included in the response.

      " + }, + "DocumentRelevanceOverrideConfigurations":{ + "shape":"DocumentRelevanceOverrideConfigurationList", + "documentation":"

      Overrides relevance tuning configurations of fields/attributes set at the index level.

      If you use this API to override the relevance tuning configured at the index level, but there is no relevance tuning configured at the index level, then Amazon Kendra does not apply any relevance tuning.

      If there is relevance tuning configured for fields at the index level, and you use this API to override only some of these fields, then for the fields you did not override, the importance is set to 1.

      " + }, + "PageNumber":{ + "shape":"Integer", + "documentation":"

      Retrieved relevant passages are returned in pages the size of the PageSize parameter. By default, Amazon Kendra returns the first page of results. Use this parameter to get result pages after the first one.

      " + }, + "PageSize":{ + "shape":"Integer", + "documentation":"

      Sets the number of retrieved relevant passages that are returned in each page of results. The default page size is 10. The maximum number of results returned is 100. If you ask for more than 100 results, only 100 are returned.

      " + }, + "UserContext":{ + "shape":"UserContext", + "documentation":"

      The user context token or user and group information.

      " + } + } + }, + "RetrieveResult":{ + "type":"structure", + "members":{ + "QueryId":{ + "shape":"QueryId", + "documentation":"

      The identifier of the query used for the search. You also use QueryId to identify the search when using the SubmitFeedback API.

      " + }, + "ResultItems":{ + "shape":"RetrieveResultItemList", + "documentation":"

      The results of the retrieved relevant passages for the search.

      " + } + } + }, + "RetrieveResultItem":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"ResultId", + "documentation":"

      The identifier of the relevant passage result.

      " + }, + "DocumentId":{ + "shape":"DocumentId", + "documentation":"

      The identifier of the document.

      " + }, + "DocumentTitle":{ + "shape":"DocumentTitle", + "documentation":"

      The title of the document.

      " + }, + "Content":{ + "shape":"Content", + "documentation":"

      The contents of the relevant passage.

      " + }, + "DocumentURI":{ + "shape":"Url", + "documentation":"

      The URI of the original location of the document.

      " + }, + "DocumentAttributes":{ + "shape":"DocumentAttributeList", + "documentation":"

      An array of document fields/attributes assigned to a document in the search results. For example, the document author (_author) or the source URI (_source_uri) of the document.

      " + } + }, + "documentation":"

      A single retrieved relevant passage result.

      " + }, + "RetrieveResultItemList":{ + "type":"list", + "member":{"shape":"RetrieveResultItem"} + }, "RoleArn":{ "type":"string", "max":1284, @@ -7458,10 +7565,10 @@ "members":{ "ScoreConfidence":{ "shape":"ScoreConfidence", - "documentation":"

      A relative ranking for how well the response matches the query.

      " + "documentation":"

      A relative ranking for how relevant the response is to the query.

      " } }, - "documentation":"

      Provides a relative ranking that indicates how confident Amazon Kendra is that the response matches the query.

      " + "documentation":"

      Provides a relative ranking that indicates how confident Amazon Kendra is that the response is relevant to the query.

      " }, "ScoreConfidence":{ "type":"string", @@ -7524,7 +7631,7 @@ }, "WebCrawlerMode":{ "shape":"WebCrawlerMode", - "documentation":"

      You can choose one of the following modes:

      • HOST_ONLY – crawl only the website host names. For example, if the seed URL is \"abc.example.com\", then only URLs with host name \"abc.example.com\" are crawled.

      • SUBDOMAINS – crawl the website host names with subdomains. For example, if the seed URL is \"abc.example.com\", then \"a.abc.example.com\" and \"b.abc.example.com\" are also crawled.

      • EVERYTHING – crawl the website host names with subdomains and other domains that the web pages link to.

      The default mode is set to HOST_ONLY.

      " + "documentation":"

      You can choose one of the following modes:

      • HOST_ONLY—crawl only the website host names. For example, if the seed URL is \"abc.example.com\", then only URLs with host name \"abc.example.com\" are crawled.

      • SUBDOMAINS—crawl the website host names with subdomains. For example, if the seed URL is \"abc.example.com\", then \"a.abc.example.com\" and \"b.abc.example.com\" are also crawled.

      • EVERYTHING—crawl the website host names with subdomains and other domains that the web pages link to.

      The default mode is set to HOST_ONLY.

      " } }, "documentation":"

      Provides the configuration information for the seed or starting point URLs to crawl.

      When selecting websites to index, you must adhere to the Amazon Acceptable Use Policy and all other Amazon terms. Remember that you must only use Amazon Kendra Web Crawler to index your own web pages, or web pages that you have authorization to index.

      " @@ -7610,11 +7717,11 @@ }, "IncludeAttachmentFilePatterns":{ "shape":"DataSourceInclusionsExclusionsStrings", - "documentation":"

      A list of regular expression patterns to include certain attachments of knowledge articles in your ServiceNow. Item that match the patterns are included in the index. Items that don't match the patterns are excluded from the index. If an item matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the item isn't included in the index.

      The regex is applied to the field specified in the PatternTargetField.

      " + "documentation":"

      A list of regular expression patterns applied to include knowledge article attachments. Attachments that match the patterns are included in the index. Items that don't match the patterns are excluded from the index. If an item matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the item isn't included in the index.

      " }, "ExcludeAttachmentFilePatterns":{ "shape":"DataSourceInclusionsExclusionsStrings", - "documentation":"

      A list of regular expression patterns to exclude certain attachments of knowledge articles in your ServiceNow. Item that match the patterns are excluded from the index. Items that don't match the patterns are included in the index. If an item matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the item isn't included in the index.

      The regex is applied to the field specified in the PatternTargetField.

      " + "documentation":"

      A list of regular expression patterns applied to exclude certain knowledge article attachments. Attachments that match the patterns are excluded from the index. Items that don't match the patterns are included in the index. If an item matches both an inclusion and exclusion pattern, the exclusion pattern takes precedence and the item isn't included in the index.

      " }, "DocumentDataFieldName":{ "shape":"DataSourceFieldName", @@ -9003,7 +9110,7 @@ }, "CrawlDepth":{ "shape":"CrawlDepth", - "documentation":"

      Specifies the number of levels in a website that you want to crawl.

      The first level begins from the website seed or starting point URL. For example, if a website has three levels—index level (the seed in this example), sections level, and subsections level—and you are only interested in crawling information up to the sections level (levels 0-1), you can set your depth to 1.

      The default crawl depth is set to 2.

      " + "documentation":"

      The 'depth' or number of levels from the seed level to crawl. For example, the seed URL page is depth 1 and any hyperlinks on this page that are also crawled are depth 2.

      " }, "MaxLinksPerPage":{ "shape":"MaxLinksPerPage", diff --git a/services/kendraranking/pom.xml b/services/kendraranking/pom.xml index b37342a5a0cd..db644b07a2b5 100644 --- a/services/kendraranking/pom.xml +++ b/services/kendraranking/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT kendraranking AWS Java SDK :: Services :: Kendra Ranking diff --git a/services/keyspaces/pom.xml b/services/keyspaces/pom.xml index 4a27ef320a5e..e4a7a9d37ae2 100644 --- a/services/keyspaces/pom.xml +++ b/services/keyspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT keyspaces AWS Java SDK :: Services :: Keyspaces diff --git a/services/keyspaces/src/main/resources/codegen-resources/endpoint-tests.json b/services/keyspaces/src/main/resources/codegen-resources/endpoint-tests.json index cd6fb8403293..90a9df595633 100644 --- a/services/keyspaces/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/keyspaces/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-east-1", "UseFIPS": false, - "Region": "ap-east-1" + "UseDualStack": false } }, { @@ -21,9 +21,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-1", "UseFIPS": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-2", "UseFIPS": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { @@ -47,9 +47,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-south-1", "UseFIPS": false, - "Region": "ap-south-1" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-1", "UseFIPS": false, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-2", "UseFIPS": 
false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ca-central-1", "UseFIPS": false, - "Region": "ca-central-1" + "UseDualStack": false } }, { @@ -99,9 +99,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-central-1", "UseFIPS": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { @@ -112,9 +112,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-north-1", "UseFIPS": false, - "Region": "eu-north-1" + "UseDualStack": false } }, { @@ -125,9 +125,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-west-1", "UseFIPS": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { @@ -138,9 +138,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-west-2", "UseFIPS": false, - "Region": "eu-west-2" + "UseDualStack": false } }, { @@ -151,9 +151,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-west-3", "UseFIPS": false, - "Region": "eu-west-3" + "UseDualStack": false } }, { @@ -164,9 +164,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "me-south-1", "UseFIPS": false, - "Region": "me-south-1" + "UseDualStack": false } }, { @@ -177,9 +177,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "sa-east-1", "UseFIPS": false, - "Region": "sa-east-1" + "UseDualStack": false } }, { @@ -190,9 +190,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -203,9 +203,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -216,9 +216,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-2", "UseFIPS": false, - "Region": "us-east-2" + "UseDualStack": false } }, { @@ -229,9 +229,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-1", "UseFIPS": false, - "Region": "us-west-1" + "UseDualStack": false } }, { 
@@ -242,9 +242,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": false, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -255,9 +255,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": true, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -268,9 +268,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -281,9 +281,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -294,9 +294,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -307,9 +307,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -320,9 +320,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -333,9 +333,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -346,9 +346,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -359,9 +359,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -372,9 +372,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-west-1", "UseFIPS": false, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { @@ -385,9 +385,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-west-1", "UseFIPS": true, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { @@ -398,9 +398,9 @@ } }, "params": { - 
"UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -411,9 +411,20 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -424,9 +435,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -437,9 +459,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -450,9 +483,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -463,9 +507,9 @@ } }, 
"params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -476,9 +520,9 @@ } }, "params": { - "UseDualStack": false, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -490,8 +534,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -501,9 +545,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -513,11 +557,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/keyspaces/src/main/resources/codegen-resources/service-2.json b/services/keyspaces/src/main/resources/codegen-resources/service-2.json index 9c4fc96d9c7f..1127adc7cc28 100644 --- a/services/keyspaces/src/main/resources/codegen-resources/service-2.json +++ b/services/keyspaces/src/main/resources/codegen-resources/service-2.json @@ -202,7 +202,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

      Associates a set of tags with a Amazon Keyspaces resource. You can then activate these user-defined tags so that they appear on the Cost Management Console for cost allocation tracking. For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer Guide.

      For IAM policy examples that show how to control access to Amazon Keyspaces resources based on tags, see Amazon Keyspaces resource access based on tags in the Amazon Keyspaces Developer Guide.

      " + "documentation":"

      Associates a set of tags with an Amazon Keyspaces resource. You can then activate these user-defined tags so that they appear on the Cost Management Console for cost allocation tracking. For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer Guide.

      For IAM policy examples that show how to control access to Amazon Keyspaces resources based on tags, see Amazon Keyspaces resource access based on tags in the Amazon Keyspaces Developer Guide.

      " }, "UntagResource":{ "name":"UntagResource", @@ -393,6 +393,10 @@ "tags":{ "shape":"TagList", "documentation":"

      A list of key-value pair tags to be attached to the keyspace.

      For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer Guide.

      " + }, + "replicationSpecification":{ + "shape":"ReplicationSpecification", + "documentation":"

      The replication specification of the keyspace includes:

      • replicationStrategy - the required value is SINGLE_REGION or MULTI_REGION.

      • regionList - if the replicationStrategy is MULTI_REGION, the regionList requires the current Region and at least one additional Amazon Web Services Region where the keyspace is going to be replicated. The maximum number of supported replication Regions including the current Region is six.

      " } } }, @@ -550,7 +554,8 @@ "type":"structure", "required":[ "keyspaceName", - "resourceArn" + "resourceArn", + "replicationStrategy" ], "members":{ "keyspaceName":{ @@ -559,7 +564,15 @@ }, "resourceArn":{ "shape":"ARN", - "documentation":"

      The ARN of the keyspace.

      " + "documentation":"

      Returns the ARN of the keyspace.

      " + }, + "replicationStrategy":{ + "shape":"rs", + "documentation":"

      Returns the replication strategy of the keyspace. The options are SINGLE_REGION or MULTI_REGION.

      " + }, + "replicationRegions":{ + "shape":"RegionList", + "documentation":"

      If the replicationStrategy of the keyspace is MULTI_REGION, a list of replication Regions is returned.

      " } } }, @@ -655,13 +668,14 @@ "type":"string", "max":48, "min":1, - "pattern":"[a-zA-Z0-9][a-zA-Z0-9_]{1,47}" + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_]{0,47}" }, "KeyspaceSummary":{ "type":"structure", "required":[ "keyspaceName", - "resourceArn" + "resourceArn", + "replicationStrategy" ], "members":{ "keyspaceName":{ @@ -671,6 +685,14 @@ "resourceArn":{ "shape":"ARN", "documentation":"

      The unique identifier of the keyspace in the format of an Amazon Resource Name (ARN).

      " + }, + "replicationStrategy":{ + "shape":"rs", + "documentation":"

      This property specifies if a keyspace is a single Region keyspace or a multi-Region keyspace. The available values are SINGLE_REGION or MULTI_REGION.

      " + }, + "replicationRegions":{ + "shape":"RegionList", + "documentation":"

      If the replicationStrategy of the keyspace is MULTI_REGION, a list of replication Regions is returned.

      " } }, "documentation":"

      Represents the properties of a keyspace.

      " @@ -828,6 +850,27 @@ }, "documentation":"

      The point-in-time recovery status of the specified table.

      " }, + "RegionList":{ + "type":"list", + "member":{"shape":"region"}, + "max":6, + "min":2 + }, + "ReplicationSpecification":{ + "type":"structure", + "required":["replicationStrategy"], + "members":{ + "replicationStrategy":{ + "shape":"rs", + "documentation":"

      The replicationStrategy of a keyspace, the required value is SINGLE_REGION or MULTI_REGION.

      " + }, + "regionList":{ + "shape":"RegionList", + "documentation":"

      The regionList can contain up to six Amazon Web Services Regions where the keyspace is replicated.

      " + } + }, + "documentation":"

      The replication specification of the keyspace includes:

      • regionList - up to six Amazon Web Services Regions where the keyspace is replicated.

      • replicationStrategy - the required value is SINGLE_REGION or MULTI_REGION.

      " + }, "ResourceNotFoundException":{ "type":"structure", "members":{ @@ -958,7 +1001,7 @@ "type":"string", "max":48, "min":1, - "pattern":"[a-zA-Z0-9][a-zA-Z0-9_]{1,47}" + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_]{0,47}" }, "TableStatus":{ "type":"string", @@ -1167,6 +1210,20 @@ "type":"string", "max":5096, "min":1 + }, + "region":{ + "type":"string", + "max":25, + "min":2 + }, + "rs":{ + "type":"string", + "enum":[ + "SINGLE_REGION", + "MULTI_REGION" + ], + "max":20, + "min":1 } }, "documentation":"

      Amazon Keyspaces (for Apache Cassandra) is a scalable, highly available, and managed Apache Cassandra-compatible database service. Amazon Keyspaces makes it easy to migrate, run, and scale Cassandra workloads in the Amazon Web Services Cloud. With just a few clicks on the Amazon Web Services Management Console or a few lines of code, you can create keyspaces and tables in Amazon Keyspaces, without deploying any infrastructure or installing software.

      In addition to supporting Cassandra Query Language (CQL) requests via open-source Cassandra drivers, Amazon Keyspaces supports data definition language (DDL) operations to manage keyspaces and tables using the Amazon Web Services SDK and CLI, as well as infrastructure as code (IaC) services and tools such as CloudFormation and Terraform. This API reference describes the supported DDL operations in detail.

      For the list of all supported CQL APIs, see Supported Cassandra APIs, operations, and data types in Amazon Keyspaces in the Amazon Keyspaces Developer Guide.

      To learn how Amazon Keyspaces API actions are recorded with CloudTrail, see Amazon Keyspaces information in CloudTrail in the Amazon Keyspaces Developer Guide.

      For more information about Amazon Web Services APIs, for example how to implement retry logic or how to sign Amazon Web Services API requests, see Amazon Web Services APIs in the General Reference.

      " diff --git a/services/kinesis/pom.xml b/services/kinesis/pom.xml index 28da417afbfd..c7058bbc986b 100644 --- a/services/kinesis/pom.xml +++ b/services/kinesis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT kinesis AWS Java SDK :: Services :: Amazon Kinesis diff --git a/services/kinesisanalytics/pom.xml b/services/kinesisanalytics/pom.xml index bc5656fb2196..ff1ae366b33e 100644 --- a/services/kinesisanalytics/pom.xml +++ b/services/kinesisanalytics/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT kinesisanalytics AWS Java SDK :: Services :: Amazon Kinesis Analytics diff --git a/services/kinesisanalyticsv2/pom.xml b/services/kinesisanalyticsv2/pom.xml index c8735596d3d4..e022acba83b4 100644 --- a/services/kinesisanalyticsv2/pom.xml +++ b/services/kinesisanalyticsv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT kinesisanalyticsv2 AWS Java SDK :: Services :: Kinesis Analytics V2 diff --git a/services/kinesisvideo/pom.xml b/services/kinesisvideo/pom.xml index 8c21ddd928e9..3493a3adfcb6 100644 --- a/services/kinesisvideo/pom.xml +++ b/services/kinesisvideo/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 kinesisvideo diff --git a/services/kinesisvideoarchivedmedia/pom.xml b/services/kinesisvideoarchivedmedia/pom.xml index 8a1e1af7ee56..02bd4c3621b8 100644 --- a/services/kinesisvideoarchivedmedia/pom.xml +++ b/services/kinesisvideoarchivedmedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT kinesisvideoarchivedmedia AWS Java SDK :: Services :: Kinesis Video Archived Media diff --git a/services/kinesisvideomedia/pom.xml b/services/kinesisvideomedia/pom.xml index 6aee0d40bca6..cc460f129f06 100644 --- a/services/kinesisvideomedia/pom.xml +++ b/services/kinesisvideomedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk 
services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT kinesisvideomedia AWS Java SDK :: Services :: Kinesis Video Media diff --git a/services/kinesisvideosignaling/pom.xml b/services/kinesisvideosignaling/pom.xml index 9daff872728e..c28e1156ed6e 100644 --- a/services/kinesisvideosignaling/pom.xml +++ b/services/kinesisvideosignaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT kinesisvideosignaling AWS Java SDK :: Services :: Kinesis Video Signaling diff --git a/services/kinesisvideowebrtcstorage/pom.xml b/services/kinesisvideowebrtcstorage/pom.xml index 4cb77a652b77..f24d9cf3df8f 100644 --- a/services/kinesisvideowebrtcstorage/pom.xml +++ b/services/kinesisvideowebrtcstorage/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT kinesisvideowebrtcstorage AWS Java SDK :: Services :: Kinesis Video Web RTC Storage diff --git a/services/kms/pom.xml b/services/kms/pom.xml index bb3aa62d4cc9..d1b07d47e087 100644 --- a/services/kms/pom.xml +++ b/services/kms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT kms AWS Java SDK :: Services :: AWS KMS diff --git a/services/kms/src/main/resources/codegen-resources/service-2.json b/services/kms/src/main/resources/codegen-resources/service-2.json index b0831901380a..c096e0a6e7c2 100644 --- a/services/kms/src/main/resources/codegen-resources/service-2.json +++ b/services/kms/src/main/resources/codegen-resources/service-2.json @@ -137,7 +137,7 @@ {"shape":"XksKeyAlreadyInUseException"}, {"shape":"XksKeyNotFoundException"} ], - "documentation":"

      Creates a unique customer managed KMS key in your Amazon Web Services account and Region. You can use a KMS key in cryptographic operations, such as encryption and signing. Some Amazon Web Services services let you use KMS keys that you create and manage to protect your service resources.

      A KMS key is a logical representation of a cryptographic key. In addition to the key material used in cryptographic operations, a KMS key includes metadata, such as the key ID, key policy, creation date, description, and key state. For details, see Managing keys in the Key Management Service Developer Guide

      Use the parameters of CreateKey to specify the type of KMS key, the source of its key material, its key policy, description, tags, and other properties.

      KMS has replaced the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

      To create different types of KMS keys, use the following guidance:

      Symmetric encryption KMS key

      By default, CreateKey creates a symmetric encryption KMS key with key material that KMS generates. This is the basic and most widely used type of KMS key, and provides the best performance.

      To create a symmetric encryption KMS key, you don't need to specify any parameters. The default value for KeySpec, SYMMETRIC_DEFAULT, the default value for KeyUsage, ENCRYPT_DECRYPT, and the default value for Origin, AWS_KMS, create a symmetric encryption KMS key with KMS key material.

      If you need a key for basic encryption and decryption or you are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but they are typically used to generate data keys and data keys pairs. For details, see GenerateDataKey and GenerateDataKeyPair.

      Asymmetric KMS keys

      To create an asymmetric KMS key, use the KeySpec parameter to specify the type of key material in the KMS key. Then, use the KeyUsage parameter to determine whether the KMS key will be used to encrypt and decrypt or sign and verify. You can't change these properties after the KMS key is created.

      Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or an SM2 key pair (China Regions only). The private key in an asymmetric KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key so it can be used outside of KMS. KMS keys with RSA or SM2 key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). KMS keys with ECC key pairs can be used only to sign and verify messages. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

      HMAC KMS key

      To create an HMAC KMS key, set the KeySpec parameter to a key spec value for HMAC KMS keys. Then set the KeyUsage parameter to GENERATE_VERIFY_MAC. You must set the key usage even though GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys. You can't change these properties after the KMS key is created.

      HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes.

      HMAC KMS keys are not supported in all Amazon Web Services Regions. If you try to create an HMAC KMS key in an Amazon Web Services Region in which HMAC keys are not supported, the CreateKey operation returns an UnsupportedOperationException. For a list of Regions in which HMAC KMS keys are supported, see HMAC keys in KMS in the Key Management Service Developer Guide.

      Multi-Region primary keys
      Imported key material

      To create a multi-Region primary key in the local Amazon Web Services Region, use the MultiRegion parameter with a value of True. To create a multi-Region replica key, that is, a KMS key with the same key ID and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its primary key to a replica key, use the UpdatePrimaryRegion operation.

      You can create multi-Region KMS keys for all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't create multi-Region keys in a custom key store.

      This operation supports multi-Region keys, an KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

      To import your own key material into a KMS key, begin by creating a symmetric encryption KMS key with no key material. To do this, use the Origin parameter of CreateKey with a value of EXTERNAL. Next, use the GetParametersForImport operation to get a public key and import token, and use the public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For step-by-step instructions, see Importing Key Material in the Key Management Service Developer Guide.

      This feature supports only symmetric encryption KMS keys, including multi-Region symmetric encryption KMS keys. You cannot import key material into any other type of KMS key.

      To create a multi-Region primary key with imported key material, use the Origin parameter of CreateKey with a value of EXTERNAL and the MultiRegion parameter with a value of True. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For instructions, see Importing key material into multi-Region keys. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

      Custom key store

      A custom key store lets you protect your Amazon Web Services resources using keys in a backing key store that you own and manage. When you request a cryptographic operation with a KMS key in a custom key store, the operation is performed in the backing key store using its cryptographic keys.

      KMS supports CloudHSM key stores backed by an CloudHSM cluster and external key stores backed by an external key manager outside of Amazon Web Services. When you create a KMS key in an CloudHSM key store, KMS generates an encryption key in the CloudHSM cluster and associates it with the KMS key. When you create a KMS key in an external key store, you specify an existing encryption key in the external key manager.

      Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation.

      Before you create a KMS key in a custom key store, the ConnectionState of the key store must be CONNECTED. To connect the custom key store, use the ConnectCustomKeyStore operation. To find the ConnectionState, use the DescribeCustomKeyStores operation.

      To create a KMS key in a custom key store, use the CustomKeyStoreId. Use the default KeySpec value, SYMMETRIC_DEFAULT, and the default KeyUsage value, ENCRYPT_DECRYPT to create a symmetric encryption key. No other key type is supported in a custom key store.

      To create a KMS key in an CloudHSM key store, use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the custom key store must have at least two active HSMs in different Availability Zones in the Amazon Web Services Region.

      To create a KMS key in an external key store, use the Origin parameter with a value of EXTERNAL_KEY_STORE and an XksKeyId parameter that identifies an existing external key.

      Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation.

      Cross-account use: No. You cannot use this operation to create a KMS key in a different Amazon Web Services account.

      Required permissions: kms:CreateKey (IAM policy). To use the Tags parameter, kms:TagResource (IAM policy). For examples and information about related permissions, see Allow a user to create KMS keys in the Key Management Service Developer Guide.

      Related operations:

      " + "documentation":"

      Creates a unique customer managed KMS key in your Amazon Web Services account and Region. You can use a KMS key in cryptographic operations, such as encryption and signing. Some Amazon Web Services services let you use KMS keys that you create and manage to protect your service resources.

      A KMS key is a logical representation of a cryptographic key. In addition to the key material used in cryptographic operations, a KMS key includes metadata, such as the key ID, key policy, creation date, description, and key state. For details, see Managing keys in the Key Management Service Developer Guide

      Use the parameters of CreateKey to specify the type of KMS key, the source of its key material, its key policy, description, tags, and other properties.

      KMS has replaced the term customer master key (CMK) with KMS key and KMS keys. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

      To create different types of KMS keys, use the following guidance:

      Symmetric encryption KMS key

      By default, CreateKey creates a symmetric encryption KMS key with key material that KMS generates. This is the basic and most widely used type of KMS key, and provides the best performance.

      To create a symmetric encryption KMS key, you don't need to specify any parameters. The default value for KeySpec, SYMMETRIC_DEFAULT, the default value for KeyUsage, ENCRYPT_DECRYPT, and the default value for Origin, AWS_KMS, create a symmetric encryption KMS key with KMS key material.

      If you need a key for basic encryption and decryption or you are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but they are typically used to generate data keys and data key pairs. For details, see GenerateDataKey and GenerateDataKeyPair.

      Asymmetric KMS keys

      To create an asymmetric KMS key, use the KeySpec parameter to specify the type of key material in the KMS key. Then, use the KeyUsage parameter to determine whether the KMS key will be used to encrypt and decrypt or sign and verify. You can't change these properties after the KMS key is created.

      Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or an SM2 key pair (China Regions only). The private key in an asymmetric KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key so it can be used outside of KMS. KMS keys with RSA or SM2 key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). KMS keys with ECC key pairs can be used only to sign and verify messages. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

      HMAC KMS key

      To create an HMAC KMS key, set the KeySpec parameter to a key spec value for HMAC KMS keys. Then set the KeyUsage parameter to GENERATE_VERIFY_MAC. You must set the key usage even though GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys. You can't change these properties after the KMS key is created.

      HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes.

      Multi-Region primary keys
      Imported key material

      To create a multi-Region primary key in the local Amazon Web Services Region, use the MultiRegion parameter with a value of True. To create a multi-Region replica key, that is, a KMS key with the same key ID and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its primary key to a replica key, use the UpdatePrimaryRegion operation.

      You can create multi-Region KMS keys for all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't create multi-Region keys in a custom key store.

      This operation supports multi-Region keys, an KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

      To import your own key material into a KMS key, begin by creating a KMS key with no key material. To do this, use the Origin parameter of CreateKey with a value of EXTERNAL. Next, use the GetParametersForImport operation to get a public key and import token. Use the wrapping public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For step-by-step instructions, see Importing Key Material in the Key Management Service Developer Guide.

      You can import key material into KMS keys of all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't import key material into a KMS key in a custom key store.

      To create a multi-Region primary key with imported key material, use the Origin parameter of CreateKey with a value of EXTERNAL and the MultiRegion parameter with a value of True. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For instructions, see Importing key material into multi-Region keys. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

      Custom key store

      A custom key store lets you protect your Amazon Web Services resources using keys in a backing key store that you own and manage. When you request a cryptographic operation with a KMS key in a custom key store, the operation is performed in the backing key store using its cryptographic keys.

      KMS supports CloudHSM key stores backed by an CloudHSM cluster and external key stores backed by an external key manager outside of Amazon Web Services. When you create a KMS key in an CloudHSM key store, KMS generates an encryption key in the CloudHSM cluster and associates it with the KMS key. When you create a KMS key in an external key store, you specify an existing encryption key in the external key manager.

      Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation.

      Before you create a KMS key in a custom key store, the ConnectionState of the key store must be CONNECTED. To connect the custom key store, use the ConnectCustomKeyStore operation. To find the ConnectionState, use the DescribeCustomKeyStores operation.

      To create a KMS key in a custom key store, use the CustomKeyStoreId. Use the default KeySpec value, SYMMETRIC_DEFAULT, and the default KeyUsage value, ENCRYPT_DECRYPT to create a symmetric encryption key. No other key type is supported in a custom key store.

      To create a KMS key in an CloudHSM key store, use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the custom key store must have at least two active HSMs in different Availability Zones in the Amazon Web Services Region.

      To create a KMS key in an external key store, use the Origin parameter with a value of EXTERNAL_KEY_STORE and an XksKeyId parameter that identifies an existing external key.

      Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation.

      Cross-account use: No. You cannot use this operation to create a KMS key in a different Amazon Web Services account.

      Required permissions: kms:CreateKey (IAM policy). To use the Tags parameter, kms:TagResource (IAM policy). For examples and information about related permissions, see Allow a user to create KMS keys in the Key Management Service Developer Guide.

      Related operations:

      " }, "Decrypt":{ "name":"Decrypt", @@ -207,7 +207,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

      Deletes key material that you previously imported. This operation makes the specified KMS key unusable. For more information about importing key material into KMS, see Importing Key Material in the Key Management Service Developer Guide.

      When the specified KMS key is in the PendingDeletion state, this operation does not change the KMS key's state. Otherwise, it changes the KMS key's state to PendingImport.

      After you delete key material, you can use ImportKeyMaterial to reimport the same key material into the KMS key.

      The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

      Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

      Required permissions: kms:DeleteImportedKeyMaterial (key policy)

      Related operations:

      " + "documentation":"

      Deletes key material that was previously imported. This operation makes the specified KMS key temporarily unusable. To restore the usability of the KMS key, reimport the same key material. For more information about importing key material into KMS, see Importing Key Material in the Key Management Service Developer Guide.

      When the specified KMS key is in the PendingDeletion state, this operation does not change the KMS key's state. Otherwise, it changes the KMS key's state to PendingImport.

      The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

      Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

      Required permissions: kms:DeleteImportedKeyMaterial (key policy)

      Related operations:

      " }, "DescribeCustomKeyStores":{ "name":"DescribeCustomKeyStores", @@ -513,7 +513,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

      Returns the items you need to import key material into a symmetric encryption KMS key. For more information about importing key material into KMS, see Importing key material in the Key Management Service Developer Guide.

      This operation returns a public key and an import token. Use the public key to encrypt the symmetric key material. Store the import token to send with a subsequent ImportKeyMaterial request.

      You must specify the key ID of the symmetric encryption KMS key into which you will import key material. The KMS key Origin must be EXTERNAL. You must also specify the wrapping algorithm and type of wrapping key (public key) that you will use to encrypt the key material. You cannot perform this operation on an asymmetric KMS key, an HMAC KMS key, or on any KMS key in a different Amazon Web Services account.

      To import key material, you must use the public key and import token from the same response. These items are valid for 24 hours. The expiration date and time appear in the GetParametersForImport response. You cannot use an expired token in an ImportKeyMaterial request. If your key and token expire, send another GetParametersForImport request.

      The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

      Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

      Required permissions: kms:GetParametersForImport (key policy)

      Related operations:

      " + "documentation":"

      Returns the public key and an import token you need to import or reimport key material for a KMS key.

      By default, KMS keys are created with key material that KMS generates. This operation supports Importing key material, an advanced feature that lets you generate and import the cryptographic key material for a KMS key. For more information about importing key material into KMS, see Importing key material in the Key Management Service Developer Guide.

      Before calling GetParametersForImport, use the CreateKey operation with an Origin value of EXTERNAL to create a KMS key with no key material. You can import key material for a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, or asymmetric signing KMS key. You can also import key material into a multi-Region key of any supported type. However, you can't import key material into a KMS key in a custom key store. You can also use GetParametersForImport to get a public key and import token to reimport the original key material into a KMS key whose key material expired or was deleted.

      GetParametersForImport returns the items that you need to import your key material.

      • The public key (or \"wrapping key\") of an RSA key pair that KMS generates.

        You will use this public key to encrypt (\"wrap\") your key material while it's in transit to KMS.

      • An import token that ensures that KMS can decrypt your key material and associate it with the correct KMS key.

      The public key and its import token are permanently linked and must be used together. Each public key and import token set is valid for 24 hours. The expiration date and time appear in the ParametersValidTo field in the GetParametersForImport response. You cannot use an expired public key or import token in an ImportKeyMaterial request. If your key and token expire, send another GetParametersForImport request.

      GetParametersForImport requires the following information:

      • The key ID of the KMS key for which you are importing the key material.

      • The key spec of the public key (\"wrapping key\") that you will use to encrypt your key material during import.

      • The wrapping algorithm that you will use with the public key to encrypt your key material.

      You can use the same or a different public key spec and wrapping algorithm each time you import or reimport the same key material.

      The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

      Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

      Required permissions: kms:GetParametersForImport (key policy)

      Related operations:

      " }, "GetPublicKey":{ "name":"GetPublicKey", @@ -557,7 +557,7 @@ {"shape":"ExpiredImportTokenException"}, {"shape":"InvalidImportTokenException"} ], - "documentation":"

      Imports key material into an existing symmetric encryption KMS key that was created without key material. After you successfully import key material into a KMS key, you can reimport the same key material into that KMS key, but you cannot import different key material.

      You cannot perform this operation on an asymmetric KMS key, an HMAC KMS key, or on any KMS key in a different Amazon Web Services account. For more information about creating KMS keys with no key material and then importing key material, see Importing Key Material in the Key Management Service Developer Guide.

      Before using this operation, call GetParametersForImport. Its response includes a public key and an import token. Use the public key to encrypt the key material. Then, submit the import token from the same GetParametersForImport response.

      When calling this operation, you must specify the following values:

      • The key ID or key ARN of a KMS key with no key material. Its Origin must be EXTERNAL.

        (To create a KMS key with no key material, call CreateKey and set the value of its Origin parameter to EXTERNAL. To get the Origin of a KMS key, call DescribeKey.)

      • The encrypted key material. To get the public key to encrypt the key material, call GetParametersForImport.

      • The import token that GetParametersForImport returned. You must use a public key and token from the same GetParametersForImport response.

      • Whether the key material expires (ExpirationModel) and, if so, when (ValidTo). If you set an expiration date, on the specified date, KMS deletes the key material from the KMS key, making the KMS key unusable. To use the KMS key in cryptographic operations again, you must reimport the same key material. The only way to change the expiration model or expiration date is by reimporting the same key material and specifying a new expiration date.

      When this operation is successful, the key state of the KMS key changes from PendingImport to Enabled, and you can use the KMS key.

      If this operation fails, use the exception to help determine the problem. If the error is related to the key material, the import token, or wrapping key, use GetParametersForImport to get a new public key and import token for the KMS key and repeat the import procedure. For help, see How To Import Key Material in the Key Management Service Developer Guide.

      The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

      Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

      Required permissions: kms:ImportKeyMaterial (key policy)

      Related operations:

      " + "documentation":"

      Imports or reimports key material into an existing KMS key that was created without key material. ImportKeyMaterial also sets the expiration model and expiration date of the imported key material.

      By default, KMS keys are created with key material that KMS generates. This operation supports Importing key material, an advanced feature that lets you generate and import the cryptographic key material for a KMS key. For more information about importing key material into KMS, see Importing key material in the Key Management Service Developer Guide.

      After you successfully import key material into a KMS key, you can reimport the same key material into that KMS key, but you cannot import different key material. You might reimport key material to replace key material that expired or key material that you deleted. You might also reimport key material to change the expiration model or expiration date of the key material. Before reimporting key material, if necessary, call DeleteImportedKeyMaterial to delete the current imported key material.

      Each time you import key material into KMS, you can determine whether (ExpirationModel) and when (ValidTo) the key material expires. To change the expiration of your key material, you must import it again, either by calling ImportKeyMaterial or using the import features of the KMS console.

      Before calling ImportKeyMaterial:

      • Create or identify a KMS key with no key material. The KMS key must have an Origin value of EXTERNAL, which indicates that the KMS key is designed for imported key material.

        To create a new KMS key for imported key material, call the CreateKey operation with an Origin value of EXTERNAL. You can create a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, or asymmetric signing KMS key. You can also import key material into a multi-Region key of any supported type. However, you can't import key material into a KMS key in a custom key store.

      • Use the DescribeKey operation to verify that the KeyState of the KMS key is PendingImport, which indicates that the KMS key has no key material.

        If you are reimporting the same key material into an existing KMS key, you might need to call the DeleteImportedKeyMaterial to delete its existing key material.

      • Call the GetParametersForImport operation to get a public key and import token set for importing key material.

      • Use the public key in the GetParametersForImport response to encrypt your key material.

      Then, in an ImportKeyMaterial request, you submit your encrypted key material and import token. When calling this operation, you must specify the following values:

      • The key ID or key ARN of the KMS key to associate with the imported key material. Its Origin must be EXTERNAL and its KeyState must be PendingImport. You cannot perform this operation on a KMS key in a custom key store, or on a KMS key in a different Amazon Web Services account. To get the Origin and KeyState of a KMS key, call DescribeKey.

      • The encrypted key material.

      • The import token that GetParametersForImport returned. You must use a public key and token from the same GetParametersForImport response.

      • Whether the key material expires (ExpirationModel) and, if so, when (ValidTo). For help with this choice, see Setting an expiration time in the Key Management Service Developer Guide.

        If you set an expiration date, KMS deletes the key material from the KMS key on the specified date, making the KMS key unusable. To use the KMS key in cryptographic operations again, you must reimport the same key material. However, you can delete and reimport the key material at any time, including before the key material expires. Each time you reimport, you can eliminate or reset the expiration time.

      When this operation is successful, the key state of the KMS key changes from PendingImport to Enabled, and you can use the KMS key in cryptographic operations.

      If this operation fails, use the exception to help determine the problem. If the error is related to the key material, the import token, or wrapping key, use GetParametersForImport to get a new public key and import token for the KMS key and repeat the import procedure. For help, see How To Import Key Material in the Key Management Service Developer Guide.

      The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

      Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

      Required permissions: kms:ImportKeyMaterial (key policy)

      Related operations:

      " }, "ListAliases":{ "name":"ListAliases", @@ -773,7 +773,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

      Schedules the deletion of a KMS key. By default, KMS applies a waiting period of 30 days, but you can specify a waiting period of 7-30 days. When this operation is successful, the key state of the KMS key changes to PendingDeletion and the key can't be used in any cryptographic operations. It remains in this state for the duration of the waiting period. Before the waiting period ends, you can use CancelKeyDeletion to cancel the deletion of the KMS key. After the waiting period ends, KMS deletes the KMS key, its key material, and all KMS data associated with it, including all aliases that refer to it.

      Deleting a KMS key is a destructive and potentially dangerous operation. When a KMS key is deleted, all data that was encrypted under the KMS key is unrecoverable. (The only exception is a multi-Region replica key.) To prevent the use of a KMS key without deleting it, use DisableKey.

      You can schedule the deletion of a multi-Region primary key and its replica keys at any time. However, KMS will not delete a multi-Region primary key with existing replica keys. If you schedule the deletion of a primary key with replicas, its key state changes to PendingReplicaDeletion and it cannot be replicated or used in cryptographic operations. This status can continue indefinitely. When the last of its replicas keys is deleted (not just scheduled), the key state of the primary key changes to PendingDeletion and its waiting period (PendingWindowInDays) begins. For details, see Deleting multi-Region keys in the Key Management Service Developer Guide.

      When KMS deletes a KMS key from an CloudHSM key store, it makes a best effort to delete the associated key material from the associated CloudHSM cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups. Deleting a KMS key from an external key store has no effect on the associated external key. However, for both types of custom key stores, deleting a KMS key is destructive and irreversible. You cannot decrypt ciphertext encrypted under the KMS key by using only its associated external key or CloudHSM key. Also, you cannot recreate a KMS key in an external key store by creating a new KMS key with the same key material.

      For more information about scheduling a KMS key for deletion, see Deleting KMS keys in the Key Management Service Developer Guide.

      The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

      Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

      Required permissions: kms:ScheduleKeyDeletion (key policy)

      Related operations

      " + "documentation":"

      Schedules the deletion of a KMS key. By default, KMS applies a waiting period of 30 days, but you can specify a waiting period of 7-30 days. When this operation is successful, the key state of the KMS key changes to PendingDeletion and the key can't be used in any cryptographic operations. It remains in this state for the duration of the waiting period. Before the waiting period ends, you can use CancelKeyDeletion to cancel the deletion of the KMS key. After the waiting period ends, KMS deletes the KMS key, its key material, and all KMS data associated with it, including all aliases that refer to it.

      Deleting a KMS key is a destructive and potentially dangerous operation. When a KMS key is deleted, all data that was encrypted under the KMS key is unrecoverable. (The only exception is a multi-Region replica key, or an asymmetric or HMAC KMS key with imported key material.) To prevent the use of a KMS key without deleting it, use DisableKey.

      You can schedule the deletion of a multi-Region primary key and its replica keys at any time. However, KMS will not delete a multi-Region primary key with existing replica keys. If you schedule the deletion of a primary key with replicas, its key state changes to PendingReplicaDeletion and it cannot be replicated or used in cryptographic operations. This status can continue indefinitely. When the last of its replica keys is deleted (not just scheduled), the key state of the primary key changes to PendingDeletion and its waiting period (PendingWindowInDays) begins. For details, see Deleting multi-Region keys in the Key Management Service Developer Guide.

      When KMS deletes a KMS key from a CloudHSM key store, it makes a best effort to delete the associated key material from the associated CloudHSM cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups. Deleting a KMS key from an external key store has no effect on the associated external key. However, for both types of custom key stores, deleting a KMS key is destructive and irreversible. You cannot decrypt ciphertext encrypted under the KMS key by using only its associated external key or CloudHSM key. Also, you cannot recreate a KMS key in an external key store by creating a new KMS key with the same key material.

      For more information about scheduling a KMS key for deletion, see Deleting KMS keys in the Key Management Service Developer Guide.

      The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

      Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

      Required permissions: kms:ScheduleKeyDeletion (key policy)

      Related operations

      " }, "Sign":{ "name":"Sign", @@ -955,7 +955,9 @@ "enum":[ "RSAES_PKCS1_V1_5", "RSAES_OAEP_SHA_1", - "RSAES_OAEP_SHA_256" + "RSAES_OAEP_SHA_256", + "RSA_AES_KEY_WRAP_SHA_1", + "RSA_AES_KEY_WRAP_SHA_256" ] }, "AliasList":{ @@ -2078,15 +2080,15 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

      The identifier of the symmetric encryption KMS key into which you will import key material. The Origin of the KMS key must be EXTERNAL.

      Specify the key ID or key ARN of the KMS key.

      For example:

      • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

      • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

      To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

      " + "documentation":"

      The identifier of the KMS key that will be associated with the imported key material. The Origin of the KMS key must be EXTERNAL.

      All KMS key types are supported, including multi-Region keys. However, you cannot import key material into a KMS key in a custom key store.

      Specify the key ID or key ARN of the KMS key.

      For example:

      • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

      • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

      To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

      " }, "WrappingAlgorithm":{ "shape":"AlgorithmSpec", - "documentation":"

      The algorithm you will use to encrypt the key material before using the ImportKeyMaterial operation to import it. For more information, see Encrypt the key material in the Key Management Service Developer Guide.

      The RSAES_PKCS1_V1_5 wrapping algorithm is deprecated. We recommend that you begin using a different wrapping algorithm immediately. KMS will end support for RSAES_PKCS1_V1_5 by October 1, 2023 pursuant to cryptographic key management guidance from the National Institute of Standards and Technology (NIST).

      " + "documentation":"

      The algorithm you will use with the RSA public key (PublicKey) in the response to protect your key material during import. For more information, see Select a wrapping algorithm in the Key Management Service Developer Guide.

      For RSA_AES wrapping algorithms, you encrypt your key material with an AES key that you generate, then encrypt your AES key with the RSA public key from KMS. For RSAES wrapping algorithms, you encrypt your key material directly with the RSA public key from KMS.

      The wrapping algorithms that you can use depend on the type of key material that you are importing. To import an RSA private key, you must use an RSA_AES wrapping algorithm.

      • RSA_AES_KEY_WRAP_SHA_256 — Supported for wrapping RSA and ECC key material.

      • RSA_AES_KEY_WRAP_SHA_1 — Supported for wrapping RSA and ECC key material.

      • RSAES_OAEP_SHA_256 — Supported for all types of key material, except RSA key material (private key).

        You cannot use the RSAES_OAEP_SHA_256 wrapping algorithm with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521 key material.

      • RSAES_OAEP_SHA_1 — Supported for all types of key material, except RSA key material (private key).

        You cannot use the RSAES_OAEP_SHA_1 wrapping algorithm with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521 key material.

      • RSAES_PKCS1_V1_5 (Deprecated) — Supported only for symmetric encryption key material (and only in legacy mode).

      " }, "WrappingKeySpec":{ "shape":"WrappingKeySpec", - "documentation":"

      The type of wrapping key (public key) to return in the response. Only 2048-bit RSA public keys are supported.

      " + "documentation":"

      The type of RSA public key to return in the response. You will use this wrapping key with the specified wrapping algorithm to protect your key material during import.

      Use the longest RSA wrapping key that is practical.

      You cannot use an RSA_2048 public key to directly wrap an ECC_NIST_P521 private key. Instead, use an RSA_AES wrapping algorithm or choose a longer RSA public key.

      " } } }, @@ -2277,7 +2279,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

      The identifier of the symmetric encryption KMS key that receives the imported key material. This must be the same KMS key specified in the KeyID parameter of the corresponding GetParametersForImport request. The Origin of the KMS key must be EXTERNAL. You cannot perform this operation on an asymmetric KMS key, an HMAC KMS key, a KMS key in a custom key store, or on a KMS key in a different Amazon Web Services account

      Specify the key ID or key ARN of the KMS key.

      For example:

      • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

      • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

      To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

      " + "documentation":"

      The identifier of the KMS key that will be associated with the imported key material. This must be the same KMS key specified in the KeyID parameter of the corresponding GetParametersForImport request. The Origin of the KMS key must be EXTERNAL and its KeyState must be PendingImport.

      The KMS key can be a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, or asymmetric signing KMS key, including a multi-Region key of any supported type. You cannot perform this operation on a KMS key in a custom key store, or on a KMS key in a different Amazon Web Services account.

      Specify the key ID or key ARN of the KMS key.

      For example:

      • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

      • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

      To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

      " }, "ImportToken":{ "shape":"CiphertextType", @@ -2285,7 +2287,7 @@ }, "EncryptedKeyMaterial":{ "shape":"CiphertextType", - "documentation":"

      The encrypted key material to import. The key material must be encrypted with the public wrapping key that GetParametersForImport returned, using the wrapping algorithm that you specified in the same GetParametersForImport request.

      " + "documentation":"

      The encrypted key material to import. The key material must be encrypted under the public wrapping key that GetParametersForImport returned, using the wrapping algorithm that you specified in the same GetParametersForImport request.

      " }, "ValidTo":{ "shape":"DateType", @@ -2293,7 +2295,7 @@ }, "ExpirationModel":{ "shape":"ExpirationModelType", - "documentation":"

      Specifies whether the key material expires. The default is KEY_MATERIAL_EXPIRES.

      When the value of ExpirationModel is KEY_MATERIAL_EXPIRES, you must specify a value for the ValidTo parameter. When value is KEY_MATERIAL_DOES_NOT_EXPIRE, you must omit the ValidTo parameter.

      You cannot change the ExpirationModel or ValidTo values for the current import after the request completes. To change either value, you must delete (DeleteImportedKeyMaterial) and reimport the key material.

      " + "documentation":"

      Specifies whether the key material expires. The default is KEY_MATERIAL_EXPIRES. For help with this choice, see Setting an expiration time in the Key Management Service Developer Guide.

      When the value of ExpirationModel is KEY_MATERIAL_EXPIRES, you must specify a value for the ValidTo parameter. When value is KEY_MATERIAL_DOES_NOT_EXPIRE, you must omit the ValidTo parameter.

      You cannot change the ExpirationModel or ValidTo values for the current import after the request completes. To change either value, you must reimport the key material.

      " } } }, @@ -3168,7 +3170,7 @@ }, "PendingWindowInDays":{ "shape":"PendingWindowInDaysType", - "documentation":"

      The waiting period, specified in number of days. After the waiting period ends, KMS deletes the KMS key.

      If the KMS key is a multi-Region primary key with replica keys, the waiting period begins when the last of its replica keys is deleted. Otherwise, the waiting period begins immediately.

      This value is optional. If you include a value, it must be between 7 and 30, inclusive. If you do not include a value, it defaults to 30.

      " + "documentation":"

      The waiting period, specified in number of days. After the waiting period ends, KMS deletes the KMS key.

      If the KMS key is a multi-Region primary key with replica keys, the waiting period begins when the last of its replica keys is deleted. Otherwise, the waiting period begins immediately.

      This value is optional. If you include a value, it must be between 7 and 30, inclusive. If you do not include a value, it defaults to 30. You can use the kms:ScheduleKeyDeletionPendingWindowInDays condition key to further constrain the values that principals can specify in the PendingWindowInDays parameter.

      " } } }, @@ -3232,7 +3234,7 @@ }, "Signature":{ "shape":"CiphertextType", - "documentation":"

      The cryptographic signature that was generated for the message.

      • When used with the supported RSA signing algorithms, the encoding of this value is defined by PKCS #1 in RFC 8017.

      • When used with the ECDSA_SHA_256, ECDSA_SHA_384, or ECDSA_SHA_512 signing algorithms, this value is a DER-encoded object as defined by ANS X9.62–2005 and RFC 3279 Section 2.2.3. This is the most commonly used signature format and is appropriate for most uses.

      When you use the HTTP API or the Amazon Web Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded.

      " + "documentation":"

      The cryptographic signature that was generated for the message.

      • When used with the supported RSA signing algorithms, the encoding of this value is defined by PKCS #1 in RFC 8017.

      • When used with the ECDSA_SHA_256, ECDSA_SHA_384, or ECDSA_SHA_512 signing algorithms, this value is a DER-encoded object as defined by ANSI X9.62–2005 and RFC 3279 Section 2.2.3. This is the most commonly used signature format and is appropriate for most uses.

      When you use the HTTP API or the Amazon Web Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded.

      " }, "SigningAlgorithm":{ "shape":"SigningAlgorithmSpec", @@ -3550,7 +3552,11 @@ }, "WrappingKeySpec":{ "type":"string", - "enum":["RSA_2048"] + "enum":[ + "RSA_2048", + "RSA_3072", + "RSA_4096" + ] }, "XksKeyAlreadyInUseException":{ "type":"structure", diff --git a/services/lakeformation/pom.xml b/services/lakeformation/pom.xml index 1f6685328545..f9e514393ad3 100644 --- a/services/lakeformation/pom.xml +++ b/services/lakeformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT lakeformation AWS Java SDK :: Services :: LakeFormation diff --git a/services/lakeformation/src/main/resources/codegen-resources/customization.config b/services/lakeformation/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..0e729acd0371 --- /dev/null +++ b/services/lakeformation/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "generateEndpointClientTests": true +} diff --git a/services/lambda/pom.xml b/services/lambda/pom.xml index ca55634d7344..f2864a045eaf 100644 --- a/services/lambda/pom.xml +++ b/services/lambda/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT lambda AWS Java SDK :: Services :: AWS Lambda diff --git a/services/lambda/src/main/resources/codegen-resources/service-2.json b/services/lambda/src/main/resources/codegen-resources/service-2.json index 77de22f949a2..b8ddb938be49 100644 --- a/services/lambda/src/main/resources/codegen-resources/service-2.json +++ b/services/lambda/src/main/resources/codegen-resources/service-2.json @@ -615,7 +615,8 @@ {"shape":"KMSNotFoundException"}, {"shape":"InvalidRuntimeException"}, {"shape":"ResourceConflictException"}, - {"shape":"ResourceNotReadyException"} + {"shape":"ResourceNotReadyException"}, + {"shape":"RecursiveInvocationException"} ], "documentation":"

      Invokes a Lambda function. You can invoke a function synchronously (and wait for the response), or asynchronously. To invoke a function asynchronously, set InvocationType to Event.

      For synchronous invocation, details about the function response, including errors, are included in the response body and headers. For either invocation type, you can find more information in the execution log and trace.

      When an error occurs, your function may be invoked multiple times. Retry behavior varies by error type, client, event source, and invocation type. For example, if you invoke a function asynchronously and it returns an error, Lambda executes the function up to two more times. For more information, see Error handling and automatic retries in Lambda.

      For asynchronous invocation, Lambda adds events to a queue before sending them to your function. If your function does not have enough capacity to keep up with the queue, events may be lost. Occasionally, your function may receive the same event multiple times, even if no error occurs. To retain events that were not processed, configure your function with a dead-letter queue.

      The status code in the API response doesn't reflect function errors. Error codes are reserved for errors that prevent your function from executing, such as permissions errors, quota errors, or issues with your function's code and configuration. For example, Lambda returns TooManyRequestsException if running the function would cause you to exceed a concurrency limit at either the account level (ConcurrentInvocationLimitExceeded) or function level (ReservedFunctionConcurrentInvocationLimitExceeded).

      For functions with a long timeout, your client might disconnect during synchronous invocation while it waits for a response. Configure your HTTP client, SDK, firewall, proxy, or operating system to allow for long connections with timeout or keep-alive settings.

      This operation requires permission for the lambda:InvokeFunction action. For details on how to set up permissions for cross-account invocations, see Granting function access to other accounts.

      " }, @@ -675,7 +676,8 @@ {"shape":"KMSNotFoundException"}, {"shape":"InvalidRuntimeException"}, {"shape":"ResourceConflictException"}, - {"shape":"ResourceNotReadyException"} + {"shape":"ResourceNotReadyException"}, + {"shape":"RecursiveInvocationException"} ], "documentation":"

      Configure your Lambda functions to stream response payloads back to clients. For more information, see Configuring a Lambda function to stream responses.

      This operation requires permission for the lambda:InvokeFunction action. For details on how to set up permissions for cross-account invocations, see Granting function access to other accounts.

      " }, @@ -1882,7 +1884,7 @@ }, "KMSKeyArn":{ "shape":"KMSKeyArn", - "documentation":"

      The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's environment variables. When Lambda SnapStart is activated, this key is also used to encrypt your function's snapshot. If you don't provide a customer managed key, Lambda uses a default service key.

      " + "documentation":"

      The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's environment variables. When Lambda SnapStart is activated, Lambda also uses this key to encrypt your function's snapshot. If you deploy your function using a container image, Lambda also uses this key to encrypt your function when it's deployed. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR). If you don't provide a customer managed key, Lambda uses a default service key.

      " }, "TracingConfig":{ "shape":"TracingConfig", @@ -2471,7 +2473,7 @@ }, "MaximumRecordAgeInSeconds":{ "shape":"MaximumRecordAgeInSeconds", - "documentation":"

      (Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records.

      The minimum value that can be set is 60 seconds.

      " + "documentation":"

      (Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records.

      The minimum valid value for maximum record age is 60s. Although values less than 60 and greater than -1 fall within the parameter's absolute range, they are not allowed.

      " }, "BisectBatchOnFunctionError":{ "shape":"BisectBatchOnFunctionError", @@ -3255,7 +3257,7 @@ }, "CompatibleRuntimes":{ "shape":"CompatibleRuntimes", - "documentation":"

      The layer's compatible runtimes.

      " + "documentation":"

      The layer's compatible runtimes.

      The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

      " }, "LicenseInfo":{ "shape":"LicenseInfo", @@ -3970,7 +3972,7 @@ }, "CompatibleRuntimes":{ "shape":"CompatibleRuntimes", - "documentation":"

      The layer's compatible runtimes.

      " + "documentation":"

      The layer's compatible runtimes.

      The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

      " }, "LicenseInfo":{ "shape":"LicenseInfo", @@ -4289,7 +4291,7 @@ "members":{ "CompatibleRuntime":{ "shape":"Runtime", - "documentation":"

      A runtime identifier. For example, go1.x.

      ", + "documentation":"

      A runtime identifier. For example, go1.x.

      The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

      ", "location":"querystring", "locationName":"CompatibleRuntime" }, @@ -4337,7 +4339,7 @@ "members":{ "CompatibleRuntime":{ "shape":"Runtime", - "documentation":"

      A runtime identifier. For example, go1.x.

      ", + "documentation":"

      A runtime identifier. For example, go1.x.

      The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

      ", "location":"querystring", "locationName":"CompatibleRuntime" }, @@ -4744,7 +4746,7 @@ }, "CompatibleRuntimes":{ "shape":"CompatibleRuntimes", - "documentation":"

      A list of compatible function runtimes. Used for filtering with ListLayers and ListLayerVersions.

      " + "documentation":"

      A list of compatible function runtimes. Used for filtering with ListLayers and ListLayerVersions.

      The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

      " }, "LicenseInfo":{ "shape":"LicenseInfo", @@ -4785,7 +4787,7 @@ }, "CompatibleRuntimes":{ "shape":"CompatibleRuntimes", - "documentation":"

      The layer's compatible runtimes.

      " + "documentation":"

      The layer's compatible runtimes.

      The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

      " }, "LicenseInfo":{ "shape":"LicenseInfo", @@ -5029,6 +5031,22 @@ "max":1, "min":1 }, + "RecursiveInvocationException":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"String", + "documentation":"

      The exception type.

      " + }, + "Message":{ + "shape":"String", + "documentation":"

      The exception message.

      " + } + }, + "documentation":"

      Lambda has detected your function being invoked in a recursive loop with other Amazon Web Services resources and stopped your function's invocation.

      ", + "error":{"httpStatusCode":400}, + "exception":true + }, "RemoveLayerVersionPermissionRequest":{ "type":"structure", "required":[ @@ -5209,7 +5227,8 @@ "provided.al2", "nodejs18.x", "python3.10", - "java17" + "java17", + "ruby3.2" ] }, "RuntimeVersionArn":{ @@ -5878,7 +5897,7 @@ }, "KMSKeyArn":{ "shape":"KMSKeyArn", - "documentation":"

      The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's environment variables. When Lambda SnapStart is activated, this key is also used to encrypt your function's snapshot. If you don't provide a customer managed key, Lambda uses a default service key.

      " + "documentation":"

      The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's environment variables. When Lambda SnapStart is activated, Lambda also uses this key to encrypt your function's snapshot. If you deploy your function using a container image, Lambda also uses this key to encrypt your function when it's deployed. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR). If you don't provide a customer managed key, Lambda uses a default service key.

      " }, "TracingConfig":{ "shape":"TracingConfig", diff --git a/services/lexmodelbuilding/pom.xml b/services/lexmodelbuilding/pom.xml index 4eb00e50b7d8..b8770cc5c1ca 100644 --- a/services/lexmodelbuilding/pom.xml +++ b/services/lexmodelbuilding/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT lexmodelbuilding AWS Java SDK :: Services :: Amazon Lex Model Building diff --git a/services/lexmodelsv2/pom.xml b/services/lexmodelsv2/pom.xml index 53f40158ccf5..48627ec955db 100644 --- a/services/lexmodelsv2/pom.xml +++ b/services/lexmodelsv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT lexmodelsv2 AWS Java SDK :: Services :: Lex Models V2 diff --git a/services/lexmodelsv2/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/lexmodelsv2/src/main/resources/codegen-resources/endpoint-rule-set.json index 36685dd7c5ae..39dabf8b57c6 100644 --- a/services/lexmodelsv2/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/lexmodelsv2/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom 
endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] }, - "supportsFIPS" + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://models-v2-lex-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://models-v2-lex-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://models-v2-lex-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + 
"fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://models-v2-lex.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -222,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://models-v2-lex-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://models-v2-lex.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -231,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://models-v2-lex.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://models-v2-lex.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git 
a/services/lexmodelsv2/src/main/resources/codegen-resources/endpoint-tests.json b/services/lexmodelsv2/src/main/resources/codegen-resources/endpoint-tests.json index 3d99439ac552..6f052631e36e 100644 --- a/services/lexmodelsv2/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/lexmodelsv2/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,579 +1,403 @@ { "testCases": [ { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://models-v2-lex-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://models-v2-lex-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://models-v2-lex.ca-central-1.api.aws" + "url": "https://models-v2-lex.af-south-1.amazonaws.com" } }, "params": { + "Region": "af-south-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "ca-central-1" + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://models-v2-lex.ca-central-1.amazonaws.com" + "url": "https://models-v2-lex.ap-northeast-1.amazonaws.com" } }, "params": { + "Region": "ap-northeast-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ca-central-1" + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - 
"expect": { - "endpoint": { - "url": "https://models-v2-lex-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://models-v2-lex-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://models-v2-lex.eu-central-1.api.aws" + "url": "https://models-v2-lex.ap-northeast-2.amazonaws.com" } }, "params": { + "Region": "ap-northeast-2", "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-central-1" + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://models-v2-lex.eu-central-1.amazonaws.com" + "url": "https://models-v2-lex.ap-southeast-1.amazonaws.com" } }, "params": { + "Region": "ap-southeast-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://models-v2-lex-fips.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "us-west-2" - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://models-v2-lex-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "us-west-2" - } - }, - { - "documentation": "For 
region us-west-2 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://models-v2-lex.us-west-2.api.aws" + "url": "https://models-v2-lex.ap-southeast-2.amazonaws.com" } }, "params": { + "Region": "ap-southeast-2", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-west-2" + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://models-v2-lex.us-west-2.amazonaws.com" + "url": "https://models-v2-lex.ca-central-1.amazonaws.com" } }, "params": { + "Region": "ca-central-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-west-2" - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://models-v2-lex-fips.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "af-south-1" + "UseDualStack": false } }, { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://models-v2-lex-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "af-south-1" - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://models-v2-lex.af-south-1.api.aws" + "url": "https://models-v2-lex.eu-central-1.amazonaws.com" } }, "params": { + "Region": "eu-central-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "af-south-1" + "UseDualStack": false } }, { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", + 
"documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://models-v2-lex.af-south-1.amazonaws.com" + "url": "https://models-v2-lex.eu-west-1.amazonaws.com" } }, "params": { + "Region": "eu-west-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "af-south-1" - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://models-v2-lex-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-west-2" + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://models-v2-lex-fips.eu-west-2.amazonaws.com" + "url": "https://models-v2-lex.eu-west-2.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-west-2" + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://models-v2-lex.eu-west-2.api.aws" + "url": "https://models-v2-lex.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-west-2" + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://models-v2-lex.eu-west-2.amazonaws.com" + "url": "https://models-v2-lex.us-west-2.amazonaws.com" } }, "params": { + "Region": "us-west-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-2" + "UseDualStack": false } 
}, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://models-v2-lex-fips.eu-west-1.api.aws" + "url": "https://models-v2-lex-fips.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-west-1" + "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://models-v2-lex-fips.eu-west-1.amazonaws.com" + "url": "https://models-v2-lex-fips.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://models-v2-lex.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://models-v2-lex.eu-west-1.amazonaws.com" + "url": "https://models-v2-lex.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-1" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://models-v2-lex-fips.ap-northeast-2.api.aws" + "url": "https://models-v2-lex-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", 
"UseFIPS": true, - "UseDualStack": true, - "Region": "ap-northeast-2" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://models-v2-lex-fips.ap-northeast-2.amazonaws.com" + "url": "https://models-v2-lex-fips.cn-north-1.amazonaws.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://models-v2-lex.ap-northeast-2.api.aws" + "url": "https://models-v2-lex.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-northeast-2" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://models-v2-lex.ap-northeast-2.amazonaws.com" + "url": "https://models-v2-lex.cn-north-1.amazonaws.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://models-v2-lex-fips.ap-northeast-1.api.aws" + "url": "https://models-v2-lex-fips.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-northeast-1" + "UseDualStack": true } }, { - 
"documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://models-v2-lex-fips.ap-northeast-1.amazonaws.com" + "url": "https://models-v2-lex-fips.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://models-v2-lex.ap-northeast-1.api.aws" + "url": "https://models-v2-lex.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-northeast-1" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://models-v2-lex.ap-northeast-1.amazonaws.com" + "url": "https://models-v2-lex.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://models-v2-lex-fips.ap-southeast-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-southeast-1" + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and 
DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://models-v2-lex-fips.ap-southeast-1.amazonaws.com" + "url": "https://models-v2-lex-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://models-v2-lex.ap-southeast-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-southeast-1" + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://models-v2-lex.ap-southeast-1.amazonaws.com" + "url": "https://models-v2-lex.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://models-v2-lex-fips.ap-southeast-2.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-southeast-2" + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For 
region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://models-v2-lex-fips.ap-southeast-2.amazonaws.com" + "url": "https://models-v2-lex-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://models-v2-lex.ap-southeast-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-southeast-2" - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://models-v2-lex.ap-southeast-2.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-2" - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://models-v2-lex-fips.us-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://models-v2-lex-fips.us-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://models-v2-lex.us-east-1.api.aws" + "url": 
"https://models-v2-lex.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://models-v2-lex.us-east-1.amazonaws.com" + "url": "https://example.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1" + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -582,7 +406,6 @@ "params": { "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -592,9 +415,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -604,11 +427,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/lexmodelsv2/src/main/resources/codegen-resources/paginators-1.json b/services/lexmodelsv2/src/main/resources/codegen-resources/paginators-1.json index 895e54e4c13e..83a73642bd4f 100644 --- a/services/lexmodelsv2/src/main/resources/codegen-resources/paginators-1.json +++ 
b/services/lexmodelsv2/src/main/resources/codegen-resources/paginators-1.json @@ -74,6 +74,26 @@ "input_token": "nextToken", "output_token": "nextToken", "limit_key": "maxResults" + }, + "ListTestExecutionResultItems": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "ListTestExecutions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "ListTestSetRecords": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "ListTestSets": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" } } } diff --git a/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json b/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json index 80b107f76858..a33f9518bcef 100644 --- a/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json +++ b/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json @@ -277,6 +277,25 @@ ], "documentation":"

      Creates a custom slot type

      To create a custom slot type, specify a name for the slot type and a set of enumeration values, the values that a slot of this type can assume.

      " }, + "CreateTestSetDiscrepancyReport":{ + "name":"CreateTestSetDiscrepancyReport", + "http":{ + "method":"POST", + "requestUri":"/testsets/{testSetId}/testsetdiscrepancy", + "responseCode":202 + }, + "input":{"shape":"CreateTestSetDiscrepancyReportRequest"}, + "output":{"shape":"CreateTestSetDiscrepancyReportResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Create a report that describes the differences between the bot and the test set.

      " + }, "CreateUploadUrl":{ "name":"CreateUploadUrl", "http":{ @@ -514,6 +533,25 @@ ], "documentation":"

      Deletes a slot type from a bot locale.

      If a slot is using the slot type, Amazon Lex throws a ResourceInUseException exception. To avoid the exception, set the skipResourceInUseCheck parameter to true.

      " }, + "DeleteTestSet":{ + "name":"DeleteTestSet", + "http":{ + "method":"DELETE", + "requestUri":"/testsets/{testSetId}", + "responseCode":204 + }, + "input":{"shape":"DeleteTestSetRequest"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"PreconditionFailedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      The action to delete the selected test set.

      ", + "idempotent":true + }, "DeleteUtterances":{ "name":"DeleteUtterances", "http":{ @@ -741,6 +779,96 @@ ], "documentation":"

      Gets metadata information about a slot type.

      " }, + "DescribeTestExecution":{ + "name":"DescribeTestExecution", + "http":{ + "method":"GET", + "requestUri":"/testexecutions/{testExecutionId}", + "responseCode":200 + }, + "input":{"shape":"DescribeTestExecutionRequest"}, + "output":{"shape":"DescribeTestExecutionResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Gets metadata information about the test execution.

      " + }, + "DescribeTestSet":{ + "name":"DescribeTestSet", + "http":{ + "method":"GET", + "requestUri":"/testsets/{testSetId}", + "responseCode":200 + }, + "input":{"shape":"DescribeTestSetRequest"}, + "output":{"shape":"DescribeTestSetResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Gets metadata information about the test set.

      " + }, + "DescribeTestSetDiscrepancyReport":{ + "name":"DescribeTestSetDiscrepancyReport", + "http":{ + "method":"GET", + "requestUri":"/testsetdiscrepancy/{testSetDiscrepancyReportId}", + "responseCode":200 + }, + "input":{"shape":"DescribeTestSetDiscrepancyReportRequest"}, + "output":{"shape":"DescribeTestSetDiscrepancyReportResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Gets metadata information about the test set discrepancy report.

      " + }, + "DescribeTestSetGeneration":{ + "name":"DescribeTestSetGeneration", + "http":{ + "method":"GET", + "requestUri":"/testsetgenerations/{testSetGenerationId}", + "responseCode":200 + }, + "input":{"shape":"DescribeTestSetGenerationRequest"}, + "output":{"shape":"DescribeTestSetGenerationResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Gets metadata information about the test set generation.

      " + }, + "GetTestExecutionArtifactsUrl":{ + "name":"GetTestExecutionArtifactsUrl", + "http":{ + "method":"GET", + "requestUri":"/testexecutions/{testExecutionId}/artifacturl", + "responseCode":200 + }, + "input":{"shape":"GetTestExecutionArtifactsUrlRequest"}, + "output":{"shape":"GetTestExecutionArtifactsUrlResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      The pre-signed Amazon S3 URL to download the test execution result artifacts.

      " + }, "ListAggregatedUtterances":{ "name":"ListAggregatedUtterances", "http":{ @@ -1013,6 +1141,76 @@ ], "documentation":"

      Gets a list of tags associated with a resource. Only bots, bot aliases, and bot channels can have tags associated with them.

      " }, + "ListTestExecutionResultItems":{ + "name":"ListTestExecutionResultItems", + "http":{ + "method":"POST", + "requestUri":"/testexecutions/{testExecutionId}/results", + "responseCode":200 + }, + "input":{"shape":"ListTestExecutionResultItemsRequest"}, + "output":{"shape":"ListTestExecutionResultItemsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Gets a list of test execution result items.

      " + }, + "ListTestExecutions":{ + "name":"ListTestExecutions", + "http":{ + "method":"POST", + "requestUri":"/testexecutions", + "responseCode":200 + }, + "input":{"shape":"ListTestExecutionsRequest"}, + "output":{"shape":"ListTestExecutionsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      The list of test set executions.

      " + }, + "ListTestSetRecords":{ + "name":"ListTestSetRecords", + "http":{ + "method":"POST", + "requestUri":"/testsets/{testSetId}/records", + "responseCode":200 + }, + "input":{"shape":"ListTestSetRecordsRequest"}, + "output":{"shape":"ListTestSetRecordsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      The list of test set records.

      " + }, + "ListTestSets":{ + "name":"ListTestSets", + "http":{ + "method":"POST", + "requestUri":"/testsets", + "responseCode":200 + }, + "input":{"shape":"ListTestSetsRequest"}, + "output":{"shape":"ListTestSetsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      The list of the test sets

      " + }, "SearchAssociatedTranscripts":{ "name":"SearchAssociatedTranscripts", "http":{ @@ -1071,6 +1269,45 @@ ], "documentation":"

      Starts importing a bot, bot locale, or custom vocabulary from a zip archive that you uploaded to an S3 bucket.

      " }, + "StartTestExecution":{ + "name":"StartTestExecution", + "http":{ + "method":"POST", + "requestUri":"/testsets/{testSetId}/testexecutions", + "responseCode":202 + }, + "input":{"shape":"StartTestExecutionRequest"}, + "output":{"shape":"StartTestExecutionResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      The action to start test set execution.

      " + }, + "StartTestSetGeneration":{ + "name":"StartTestSetGeneration", + "http":{ + "method":"PUT", + "requestUri":"/testsetgenerations", + "responseCode":202 + }, + "input":{"shape":"StartTestSetGenerationRequest"}, + "output":{"shape":"StartTestSetGenerationResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      The action to start the generation of test set.

      ", + "idempotent":true + }, "StopBotRecommendation":{ "name":"StopBotRecommendation", "http":{ @@ -1298,9 +1535,52 @@ {"shape":"InternalServerException"} ], "documentation":"

      Updates the configuration of an existing slot type.

      " + }, + "UpdateTestSet":{ + "name":"UpdateTestSet", + "http":{ + "method":"PUT", + "requestUri":"/testsets/{testSetId}", + "responseCode":200 + }, + "input":{"shape":"UpdateTestSetRequest"}, + "output":{"shape":"UpdateTestSetResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"PreconditionFailedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      The action to update the test set.

      ", + "idempotent":true } }, "shapes":{ + "ActiveContext":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"ActiveContextName", + "documentation":"

      The name of active context.

      " + } + }, + "documentation":"

      The active context used in the test execution.

      " + }, + "ActiveContextList":{ + "type":"list", + "member":{"shape":"ActiveContext"}, + "max":20, + "min":0 + }, + "ActiveContextName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^([A-Za-z]_?)+$" + }, "AdvancedRecognitionSetting":{ "type":"structure", "members":{ @@ -1311,6 +1591,41 @@ }, "documentation":"

      Provides settings that enable advanced recognition settings for slot values.

      " }, + "AgentTurnResult":{ + "type":"structure", + "required":["expectedAgentPrompt"], + "members":{ + "expectedAgentPrompt":{ + "shape":"TestSetAgentPrompt", + "documentation":"

      The expected agent prompt for the agent turn in a test set execution.

      " + }, + "actualAgentPrompt":{ + "shape":"TestSetAgentPrompt", + "documentation":"

      The actual agent prompt for the agent turn in a test set execution.

      " + }, + "errorDetails":{"shape":"ExecutionErrorDetails"}, + "actualElicitedSlot":{ + "shape":"TestResultSlotName", + "documentation":"

      The actual elicited slot for the agent turn in a test set execution.

      " + }, + "actualIntent":{ + "shape":"Name", + "documentation":"

      The actual intent for the agent turn in a test set execution.

      " + } + }, + "documentation":"

      The information about the agent turn in a test set execution.

      " + }, + "AgentTurnSpecification":{ + "type":"structure", + "required":["agentPrompt"], + "members":{ + "agentPrompt":{ + "shape":"TestSetAgentPrompt", + "documentation":"

      The agent prompt for the agent turn in a test set.

      " + } + }, + "documentation":"

      The specification of an agent turn.

      " + }, "AggregatedUtterancesFilter":{ "type":"structure", "required":[ @@ -1507,6 +1822,12 @@ }, "documentation":"

      Specifies the audio and DTMF input specification.

      " }, + "AudioFileS3Location":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^s3://([a-z0-9\\\\.-]+)/(.+)$" + }, "AudioLogDestination":{ "type":"structure", "required":["s3Bucket"], @@ -1838,6 +2159,29 @@ "type":"list", "member":{"shape":"BotAliasSummary"} }, + "BotAliasTestExecutionTarget":{ + "type":"structure", + "required":[ + "botId", + "botAliasId", + "localeId" + ], + "members":{ + "botId":{ + "shape":"Id", + "documentation":"

      The bot Id of the bot alias used in the test set execution.

      " + }, + "botAliasId":{ + "shape":"BotAliasId", + "documentation":"

      The bot alias Id of the bot alias used in the test set execution.

      " + }, + "localeId":{ + "shape":"LocaleId", + "documentation":"

      The locale Id of the bot alias used in the test set execution.

      " + } + }, + "documentation":"

      The target Amazon S3 location for the test set execution using a bot alias.

      " + }, "BotExportSpecification":{ "type":"structure", "required":[ @@ -2738,35 +3082,214 @@ "max":20, "min":1 }, - "ConversationLogSettings":{ + "ConversationLevelIntentClassificationResultItem":{ + "type":"structure", + "required":[ + "intentName", + "matchResult" + ], + "members":{ + "intentName":{ + "shape":"Name", + "documentation":"

      The intent name used in the evaluation of intent level success or failure.

      " + }, + "matchResult":{ + "shape":"TestResultMatchStatus", + "documentation":"

      The number of times the specific intent is used in the evaluation of intent level success or failure.

      " + } + }, + "documentation":"

      The item listing the evaluation of intent level success or failure.

      " + }, + "ConversationLevelIntentClassificationResults":{ + "type":"list", + "member":{"shape":"ConversationLevelIntentClassificationResultItem"} + }, + "ConversationLevelResultDetail":{ "type":"structure", + "required":["endToEndResult"], "members":{ - "textLogSettings":{ - "shape":"TextLogSettingsList", - "documentation":"

      The Amazon CloudWatch Logs settings for logging text and metadata.

      " + "endToEndResult":{ + "shape":"TestResultMatchStatus", + "documentation":"

      The success or failure of the streaming of the conversation.

      " }, - "audioLogSettings":{ - "shape":"AudioLogSettingsList", - "documentation":"

      The Amazon S3 settings for logging audio to an S3 bucket.

      " + "speechTranscriptionResult":{ + "shape":"TestResultMatchStatus", + "documentation":"

      The speech transcription success or failure details of the conversation.

      " } }, - "documentation":"

      Configures conversation logging that saves audio, text, and metadata for the conversations with your users.

      " + "documentation":"

      The conversation level details of the conversation used in the test set.

      " }, - "Count":{"type":"integer"}, - "CreateBotAliasRequest":{ + "ConversationLevelSlotResolutionResultItem":{ "type":"structure", "required":[ - "botAliasName", - "botId" + "intentName", + "slotName", + "matchResult" ], "members":{ - "botAliasName":{ + "intentName":{ "shape":"Name", - "documentation":"

      The alias to create. The name must be unique for the bot.

      " + "documentation":"

      The intents used in the slots list for the slot resolution details.

      " }, - "description":{ - "shape":"Description", - "documentation":"

      A description of the alias. Use this description to help identify the alias.

      " + "slotName":{ + "shape":"TestResultSlotName", + "documentation":"

      The slot name in the slots list for the slot resolution details.

      " + }, + "matchResult":{ + "shape":"TestResultMatchStatus", + "documentation":"

      The number of matching slots used in the slots listings for the slot resolution evaluation.

      " + } + }, + "documentation":"

      The slots used for the slot resolution in the conversation.

      " + }, + "ConversationLevelSlotResolutionResults":{ + "type":"list", + "member":{"shape":"ConversationLevelSlotResolutionResultItem"} + }, + "ConversationLevelTestResultItem":{ + "type":"structure", + "required":[ + "conversationId", + "endToEndResult", + "intentClassificationResults", + "slotResolutionResults" + ], + "members":{ + "conversationId":{ + "shape":"TestSetConversationId", + "documentation":"

      The conversation Id of the test result evaluation item.

      " + }, + "endToEndResult":{ + "shape":"TestResultMatchStatus", + "documentation":"

      The end-to-end success or failure of the test result evaluation item.

      " + }, + "speechTranscriptionResult":{ + "shape":"TestResultMatchStatus", + "documentation":"

      The speech transcription success or failure of the test result evaluation item.

      " + }, + "intentClassificationResults":{ + "shape":"ConversationLevelIntentClassificationResults", + "documentation":"

      The intent classification of the test result evaluation item.

      " + }, + "slotResolutionResults":{ + "shape":"ConversationLevelSlotResolutionResults", + "documentation":"

      The slot success or failure of the test result evaluation item.

      " + } + }, + "documentation":"

      The test result evaluation item at the conversation level.

      " + }, + "ConversationLevelTestResultItemList":{ + "type":"list", + "member":{"shape":"ConversationLevelTestResultItem"} + }, + "ConversationLevelTestResults":{ + "type":"structure", + "required":["items"], + "members":{ + "items":{ + "shape":"ConversationLevelTestResultItemList", + "documentation":"

      The item list in the test set results data at the conversation level.

      " + } + }, + "documentation":"

      The test set results data at the conversation level.

      " + }, + "ConversationLevelTestResultsFilterBy":{ + "type":"structure", + "members":{ + "endToEndResult":{ + "shape":"TestResultMatchStatus", + "documentation":"

      The selection of matched or mismatched end-to-end status to filter test set results data at the conversation level.

      " + } + }, + "documentation":"

      The selection to filter the test set results data at the conversation level.

      " + }, + "ConversationLogSettings":{ + "type":"structure", + "members":{ + "textLogSettings":{ + "shape":"TextLogSettingsList", + "documentation":"

      The Amazon CloudWatch Logs settings for logging text and metadata.

      " + }, + "audioLogSettings":{ + "shape":"AudioLogSettingsList", + "documentation":"

      The Amazon S3 settings for logging audio to an S3 bucket.

      " + } + }, + "documentation":"

      Configures conversation logging that saves audio, text, and metadata for the conversations with your users.

      " + }, + "ConversationLogsDataSource":{ + "type":"structure", + "required":[ + "botId", + "botAliasId", + "localeId", + "filter" + ], + "members":{ + "botId":{ + "shape":"Id", + "documentation":"

      The bot Id from the conversation logs.

      " + }, + "botAliasId":{ + "shape":"BotAliasId", + "documentation":"

      The bot alias Id from the conversation logs.

      " + }, + "localeId":{ + "shape":"LocaleId", + "documentation":"

      The locale Id of the conversation log.

      " + }, + "filter":{ + "shape":"ConversationLogsDataSourceFilterBy", + "documentation":"

      The filter for the data source of the conversation log.

      " + } + }, + "documentation":"

      The data source that uses conversation logs.

      " + }, + "ConversationLogsDataSourceFilterBy":{ + "type":"structure", + "required":[ + "startTime", + "endTime", + "inputMode" + ], + "members":{ + "startTime":{ + "shape":"Timestamp", + "documentation":"

      The start time for the conversation log.

      " + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

      The end time for the conversation log.

      " + }, + "inputMode":{ + "shape":"ConversationLogsInputModeFilter", + "documentation":"

      The selection to filter by input mode for the conversation logs.

      " + } + }, + "documentation":"

      The selected data source to filter the conversation log.

      " + }, + "ConversationLogsInputModeFilter":{ + "type":"string", + "enum":[ + "Speech", + "Text" + ] + }, + "Count":{"type":"integer"}, + "CreateBotAliasRequest":{ + "type":"structure", + "required":[ + "botAliasName", + "botId" + ], + "members":{ + "botAliasName":{ + "shape":"Name", + "documentation":"

      The alias to create. The name must be unique for the bot.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      A description of the alias. Use this description to help identify the alias.

      " }, "botVersion":{ "shape":"NumericalBotVersion", @@ -3184,7 +3707,7 @@ }, "botVersion":{ "shape":"DraftBotVersion", - "documentation":"

      The identifier of the version of the bot associated with this intent.

      ", + "documentation":"

      The version of the bot associated with this intent.

      ", "location":"uri", "locationName":"botVersion" }, @@ -3257,7 +3780,7 @@ }, "botVersion":{ "shape":"DraftBotVersion", - "documentation":"

      The identifier of the version of the bot associated with the intent.

      " + "documentation":"

      The version of the bot associated with the intent.

      " }, "localeId":{ "shape":"LocaleId", @@ -3331,7 +3854,7 @@ }, "principal":{ "shape":"PrincipalList", - "documentation":"

      An IAM principal, such as an IAM users, IAM roles, or AWS services that is allowed or denied access to a resource. For more information, see AWS JSON policy elements: Principal.

      " + "documentation":"

      An IAM principal, such as an IAM user, IAM role, or Amazon Web Services services that is allowed or denied access to a resource. For more information, see Amazon Web Services JSON policy elements: Principal.

      " }, "action":{ "shape":"OperationList", @@ -3419,7 +3942,7 @@ }, "multipleValuesSetting":{ "shape":"MultipleValuesSetting", - "documentation":"

      Indicates whether the slot returns multiple values in one response. Multi-value slots are only available in the en-US locale. If you set this value to true in any other locale, Amazon Lex throws a ValidationException.

      If the multipleValuesSetting is not set, the default value is false.

      " + "documentation":"

      Indicates whether the slot returns multiple values in one response. Multi-value slots are only available in the en-US locale. If you set this value to true in any other locale, Amazon Lex throws a ValidationException.

      If the multipleValuesSetting is not set, the default value is false.

      " }, "subSlotSetting":{ "shape":"SubSlotSetting", @@ -3495,7 +4018,7 @@ "members":{ "slotTypeName":{ "shape":"Name", - "documentation":"

      The name for the slot. A slot type name must be unique within the account.

      " + "documentation":"

      The name for the slot. A slot type name must be unique within the intent.

      " }, "description":{ "shape":"Description", @@ -3507,7 +4030,7 @@ }, "valueSelectionSetting":{ "shape":"SlotValueSelectionSetting", - "documentation":"

      Determines the strategy that Amazon Lex uses to select a value from the list of possible values. The field can be set to one of the following values:

      • OriginalValue - Returns the value entered by the user, if the user value is similar to the slot value.

      • TopResolution - If there is a resolution list for the slot, return the first value in the resolution list. If there is no resolution list, return null.

      If you don't specify the valueSelectionSetting parameter, the default is OriginalValue.

      " + "documentation":"

      Determines the strategy that Amazon Lex uses to select a value from the list of possible values. The field can be set to one of the following values:

      • ORIGINAL_VALUE - Returns the value entered by the user, if the user value is similar to the slot value.

      • TOP_RESOLUTION - If there is a resolution list for the slot, return the first value in the resolution list. If there is no resolution list, return null.

      If you don't specify the valueSelectionSetting parameter, the default is ORIGINAL_VALUE.

      " }, "parentSlotTypeSignature":{ "shape":"SlotTypeSignature", @@ -3594,6 +4117,46 @@ } } }, + "CreateTestSetDiscrepancyReportRequest":{ + "type":"structure", + "required":[ + "testSetId", + "target" + ], + "members":{ + "testSetId":{ + "shape":"Id", + "documentation":"

      The test set Id for the test set discrepancy report.

      ", + "location":"uri", + "locationName":"testSetId" + }, + "target":{ + "shape":"TestSetDiscrepancyReportResourceTarget", + "documentation":"

      The target bot for the test set discrepancy report.

      " + } + } + }, + "CreateTestSetDiscrepancyReportResponse":{ + "type":"structure", + "members":{ + "testSetDiscrepancyReportId":{ + "shape":"Id", + "documentation":"

      The unique identifier of the test set discrepancy report to describe.

      " + }, + "creationDateTime":{ + "shape":"Timestamp", + "documentation":"

      The creation date and time for the test set discrepancy report.

      " + }, + "testSetId":{ + "shape":"Id", + "documentation":"

      The test set Id for the test set discrepancy report.

      " + }, + "target":{ + "shape":"TestSetDiscrepancyReportResourceTarget", + "documentation":"

      The target bot for the test set discrepancy report.

      " + } + } + }, "CreateUploadUrlRequest":{ "type":"structure", "members":{ @@ -3763,7 +4326,7 @@ "members":{ "childDirected":{ "shape":"ChildDirected", - "documentation":"

      For each Amazon Lex bot created with the Amazon Lex Model Building Service, you must specify whether your use of Amazon Lex is related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to the Children's Online Privacy Protection Act (COPPA) by specifying true or false in the childDirected field. By specifying true in the childDirected field, you confirm that your use of Amazon Lex is related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to COPPA. By specifying false in the childDirected field, you confirm that your use of Amazon Lex is not related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to COPPA. You may not specify a default value for the childDirected field that does not accurately reflect whether your use of Amazon Lex is related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to COPPA. If your use of Amazon Lex relates to a website, program, or other application that is directed in whole or in part, to children under age 13, you must obtain any required verifiable parental consent under COPPA. For information regarding the use of Amazon Lex in connection with websites, programs, or other applications that are directed or targeted, in whole or in part, to children under age 13, see the Amazon Lex FAQ.

      " + "documentation":"

      For each Amazon Lex bot created with the Amazon Lex Model Building Service, you must specify whether your use of Amazon Lex is related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to the Children's Online Privacy Protection Act (COPPA) by specifying true or false in the childDirected field. By specifying true in the childDirected field, you confirm that your use of Amazon Lex is related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to COPPA. By specifying false in the childDirected field, you confirm that your use of Amazon Lex is not related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to COPPA. You may not specify a default value for the childDirected field that does not accurately reflect whether your use of Amazon Lex is related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to COPPA. If your use of Amazon Lex relates to a website, program, or other application that is directed in whole or in part, to children under age 13, you must obtain any required verifiable parental consent under COPPA. For information regarding the use of Amazon Lex in connection with websites, programs, or other applications that are directed or targeted, in whole or in part, to children under age 13, see the Amazon Lex FAQ.

      " } }, "documentation":"

      By default, data stored by Amazon Lex is encrypted. The DataPrivacy structure provides settings that determine how Amazon Lex handles special cases of securing the data for your bot.

      " @@ -4259,6 +4822,18 @@ } } }, + "DeleteTestSetRequest":{ + "type":"structure", + "required":["testSetId"], + "members":{ + "testSetId":{ + "shape":"Id", + "documentation":"

      The test set Id of the test set to be deleted.

      ", + "location":"uri", + "locationName":"testSetId" + } + } + }, "DeleteUtterancesRequest":{ "type":"structure", "required":["botId"], @@ -4379,7 +4954,7 @@ }, "botVersion":{ "shape":"BotVersion", - "documentation":"

      The identifier of the version of the bot associated with the locale.

      ", + "documentation":"

      The version of the bot associated with the locale.

      ", "location":"uri", "locationName":"botVersion" }, @@ -4400,7 +4975,7 @@ }, "botVersion":{ "shape":"BotVersion", - "documentation":"

      The identifier of the version of the bot associated with the locale.

      " + "documentation":"

      The version of the bot associated with the locale.

      " }, "localeId":{ "shape":"LocaleId", @@ -4958,7 +5533,7 @@ }, "initialResponseSetting":{ "shape":"InitialResponseSetting", - "documentation":"

      " + "documentation":"

      Configuration setting for a response sent to the user before Amazon Lex starts eliciting slots.

      " } } }, @@ -5183,6 +5758,234 @@ } } }, + "DescribeTestExecutionRequest":{ + "type":"structure", + "required":["testExecutionId"], + "members":{ + "testExecutionId":{ + "shape":"Id", + "documentation":"

      The execution Id of the test set execution.

      ", + "location":"uri", + "locationName":"testExecutionId" + } + } + }, + "DescribeTestExecutionResponse":{ + "type":"structure", + "members":{ + "testExecutionId":{ + "shape":"Id", + "documentation":"

      The execution Id for the test set execution.

      " + }, + "creationDateTime":{ + "shape":"Timestamp", + "documentation":"

      The execution creation date and time for the test set execution.

      " + }, + "lastUpdatedDateTime":{ + "shape":"Timestamp", + "documentation":"

      The date and time of the last update for the execution.

      " + }, + "testExecutionStatus":{ + "shape":"TestExecutionStatus", + "documentation":"

      The test execution status for the test execution.

      " + }, + "testSetId":{ + "shape":"Id", + "documentation":"

      The test set Id for the test set execution.

      " + }, + "testSetName":{ + "shape":"Name", + "documentation":"

      The test set name of the test set execution.

      " + }, + "target":{ + "shape":"TestExecutionTarget", + "documentation":"

      The target bot for the test set execution details.

      " + }, + "apiMode":{ + "shape":"TestExecutionApiMode", + "documentation":"

      Indicates whether streaming or non-streaming APIs are used for the test set execution. For streaming, the StartConversation Amazon Lex Runtime API is used. Whereas for non-streaming, the RecognizeUtterance and RecognizeText Amazon Lex Runtime APIs are used.

      " + }, + "testExecutionModality":{ + "shape":"TestExecutionModality", + "documentation":"

      Indicates whether the test set is audio or text.

      " + }, + "failureReasons":{ + "shape":"FailureReasons", + "documentation":"

      Reasons for the failure of the test set execution.

      " + } + } + }, + "DescribeTestSetDiscrepancyReportRequest":{ + "type":"structure", + "required":["testSetDiscrepancyReportId"], + "members":{ + "testSetDiscrepancyReportId":{ + "shape":"Id", + "documentation":"

      The unique identifier of the test set discrepancy report.

      ", + "location":"uri", + "locationName":"testSetDiscrepancyReportId" + } + } + }, + "DescribeTestSetDiscrepancyReportResponse":{ + "type":"structure", + "members":{ + "testSetDiscrepancyReportId":{ + "shape":"Id", + "documentation":"

      The unique identifier of the test set discrepancy report to describe.

      " + }, + "testSetId":{ + "shape":"Id", + "documentation":"

      The test set Id for the test set discrepancy report.

      " + }, + "creationDateTime":{ + "shape":"Timestamp", + "documentation":"

      The time and date of creation for the test set discrepancy report.

      " + }, + "target":{ + "shape":"TestSetDiscrepancyReportResourceTarget", + "documentation":"

      The target bot location for the test set discrepancy report.

      " + }, + "testSetDiscrepancyReportStatus":{ + "shape":"TestSetDiscrepancyReportStatus", + "documentation":"

      The status for the test set discrepancy report.

      " + }, + "lastUpdatedDataTime":{ + "shape":"Timestamp", + "documentation":"

      The date and time of the last update for the test set discrepancy report.

      " + }, + "testSetDiscrepancyTopErrors":{ + "shape":"TestSetDiscrepancyErrors", + "documentation":"

      The top 200 error results from the test set discrepancy report.

      " + }, + "testSetDiscrepancyRawOutputUrl":{ + "shape":"PresignedS3Url", + "documentation":"

      Pre-signed Amazon S3 URL to download the test set discrepancy report.

      " + }, + "failureReasons":{ + "shape":"FailureReasons", + "documentation":"

      The failure report for the test set discrepancy report generation action.

      " + } + } + }, + "DescribeTestSetGenerationRequest":{ + "type":"structure", + "required":["testSetGenerationId"], + "members":{ + "testSetGenerationId":{ + "shape":"Id", + "documentation":"

      The unique identifier of the test set generation.

      ", + "location":"uri", + "locationName":"testSetGenerationId" + } + } + }, + "DescribeTestSetGenerationResponse":{ + "type":"structure", + "members":{ + "testSetGenerationId":{ + "shape":"Id", + "documentation":"

      The unique identifier of the test set generation.

      " + }, + "testSetGenerationStatus":{ + "shape":"TestSetGenerationStatus", + "documentation":"

      The status for the test set generation.

      " + }, + "failureReasons":{ + "shape":"FailureReasons", + "documentation":"

      The reasons the test set generation failed.

      " + }, + "testSetId":{ + "shape":"Id", + "documentation":"

      The unique identifier for the test set created for the generated test set.

      " + }, + "testSetName":{ + "shape":"Name", + "documentation":"

      The test set name for the generated test set.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      The test set description for the test set generation.

      " + }, + "storageLocation":{ + "shape":"TestSetStorageLocation", + "documentation":"

      The Amazon S3 storage location for the test set generation.

      " + }, + "generationDataSource":{ + "shape":"TestSetGenerationDataSource", + "documentation":"

      The data source of the test set used for the test set generation.

      " + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

      The roleARN of the test set used for the test set generation.

      " + }, + "creationDateTime":{ + "shape":"Timestamp", + "documentation":"

      The creation date and time for the test set generation.

      " + }, + "lastUpdatedDateTime":{ + "shape":"Timestamp", + "documentation":"

      The date and time of the last update for the test set generation.

      " + } + } + }, + "DescribeTestSetRequest":{ + "type":"structure", + "required":["testSetId"], + "members":{ + "testSetId":{ + "shape":"Id", + "documentation":"

      The test set Id for the test set request.

      ", + "location":"uri", + "locationName":"testSetId" + } + } + }, + "DescribeTestSetResponse":{ + "type":"structure", + "members":{ + "testSetId":{ + "shape":"Id", + "documentation":"

      The test set Id for the test set response.

      " + }, + "testSetName":{ + "shape":"Name", + "documentation":"

      The test set name of the test set.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      The description of the test set.

      " + }, + "modality":{ + "shape":"TestSetModality", + "documentation":"

      Indicates whether the test set is audio or text data.

      " + }, + "status":{ + "shape":"TestSetStatus", + "documentation":"

      The status of the test set.

      " + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

      The roleARN used for any operation in the test set to access resources in the Amazon Web Services account.

      " + }, + "numTurns":{ + "shape":"Count", + "documentation":"

      The total number of agent and user turns in the test set.

      " + }, + "storageLocation":{ + "shape":"TestSetStorageLocation", + "documentation":"

      The Amazon S3 storage location for the test set data.

      " + }, + "creationDateTime":{ + "shape":"Timestamp", + "documentation":"

      The creation date and time for the test set data.

      " + }, + "lastUpdatedDateTime":{ + "shape":"Timestamp", + "documentation":"

      The date and time for the last update of the test set data.

      " + } + } + }, "Description":{ "type":"string", "max":200, @@ -5205,7 +6008,7 @@ "documentation":"

      When true the next message for the intent is not used.

      " } }, - "documentation":"

      Defines the action that the bot executes at runtime when the conversation reaches this step.

      " + "documentation":"

      Defines the action that the bot executes at runtime when the conversation reaches this step.

      " }, "DialogActionType":{ "type":"string", @@ -5246,7 +6049,7 @@ "documentation":"

      Contains the responses and actions that Amazon Lex takes after the Lambda function is complete.

      " } }, - "documentation":"

      Settings that specify the dialog code hook that is called by Amazon Lex at a step of the conversation.

      " + "documentation":"

      Settings that specify the dialog code hook that is called by Amazon Lex at a step of the conversation.

      " }, "DialogCodeHookSettings":{ "type":"structure", @@ -5328,17 +6131,35 @@ }, "ErrorMessage":{"type":"string"}, "ExceptionMessage":{"type":"string"}, - "ExportFilter":{ + "ExecutionErrorDetails":{ "type":"structure", "required":[ - "name", - "values", - "operator" + "errorCode", + "errorMessage" ], "members":{ - "name":{ - "shape":"ExportFilterName", - "documentation":"

      The name of the field to use for filtering.

      " + "errorCode":{ + "shape":"NonEmptyString", + "documentation":"

      The error code for the error.

      " + }, + "errorMessage":{ + "shape":"NonEmptyString", + "documentation":"

      The message describing the error.

      " + } + }, + "documentation":"

      Details about an error in an execution of a test set.

      " + }, + "ExportFilter":{ + "type":"structure", + "required":[ + "name", + "values", + "operator" + ], + "members":{ + "name":{ + "shape":"ExportFilterName", + "documentation":"

      The name of the field to use for filtering.

      " }, "values":{ "shape":"FilterValues", @@ -5382,6 +6203,10 @@ "customVocabularyExportSpecification":{ "shape":"CustomVocabularyExportSpecification", "documentation":"

      The parameters required to export a custom vocabulary.

      " + }, + "testSetExportSpecification":{ + "shape":"TestSetExportSpecification", + "documentation":"

      Specifications for the test set that is exported as a resource.

      " } }, "documentation":"

      Provides information about the bot or bot locale that you want to export. You can specify the botExportSpecification or the botLocaleExportSpecification, but not both.

      " @@ -5547,7 +6372,7 @@ }, "messageGroups":{ "shape":"MessageGroupsList", - "documentation":"

      One to 5 message groups that contain start messages. Amazon Lex chooses one of the messages to play to the user.

      " + "documentation":"

      1 - 5 message groups that contain start messages. Amazon Lex chooses one of the messages to play to the user.

      " }, "allowInterrupt":{ "shape":"BoxedBoolean", @@ -5579,7 +6404,7 @@ }, "messageGroups":{ "shape":"MessageGroupsList", - "documentation":"

      One to 5 message groups that contain update messages. Amazon Lex chooses one of the messages to play to the user.

      " + "documentation":"

      1 - 5 message groups that contain update messages. Amazon Lex chooses one of the messages to play to the user.

      " }, "allowInterrupt":{ "shape":"BoxedBoolean", @@ -5611,6 +6436,31 @@ }, "documentation":"

      Provides information for updating the user on the progress of fulfilling an intent.

      " }, + "GetTestExecutionArtifactsUrlRequest":{ + "type":"structure", + "required":["testExecutionId"], + "members":{ + "testExecutionId":{ + "shape":"Id", + "documentation":"

      The unique identifier of the completed test execution.

      ", + "location":"uri", + "locationName":"testExecutionId" + } + } + }, + "GetTestExecutionArtifactsUrlResponse":{ + "type":"structure", + "members":{ + "testExecutionId":{ + "shape":"Id", + "documentation":"

      The unique identifier of the completed test execution.

      " + }, + "downloadArtifactsUrl":{ + "shape":"PresignedS3Url", + "documentation":"

      The pre-signed Amazon S3 URL to download the completed test execution.

      " + } + } + }, "GrammarSlotTypeSetting":{ "type":"structure", "members":{ @@ -5630,15 +6480,15 @@ "members":{ "s3BucketName":{ "shape":"S3BucketName", - "documentation":"

      The name of the S3 bucket that contains the grammar source.

      " + "documentation":"

      The name of the Amazon S3 bucket that contains the grammar source.

      " }, "s3ObjectKey":{ "shape":"S3ObjectPath", - "documentation":"

      The path to the grammar in the S3 bucket.

      " + "documentation":"

      The path to the grammar in the Amazon S3 bucket.

      " }, "kmsKeyArn":{ "shape":"KmsKeyArn", - "documentation":"

      The Amazon KMS key required to decrypt the contents of the grammar, if any.

      " + "documentation":"

      The KMS key required to decrypt the contents of the grammar, if any.

      " } }, "documentation":"

      Describes the Amazon S3 bucket name and location for the grammar that is the source for the slot type.

      " @@ -5677,7 +6527,8 @@ "type":"string", "enum":[ "LexJson", - "TSV" + "TSV", + "CSV" ] }, "ImportExportFilePassword":{ @@ -5737,7 +6588,11 @@ "shape":"BotLocaleImportSpecification", "documentation":"

      Parameters for importing a bot locale.

      " }, - "customVocabularyImportSpecification":{"shape":"CustomVocabularyImportSpecification"} + "customVocabularyImportSpecification":{"shape":"CustomVocabularyImportSpecification"}, + "testSetImportResourceSpecification":{ + "shape":"TestSetImportResourceSpecification", + "documentation":"

      Specifications for the test set that is imported.

      " + } }, "documentation":"

      Provides information about the bot or bot locale that you want to import. You can specify the botImportSpecification or the botLocaleImportSpecification, but not both.

      " }, @@ -5746,7 +6601,8 @@ "enum":[ "Bot", "BotLocale", - "CustomVocabulary" + "CustomVocabulary", + "TestSet" ] }, "ImportSortAttribute":{ @@ -5850,7 +6706,7 @@ "documentation":"

      The name of the context.

      " } }, - "documentation":"

      The name of a context that must be active for an intent to be selected by Amazon Lex.

      " + "documentation":"

      A context that must be active for an intent to be selected by Amazon Lex.

      " }, "InputContextsList":{ "type":"list", @@ -5858,6 +6714,84 @@ "max":5, "min":0 }, + "InputSessionStateSpecification":{ + "type":"structure", + "members":{ + "sessionAttributes":{ + "shape":"StringMap", + "documentation":"

      Session attributes for the session state.

      " + }, + "activeContexts":{ + "shape":"ActiveContextList", + "documentation":"

      Active contexts for the session state.

      " + }, + "runtimeHints":{ + "shape":"RuntimeHints", + "documentation":"

      Runtime hints for the session state.

      " + } + }, + "documentation":"

      Specifications for the current state of the dialog between the user and the bot in the test set.

      " + }, + "IntentClassificationTestResultItem":{ + "type":"structure", + "required":[ + "intentName", + "multiTurnConversation", + "resultCounts" + ], + "members":{ + "intentName":{ + "shape":"Name", + "documentation":"

      The name of the intent.

      " + }, + "multiTurnConversation":{ + "shape":"Boolean", + "documentation":"

      Indicates whether the conversation involves multiple turns or not.

      " + }, + "resultCounts":{ + "shape":"IntentClassificationTestResultItemCounts", + "documentation":"

      The result of the intent classification test.

      " + } + }, + "documentation":"

      Information for an intent that is classified by the test workbench.

      " + }, + "IntentClassificationTestResultItemCounts":{ + "type":"structure", + "required":[ + "totalResultCount", + "intentMatchResultCounts" + ], + "members":{ + "totalResultCount":{ + "shape":"Count", + "documentation":"

      The total number of results in the intent classification test.

      " + }, + "speechTranscriptionResultCounts":{ + "shape":"TestResultMatchStatusCountMap", + "documentation":"

      The number of matched, mismatched, and execution error results for speech transcription for the intent.

      " + }, + "intentMatchResultCounts":{ + "shape":"TestResultMatchStatusCountMap", + "documentation":"

      The number of matched and mismatched results for intent recognition for the intent.

      " + } + }, + "documentation":"

      The number of items in the intent classification test.

      " + }, + "IntentClassificationTestResultItemList":{ + "type":"list", + "member":{"shape":"IntentClassificationTestResultItem"} + }, + "IntentClassificationTestResults":{ + "type":"structure", + "required":["items"], + "members":{ + "items":{ + "shape":"IntentClassificationTestResultItemList", + "documentation":"

      A list of the results for the intent classification test.

      " + } + }, + "documentation":"

      Information for the results of the intent classification test.

      " + }, "IntentClosingSetting":{ "type":"structure", "members":{ @@ -5970,6 +6904,44 @@ "max":1, "min":1 }, + "IntentLevelSlotResolutionTestResultItem":{ + "type":"structure", + "required":[ + "intentName", + "multiTurnConversation", + "slotResolutionResults" + ], + "members":{ + "intentName":{ + "shape":"Name", + "documentation":"

      The name of the intent that was recognized.

      " + }, + "multiTurnConversation":{ + "shape":"Boolean", + "documentation":"

      Indicates whether the conversation involves multiple turns or not.

      " + }, + "slotResolutionResults":{ + "shape":"SlotResolutionTestResultItems", + "documentation":"

      The results for the slot resolution in the test execution result.

      " + } + }, + "documentation":"

      Information about intent-level slot resolution in a test result.

      " + }, + "IntentLevelSlotResolutionTestResultItemList":{ + "type":"list", + "member":{"shape":"IntentLevelSlotResolutionTestResultItem"} + }, + "IntentLevelSlotResolutionTestResults":{ + "type":"structure", + "required":["items"], + "members":{ + "items":{ + "shape":"IntentLevelSlotResolutionTestResultItemList", + "documentation":"

      Indicates the items for the slot level resolution for the intents.

      " + } + }, + "documentation":"

      Indicates the success or failure of slots at the intent level.

      " + }, "IntentOverride":{ "type":"structure", "members":{ @@ -5979,7 +6951,7 @@ }, "slots":{ "shape":"SlotValueOverrideMap", - "documentation":"

      A map of all of the slot value overrides for the intent. The name of the slot maps to the value of the slot. Slots that are not included in the map aren't overridden.,

      " + "documentation":"

      A map of all of the slot value overrides for the intent. The name of the slot maps to the value of the slot. Slots that are not included in the map aren't overridden.

      " } }, "documentation":"

      Override settings to configure the intent state.

      " @@ -6079,18 +7051,18 @@ "members":{ "kendraIndex":{ "shape":"KendraIndexArn", - "documentation":"

      The Amazon Resource Name (ARN) of the Amazon Kendra index that you want the AMAZON.KendraSearchIntent intent to search. The index must be in the same account and Region as the Amazon Lex bot.

      " + "documentation":"

      The Amazon Resource Name (ARN) of the Amazon Kendra index that you want the AMAZON.KendraSearchIntent intent to search. The index must be in the same account and Region as the Amazon Lex bot.

      " }, "queryFilterStringEnabled":{ "shape":"Boolean", - "documentation":"

      Determines whether the AMAZON.KendraSearchIntent intent uses a custom query string to query the Amazon Kendra index.

      " + "documentation":"

      Determines whether the AMAZON.KendraSearchIntent intent uses a custom query string to query the Amazon Kendra index.

      " }, "queryFilterString":{ "shape":"QueryFilterString", "documentation":"

      A query filter that Amazon Lex sends to Amazon Kendra to filter the response from a query. The filter is in the format defined by Amazon Kendra. For more information, see Filtering queries.

      " } }, - "documentation":"

      Provides configuration information for the AMAZON.KendraSearchIntent intent. When you use this intent, Amazon Lex searches the specified Amazon Kendra index and returns documents from the index that match the user's utterance.

      " + "documentation":"

      Provides configuration information for the AMAZON.KendraSearchIntent intent. When you use this intent, Amazon Lex searches the specified Amazon Kendra index and returns documents from the index that match the user's utterance.

      " }, "KendraIndexArn":{ "type":"string", @@ -7032,6 +8004,139 @@ } } }, + "ListTestExecutionResultItemsRequest":{ + "type":"structure", + "required":[ + "testExecutionId", + "resultFilterBy" + ], + "members":{ + "testExecutionId":{ + "shape":"Id", + "documentation":"

      The unique identifier of the test execution to list the result items.

      ", + "location":"uri", + "locationName":"testExecutionId" + }, + "resultFilterBy":{ + "shape":"TestExecutionResultFilterBy", + "documentation":"

      The filter for the list of results from the test set execution.

      " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

      The maximum number of test execution result items to return in each page. If there are fewer results than the max page size, only the actual number of results are returned.

      " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

      If the response from the ListTestExecutionResultItems operation contains more results than specified in the maxResults parameter, a token is returned in the response. Use that token in the nextToken parameter to return the next page of results.

      " + } + } + }, + "ListTestExecutionResultItemsResponse":{ + "type":"structure", + "members":{ + "testExecutionResults":{ + "shape":"TestExecutionResultItems", + "documentation":"

      The list of results from the test execution.

      " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

      A token that indicates whether there are more results to return in a response to the ListTestExecutionResultItems operation. If the nextToken field is present, you send the contents as the nextToken parameter of a ListTestExecutionResultItems operation request to get the next page of results.

      " + } + } + }, + "ListTestExecutionsRequest":{ + "type":"structure", + "members":{ + "sortBy":{ + "shape":"TestExecutionSortBy", + "documentation":"

      The sort order of the test set executions.

      " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

      The maximum number of test executions to return in each page. If there are fewer results than the max page size, only the actual number of results are returned.

      " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

      If the response from the ListTestExecutions operation contains more results than specified in the maxResults parameter, a token is returned in the response. Use that token in the nextToken parameter to return the next page of results.

      " + } + } + }, + "ListTestExecutionsResponse":{ + "type":"structure", + "members":{ + "testExecutions":{ + "shape":"TestExecutionSummaryList", + "documentation":"

      The list of test executions.

      " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

      A token that indicates whether there are more results to return in a response to the ListTestExecutions operation. If the nextToken field is present, you send the contents as the nextToken parameter of a ListTestExecutions operation request to get the next page of results.

      " + } + } + }, + "ListTestSetRecordsRequest":{ + "type":"structure", + "required":["testSetId"], + "members":{ + "testSetId":{ + "shape":"Id", + "documentation":"

      The identifier of the test set to list its test set records.

      ", + "location":"uri", + "locationName":"testSetId" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

      The maximum number of test set records to return in each page. If there are fewer records than the max page size, only the actual number of records are returned.

      " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

      If the response from the ListTestSetRecords operation contains more results than specified in the maxResults parameter, a token is returned in the response. Use that token in the nextToken parameter to return the next page of results.

      " + } + } + }, + "ListTestSetRecordsResponse":{ + "type":"structure", + "members":{ + "testSetRecords":{ + "shape":"TestSetTurnRecordList", + "documentation":"

      The list of records from the test set.

      " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

      A token that indicates whether there are more records to return in a response to the ListTestSetRecords operation. If the nextToken field is present, you send the contents as the nextToken parameter of a ListTestSetRecords operation request to get the next page of records.

      " + } + } + }, + "ListTestSetsRequest":{ + "type":"structure", + "members":{ + "sortBy":{ + "shape":"TestSetSortBy", + "documentation":"

      The sort order for the list of test sets.

      " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

      The maximum number of test sets to return in each page. If there are fewer results than the max page size, only the actual number of results are returned.

      " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

      If the response from the ListTestSets operation contains more results than specified in the maxResults parameter, a token is returned in the response. Use that token in the nextToken parameter to return the next page of results.

      " + } + } + }, + "ListTestSetsResponse":{ + "type":"structure", + "members":{ + "testSets":{ + "shape":"TestSetSummaryList", + "documentation":"

      The selected test sets in a list of test sets.

      " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

      A token that indicates whether there are more results to return in a response to the ListTestSets operation. If the nextToken field is present, you send the contents as the nextToken parameter of a ListTestSets operation request to get the next page of results.

      " + } + } + }, "LocaleId":{"type":"string"}, "LocaleName":{"type":"string"}, "LogPrefix":{ @@ -7077,7 +8182,7 @@ "documentation":"

      A message that defines a response card that the client application can show to the user.

      " } }, - "documentation":"

      The object that provides message text and it's type.

      " + "documentation":"

      The object that provides message text and its type.

      " }, "MessageGroup":{ "type":"structure", @@ -7128,7 +8233,7 @@ "type":"string", "max":100, "min":1, - "pattern":"^([0-9a-zA-Z][_-]?)+$" + "pattern":"^([0-9a-zA-Z][_-]?){1,100}$" }, "NewCustomVocabularyItem":{ "type":"structure", @@ -7234,6 +8339,48 @@ "max":10, "min":0 }, + "OverallTestResultItem":{ + "type":"structure", + "required":[ + "multiTurnConversation", + "totalResultCount", + "endToEndResultCounts" + ], + "members":{ + "multiTurnConversation":{ + "shape":"Boolean", + "documentation":"

      Indicates whether the conversation contains multiple turns or not.

      " + }, + "totalResultCount":{ + "shape":"Count", + "documentation":"

      The total number of overall results in the result of the test execution.

      " + }, + "speechTranscriptionResultCounts":{ + "shape":"TestResultMatchStatusCountMap", + "documentation":"

      The number of speech transcription results in the overall test.

      " + }, + "endToEndResultCounts":{ + "shape":"TestResultMatchStatusCountMap", + "documentation":"

      The number of results that succeeded.

      " + } + }, + "documentation":"

      Information about the overall results for a test execution result.

      " + }, + "OverallTestResultItemList":{ + "type":"list", + "member":{"shape":"OverallTestResultItem"} + }, + "OverallTestResults":{ + "type":"structure", + "required":["items"], + "members":{ + "items":{ + "shape":"OverallTestResultItemList", + "documentation":"

      A list of the overall test results.

      " + } + }, + "documentation":"

      Information about the overall test results.

      " + }, "ParentBotNetwork":{ "type":"structure", "required":[ @@ -7378,7 +8525,7 @@ "members":{ "service":{ "shape":"ServicePrincipal", - "documentation":"

      The name of the AWS service that should allowed or denied access to an Amazon Lex action.

      " + "documentation":"

      The name of the Amazon Web Services service that should be allowed or denied access to an Amazon Lex action.

      " }, "arn":{ "shape":"PrincipalArn", @@ -7509,6 +8656,11 @@ "type":"list", "member":{"shape":"RecommendedIntentSummary"} }, + "RecordNumber":{ + "type":"long", + "max":200000, + "min":1 + }, "RegexPattern":{ "type":"string", "max":300, @@ -7570,6 +8722,52 @@ "min":32, "pattern":"^arn:aws:iam::[0-9]{12}:role/.*$" }, + "RuntimeHintDetails":{ + "type":"structure", + "members":{ + "runtimeHintValues":{ + "shape":"RuntimeHintValuesList", + "documentation":"

      One or more strings that Amazon Lex should look for in the input to the bot. Each phrase is given preference when deciding on slot values.

      " + }, + "subSlotHints":{ + "shape":"SlotHintsSlotMap", + "documentation":"

      A map of constituent sub slot names inside a composite slot in the intent and the phrases that should be added for each sub slot. Inside each composite slot hints, this structure provides a mechanism to add granular sub slot phrases. Only sub slot hints are supported for composite slots. The intent name, composite slot name and the constituent sub slot names must exist.

      " + } + }, + "documentation":"

      Provides an array of phrases that should be given preference when resolving values for a slot.

      " + }, + "RuntimeHintPhrase":{ + "type":"string", + "max":140, + "min":1 + }, + "RuntimeHintValue":{ + "type":"structure", + "required":["phrase"], + "members":{ + "phrase":{ + "shape":"RuntimeHintPhrase", + "documentation":"

      The phrase that Amazon Lex should look for in the user's input to the bot.

      " + } + }, + "documentation":"

      Provides the phrase that Amazon Lex should look for in the user's input to the bot.

      " + }, + "RuntimeHintValuesList":{ + "type":"list", + "member":{"shape":"RuntimeHintValue"}, + "max":100, + "min":1 + }, + "RuntimeHints":{ + "type":"structure", + "members":{ + "slotHints":{ + "shape":"SlotHintsIntentMap", + "documentation":"

      A list of the slots in the intent that should have runtime hints added, and the phrases that should be added for each slot.

      The first level of the slotHints map is the name of the intent. The second level is the name of the slot within the intent. For more information, see Using hints to improve accuracy.

      The intent name and slot name must exist.

      " + } + }, + "documentation":"

      You can provide Amazon Lex with hints to the phrases that a customer is likely to use for a slot. When a slot with hints is resolved, the phrases in the runtime hints are preferred in the resolution. You can provide hints for a maximum of 100 intents. You can provide a maximum of 100 slots.

      Before you can use runtime hints with an existing bot, you must first rebuild the bot.

      For more information, see Using runtime hints to improve recognition of slot values.

      " + }, "S3BucketArn":{ "type":"string", "max":2048, @@ -7585,7 +8783,7 @@ "members":{ "kmsKeyArn":{ "shape":"KmsKeyArn", - "documentation":"

      The Amazon Resource Name (ARN) of an AWS Key Management Service (KMS) key for encrypting audio log files stored in an S3 bucket.

      " + "documentation":"

      The Amazon Resource Name (ARN) of an Amazon Web Services Key Management Service (KMS) key for encrypting audio log files stored in an S3 bucket.

      " }, "s3BucketArn":{ "shape":"S3BucketArn", @@ -7925,6 +9123,16 @@ "max":1, "min":1 }, + "SlotHintsIntentMap":{ + "type":"map", + "key":{"shape":"Name"}, + "value":{"shape":"SlotHintsSlotMap"} + }, + "SlotHintsSlotMap":{ + "type":"map", + "key":{"shape":"Name"}, + "value":{"shape":"RuntimeHintDetails"} + }, "SlotPrioritiesList":{ "type":"list", "member":{"shape":"SlotPriority"} @@ -7938,7 +9146,7 @@ "members":{ "priority":{ "shape":"PriorityValue", - "documentation":"

      The priority that a slot should be elicited.

      " + "documentation":"

      The priority that Amazon Lex should apply to the slot.

      " }, "slotId":{ "shape":"Id", @@ -7947,6 +9155,50 @@ }, "documentation":"

      Sets the priority that Amazon Lex should use when eliciting slot values from a user.

      " }, + "SlotResolutionTestResultItem":{ + "type":"structure", + "required":[ + "slotName", + "resultCounts" + ], + "members":{ + "slotName":{ + "shape":"TestResultSlotName", + "documentation":"

      The name of the slot.

      " + }, + "resultCounts":{ + "shape":"SlotResolutionTestResultItemCounts", + "documentation":"

      A result for slot resolution in the results of a test execution.

      " + } + }, + "documentation":"

      Information about the success and failure rate of slot resolution in the results of a test execution.

      " + }, + "SlotResolutionTestResultItemCounts":{ + "type":"structure", + "required":[ + "totalResultCount", + "slotMatchResultCounts" + ], + "members":{ + "totalResultCount":{ + "shape":"Count", + "documentation":"

      The total number of results.

      " + }, + "speechTranscriptionResultCounts":{ + "shape":"TestResultMatchStatusCountMap", + "documentation":"

      The number of matched, mismatched and execution error results for speech transcription for the slot.

      " + }, + "slotMatchResultCounts":{ + "shape":"TestResultMatchStatusCountMap", + "documentation":"

      The number of matched and mismatched results for slot resolution for the slot.

      " + } + }, + "documentation":"

      Information about the counts for a slot resolution in the results of a test execution.

      " + }, + "SlotResolutionTestResultItems":{ + "type":"list", + "member":{"shape":"SlotResolutionTestResultItem"} + }, "SlotShape":{ "type":"string", "enum":[ @@ -8130,7 +9382,7 @@ }, "slotTypeCategory":{ "shape":"SlotTypeCategory", - "documentation":"

      Indicates the type of the slot type.

      • Custom - A slot type that you created using custom values. For more information, see Creating custom slot types.

      • Extended - A slot type created by extending the AMAZON.AlphaNumeric built-in slot type. For more information, see AMAZON.AlphaNumeric.

      • ExternalGrammar - A slot type using a custom GRXML grammar to define values. For more information, see Using a custom grammar slot type.

      " + "documentation":"

      Indicates the type of the slot type.

      " } }, "documentation":"

      Provides summary information about a slot type.

      " @@ -8195,7 +9447,7 @@ "documentation":"

      Specifies the settings that Amazon Lex uses when a slot value is successfully entered by a user.

      " } }, - "documentation":"

      Specifies the elicitation setting details for constituent sub slots of a composite slot.

      " + "documentation":"

      Specifies the elicitation setting details for eliciting a slot.

      " }, "SlotValueOverride":{ "type":"structure", @@ -8226,7 +9478,7 @@ "members":{ "pattern":{ "shape":"RegexPattern", - "documentation":"

      A regular expression used to validate the value of a slot.

      Use a standard regular expression. Amazon Lex supports the following characters in the regular expression:

      • A-Z, a-z

      • 0-9

      • Unicode characters (\"\\ u<Unicode>\")

      Represent Unicode characters with four digits, for example \"\\u0041\" or \"\\u005A\".

      The following regular expression operators are not supported:

      • Infinite repeaters: *, +, or {x,} with no upper bound.

      • Wild card (.)

      " + "documentation":"

      A regular expression used to validate the value of a slot.

      Use a standard regular expression. Amazon Lex supports the following characters in the regular expression:

      • A-Z, a-z

      • 0-9

      • Unicode characters (\"\\⁠u<Unicode>\")

      Represent Unicode characters with four digits, for example \"\\⁠u0041\" or \"\\⁠u005A\".

      The following regular expression operators are not supported:

      • Infinite repeaters: *, +, or {x,} with no upper bound.

      • Wild card (.)

      " } }, "documentation":"

      Provides a regular expression used to validate the value of a slot.

      " @@ -8245,7 +9497,7 @@ "members":{ "resolutionStrategy":{ "shape":"SlotValueResolutionStrategy", - "documentation":"

      Determines the slot resolution strategy that Amazon Lex uses to return slot type values. The field can be set to one of the following values:

      • OriginalValue - Returns the value entered by the user, if the user value is similar to the slot value.

      • TopResolution - If there is a resolution list for the slot, return the first value in the resolution list as the slot type value. If there is no resolution list, null is returned.

      If you don't specify the valueSelectionStrategy, the default is OriginalValue.

      " + "documentation":"

      Determines the slot resolution strategy that Amazon Lex uses to return slot type values. The field can be set to one of the following values:

      • ORIGINAL_VALUE - Returns the value entered by the user, if the user value is similar to the slot value.

      • TOP_RESOLUTION - If there is a resolution list for the slot, return the first value in the resolution list as the slot type value. If there is no resolution list, null is returned.

      If you don't specify the valueSelectionStrategy, the default is ORIGINAL_VALUE.

      " }, "regexFilter":{ "shape":"SlotValueRegexFilter", @@ -8253,7 +9505,7 @@ }, "advancedRecognitionSetting":{ "shape":"AdvancedRecognitionSetting", - "documentation":"

      Provides settings that enable advanced recognition settings for slot values.

      " + "documentation":"

      Provides settings that enable advanced recognition settings for slot values. You can use this to enable using slot values as a custom vocabulary for recognizing user utterances.

      " } }, "documentation":"

      Contains settings used by Amazon Lex to select a slot value.

      " @@ -8412,7 +9664,140 @@ } } }, - "StillWaitingResponseFrequency":{ + "StartTestExecutionRequest":{ + "type":"structure", + "required":[ + "testSetId", + "target", + "apiMode" + ], + "members":{ + "testSetId":{ + "shape":"Id", + "documentation":"

      The test set Id for the test set execution.

      ", + "location":"uri", + "locationName":"testSetId" + }, + "target":{ + "shape":"TestExecutionTarget", + "documentation":"

      The target bot for the test set execution.

      " + }, + "apiMode":{ + "shape":"TestExecutionApiMode", + "documentation":"

      Indicates whether we use streaming or non-streaming APIs for the test set execution. For streaming, StartConversation Runtime API is used. Whereas, for non-streaming, RecognizeUtterance and RecognizeText Amazon Lex Runtime APIs are used.

      " + }, + "testExecutionModality":{ + "shape":"TestExecutionModality", + "documentation":"

      Indicates whether audio or text is used.

      " + } + } + }, + "StartTestExecutionResponse":{ + "type":"structure", + "members":{ + "testExecutionId":{ + "shape":"Id", + "documentation":"

      The unique identifier of the test set execution.

      " + }, + "creationDateTime":{ + "shape":"Timestamp", + "documentation":"

      The creation date and time for the test set execution.

      " + }, + "testSetId":{ + "shape":"Id", + "documentation":"

      The test set Id for the test set execution.

      " + }, + "target":{ + "shape":"TestExecutionTarget", + "documentation":"

      The target bot for the test set execution.

      " + }, + "apiMode":{ + "shape":"TestExecutionApiMode", + "documentation":"

      Indicates whether we use streaming or non-streaming APIs for the test set execution. For streaming, StartConversation Amazon Lex Runtime API is used. Whereas for non-streaming, RecognizeUtterance and RecognizeText Amazon Lex Runtime API are used.

      " + }, + "testExecutionModality":{ + "shape":"TestExecutionModality", + "documentation":"

      Indicates whether audio or text is used.

      " + } + } + }, + "StartTestSetGenerationRequest":{ + "type":"structure", + "required":[ + "testSetName", + "storageLocation", + "generationDataSource", + "roleArn" + ], + "members":{ + "testSetName":{ + "shape":"Name", + "documentation":"

      The test set name for the test set generation request.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      The test set description for the test set generation request.

      " + }, + "storageLocation":{ + "shape":"TestSetStorageLocation", + "documentation":"

      The Amazon S3 storage location for the test set generation.

      " + }, + "generationDataSource":{ + "shape":"TestSetGenerationDataSource", + "documentation":"

      The data source for the test set generation.

      " + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

      The roleARN used for any operation in the test set to access resources in the Amazon Web Services account.

      " + }, + "testSetTags":{ + "shape":"TagMap", + "documentation":"

      A list of tags to add to the test set. You can only add tags when you import/generate a new test set. You can't use the UpdateTestSet operation to update tags. To update tags, use the TagResource operation.

      " + } + } + }, + "StartTestSetGenerationResponse":{ + "type":"structure", + "members":{ + "testSetGenerationId":{ + "shape":"Id", + "documentation":"

      The unique identifier of the test set generation to describe.

      " + }, + "creationDateTime":{ + "shape":"Timestamp", + "documentation":"

      The creation date and time for the test set generation.

      " + }, + "testSetGenerationStatus":{ + "shape":"TestSetGenerationStatus", + "documentation":"

      The status for the test set generation.

      " + }, + "testSetName":{ + "shape":"Name", + "documentation":"

      The test set name used for the test set generation.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      The description used for the test set generation.

      " + }, + "storageLocation":{ + "shape":"TestSetStorageLocation", + "documentation":"

      The Amazon S3 storage location for the test set generation.

      " + }, + "generationDataSource":{ + "shape":"TestSetGenerationDataSource", + "documentation":"

      The data source for the test set generation.

      " + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

      The roleARN used for any operation in the test set to access resources in the Amazon Web Services account.

      " + }, + "testSetTags":{ + "shape":"TagMap", + "documentation":"

      A list of tags that was used for the test set that is being generated.

      " + } + } + }, + "StillWaitingResponseFrequency":{ "type":"integer", "max":300, "min":1 @@ -8533,105 +9918,653 @@ "documentation":"

      Specifications for the constituent sub slots of a composite slot.

      " } }, - "documentation":"

      Specifications for the constituent sub slots and the expression for the composite slot.

      " + "documentation":"

      Specifications for the constituent sub slots and the expression for the composite slot.

      " + }, + "SubSlotSpecificationMap":{ + "type":"map", + "key":{"shape":"Name"}, + "value":{"shape":"Specifications"}, + "max":6, + "min":0 + }, + "SubSlotTypeComposition":{ + "type":"structure", + "required":[ + "name", + "slotTypeId" + ], + "members":{ + "name":{ + "shape":"Name", + "documentation":"

      Name of a constituent sub slot inside a composite slot.

      " + }, + "slotTypeId":{ + "shape":"BuiltInOrCustomSlotTypeId", + "documentation":"

      The unique identifier assigned to a slot type. This refers to either a built-in slot type or the unique slotTypeId of a custom slot type.

      " + } + }, + "documentation":"

      Subslot type composition.

      " + }, + "SubSlotTypeList":{ + "type":"list", + "member":{"shape":"SubSlotTypeComposition"}, + "max":6, + "min":0 + }, + "SubSlotValueElicitationSetting":{ + "type":"structure", + "required":["promptSpecification"], + "members":{ + "defaultValueSpecification":{"shape":"SlotDefaultValueSpecification"}, + "promptSpecification":{"shape":"PromptSpecification"}, + "sampleUtterances":{ + "shape":"SampleUtterancesList", + "documentation":"

      If you know a specific pattern that users might respond to an Amazon Lex request for a sub slot value, you can provide those utterances to improve accuracy. This is optional. In most cases Amazon Lex is capable of understanding user utterances. This is similar to SampleUtterances for slots.

      " + }, + "waitAndContinueSpecification":{"shape":"WaitAndContinueSpecification"} + }, + "documentation":"

      Subslot elicitation settings.

      DefaultValueSpecification is a list of default values for a constituent sub slot in a composite slot. Default values are used when Amazon Lex hasn't determined a value for a slot. You can specify default values from context variables, session attributes, and defined values. This is similar to DefaultValueSpecification for slots.

      PromptSpecification is the prompt that Amazon Lex uses to elicit the sub slot value from the user. This is similar to PromptSpecification for slots.

      " + }, + "SynonymList":{ + "type":"list", + "member":{"shape":"SampleValue"}, + "max":10000, + "min":1 + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceARN", + "tags" + ], + "members":{ + "resourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

      The Amazon Resource Name (ARN) of the bot, bot alias, or bot channel to tag.

      ", + "location":"uri", + "locationName":"resourceARN" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

      A list of tag keys to add to the resource. If a tag key already exists, the existing value is replaced with the new value.

      " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "TestExecutionApiMode":{ + "type":"string", + "enum":[ + "Streaming", + "NonStreaming" + ] + }, + "TestExecutionModality":{ + "type":"string", + "enum":[ + "Text", + "Audio" + ] + }, + "TestExecutionResultFilterBy":{ + "type":"structure", + "required":["resultTypeFilter"], + "members":{ + "resultTypeFilter":{ + "shape":"TestResultTypeFilter", + "documentation":"

      Specifies which results to filter. See Test result details for details about different types of results.

      " + }, + "conversationLevelTestResultsFilterBy":{ + "shape":"ConversationLevelTestResultsFilterBy", + "documentation":"

      Contains information about the method for filtering Conversation level test results.

      " + } + }, + "documentation":"

      Contains information about the method by which to filter the results of the test execution.

      " + }, + "TestExecutionResultItems":{ + "type":"structure", + "members":{ + "overallTestResults":{ + "shape":"OverallTestResults", + "documentation":"

      Overall results for the test execution, including the breakdown of conversations and single-input utterances.

      " + }, + "conversationLevelTestResults":{ + "shape":"ConversationLevelTestResults", + "documentation":"

      Results related to conversations in the test set, including metrics about success and failure of conversations and intent and slot failures.

      " + }, + "intentClassificationTestResults":{ + "shape":"IntentClassificationTestResults", + "documentation":"

      Intent recognition results aggregated by intent name. The aggregated results contain success and failure rates of intent recognition, speech transcriptions, and end-to-end conversations.

      " + }, + "intentLevelSlotResolutionTestResults":{ + "shape":"IntentLevelSlotResolutionTestResults", + "documentation":"

      Slot resolution results aggregated by intent and slot name. The aggregated results contain success and failure rates of slot resolution, speech transcriptions, and end-to-end conversations.

      " + }, + "utteranceLevelTestResults":{ + "shape":"UtteranceLevelTestResults", + "documentation":"

      Results related to utterances in the test set.

      " + } + }, + "documentation":"

      Contains the results of the test execution, grouped by type of results. See Test result details for details about different types of results.

      " + }, + "TestExecutionSortAttribute":{ + "type":"string", + "enum":[ + "TestSetName", + "CreationDateTime" + ] + }, + "TestExecutionSortBy":{ + "type":"structure", + "required":[ + "attribute", + "order" + ], + "members":{ + "attribute":{ + "shape":"TestExecutionSortAttribute", + "documentation":"

      Specifies whether to sort the test set executions by the date and time at which the test sets were created.

      " + }, + "order":{ + "shape":"SortOrder", + "documentation":"

      Specifies whether to sort in ascending or descending order.

      " + } + }, + "documentation":"

      Contains information about the method by which to sort the instances of test executions you have carried out.

      " + }, + "TestExecutionStatus":{ + "type":"string", + "enum":[ + "Pending", + "Waiting", + "InProgress", + "Completed", + "Failed", + "Stopping", + "Stopped" + ] + }, + "TestExecutionSummary":{ + "type":"structure", + "members":{ + "testExecutionId":{ + "shape":"Id", + "documentation":"

      The unique identifier of the test execution.

      " + }, + "creationDateTime":{ + "shape":"Timestamp", + "documentation":"

      The date and time at which the test execution was created.

      " + }, + "lastUpdatedDateTime":{ + "shape":"Timestamp", + "documentation":"

      The date and time at which the test execution was last updated.

      " + }, + "testExecutionStatus":{ + "shape":"TestExecutionStatus", + "documentation":"

      The current status of the test execution.

      " + }, + "testSetId":{ + "shape":"Id", + "documentation":"

      The unique identifier of the test set used in the test execution.

      " + }, + "testSetName":{ + "shape":"Name", + "documentation":"

      The name of the test set used in the test execution.

      " + }, + "target":{ + "shape":"TestExecutionTarget", + "documentation":"

      Contains information about the bot used for the test execution.

      " + }, + "apiMode":{ + "shape":"TestExecutionApiMode", + "documentation":"

      Specifies whether the API mode for the test execution is streaming or non-streaming.

      " + }, + "testExecutionModality":{ + "shape":"TestExecutionModality", + "documentation":"

      Specifies whether the data used for the test execution is written or spoken.

      " + } + }, + "documentation":"

      Summarizes metadata about the test execution.

      " + }, + "TestExecutionSummaryList":{ + "type":"list", + "member":{"shape":"TestExecutionSummary"} + }, + "TestExecutionTarget":{ + "type":"structure", + "members":{ + "botAliasTarget":{ + "shape":"BotAliasTestExecutionTarget", + "documentation":"

      Contains information about the bot alias used for the test execution.

      " + } + }, + "documentation":"

      Contains information about the bot used for the test execution.

      " + }, + "TestResultMatchStatus":{ + "type":"string", + "enum":[ + "Matched", + "Mismatched", + "ExecutionError" + ] + }, + "TestResultMatchStatusCountMap":{ + "type":"map", + "key":{"shape":"TestResultMatchStatus"}, + "value":{"shape":"Count"} + }, + "TestResultSlotName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^([0-9a-zA-Z][_.-]?)+$" + }, + "TestResultTypeFilter":{ + "type":"string", + "enum":[ + "OverallTestResults", + "ConversationLevelTestResults", + "IntentClassificationTestResults", + "SlotResolutionTestResults", + "UtteranceLevelResults" + ] + }, + "TestSetAgentPrompt":{ + "type":"string", + "max":1024, + "min":1 + }, + "TestSetConversationId":{ + "type":"string", + "max":50, + "min":1, + "pattern":"^([0-9a-zA-Z][_-]?)+$" + }, + "TestSetDiscrepancyErrors":{ + "type":"structure", + "required":[ + "intentDiscrepancies", + "slotDiscrepancies" + ], + "members":{ + "intentDiscrepancies":{ + "shape":"TestSetIntentDiscrepancyList", + "documentation":"

      Contains information about discrepancies found for intents between the test set and the bot.

      " + }, + "slotDiscrepancies":{ + "shape":"TestSetSlotDiscrepancyList", + "documentation":"

      Contains information about discrepancies found for slots between the test set and the bot.

      " + } + }, + "documentation":"

      Contains details about the errors in the test set discrepancy report.

      " + }, + "TestSetDiscrepancyReportBotAliasTarget":{ + "type":"structure", + "required":[ + "botId", + "botAliasId", + "localeId" + ], + "members":{ + "botId":{ + "shape":"Id", + "documentation":"

      The unique identifier for the bot associated with the bot alias.

      " + }, + "botAliasId":{ + "shape":"BotAliasId", + "documentation":"

      The unique identifier for the bot alias.

      " + }, + "localeId":{ + "shape":"LocaleId", + "documentation":"

      The unique identifier of the locale associated with the bot alias.

      " + } + }, + "documentation":"

      Contains information about the bot alias used for the test set discrepancy report.

      " + }, + "TestSetDiscrepancyReportResourceTarget":{ + "type":"structure", + "members":{ + "botAliasTarget":{ + "shape":"TestSetDiscrepancyReportBotAliasTarget", + "documentation":"

      Contains information about the bot alias used as the resource for the test set discrepancy report.

      " + } + }, + "documentation":"

      Contains information about the resource used for the test set discrepancy report.

      " + }, + "TestSetDiscrepancyReportStatus":{ + "type":"string", + "enum":[ + "InProgress", + "Completed", + "Failed" + ] + }, + "TestSetExportSpecification":{ + "type":"structure", + "required":["testSetId"], + "members":{ + "testSetId":{ + "shape":"Id", + "documentation":"

      The unique identifier of the test set.

      " + } + }, + "documentation":"

      Contains information about the test set that is exported.

      " + }, + "TestSetGenerationDataSource":{ + "type":"structure", + "members":{ + "conversationLogsDataSource":{ + "shape":"ConversationLogsDataSource", + "documentation":"

      Contains information about the bot from which the conversation logs are sourced.

      " + } + }, + "documentation":"

      Contains information about the data source from which the test set is generated.

      " + }, + "TestSetGenerationStatus":{ + "type":"string", + "enum":[ + "Generating", + "Ready", + "Failed", + "Pending" + ] + }, + "TestSetImportInputLocation":{ + "type":"structure", + "required":[ + "s3BucketName", + "s3Path" + ], + "members":{ + "s3BucketName":{ + "shape":"S3BucketName", + "documentation":"

      The name of the Amazon S3 bucket.

      " + }, + "s3Path":{ + "shape":"S3ObjectPath", + "documentation":"

      The path inside the Amazon S3 bucket pointing to the test-set CSV file.

      " + } + }, + "documentation":"

      Contains information about the Amazon S3 location from which the test set is imported.

      " + }, + "TestSetImportResourceSpecification":{ + "type":"structure", + "required":[ + "testSetName", + "roleArn", + "storageLocation", + "importInputLocation", + "modality" + ], + "members":{ + "testSetName":{ + "shape":"Name", + "documentation":"

      The name of the test set.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      The description of the test set.

      " + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

      The Amazon Resource Name (ARN) of an IAM role that has permission to access the test set.

      " + }, + "storageLocation":{ + "shape":"TestSetStorageLocation", + "documentation":"

      Contains information about the location that Amazon Lex uses to store the test-set.

      " + }, + "importInputLocation":{ + "shape":"TestSetImportInputLocation", + "documentation":"

      Contains information about the input location from where test-set should be imported.

      " + }, + "modality":{ + "shape":"TestSetModality", + "documentation":"

      Specifies whether the test-set being imported contains written or spoken data.

      " + }, + "testSetTags":{ + "shape":"TagMap", + "documentation":"

      A list of tags to add to the test set. You can only add tags when you import/generate a new test set. You can't use the UpdateTestSet operation to update tags. To update tags, use the TagResource operation.

      " + } + }, + "documentation":"

      Contains information about the test set that is imported.

      " + }, + "TestSetIntentDiscrepancyItem":{ + "type":"structure", + "required":[ + "intentName", + "errorMessage" + ], + "members":{ + "intentName":{ + "shape":"Name", + "documentation":"

      The name of the intent in the discrepancy report.

      " + }, + "errorMessage":{ + "shape":"String", + "documentation":"

      The error message for a discrepancy for an intent between the test set and the bot.

      " + } + }, + "documentation":"

      Contains information about discrepancy in an intent information between the test set and the bot.

      " + }, + "TestSetIntentDiscrepancyList":{ + "type":"list", + "member":{"shape":"TestSetIntentDiscrepancyItem"} + }, + "TestSetModality":{ + "type":"string", + "enum":[ + "Text", + "Audio" + ] + }, + "TestSetSlotDiscrepancyItem":{ + "type":"structure", + "required":[ + "intentName", + "slotName", + "errorMessage" + ], + "members":{ + "intentName":{ + "shape":"Name", + "documentation":"

      The name of the intent associated with the slot in the discrepancy report.

      " + }, + "slotName":{ + "shape":"Name", + "documentation":"

      The name of the slot in the discrepancy report.

      " + }, + "errorMessage":{ + "shape":"String", + "documentation":"

      The error message for a discrepancy for a slot between the test set and the bot.

      " + } + }, + "documentation":"

      Contains information about discrepancy in a slot information between the test set and the bot.

      " + }, + "TestSetSlotDiscrepancyList":{ + "type":"list", + "member":{"shape":"TestSetSlotDiscrepancyItem"} + }, + "TestSetSortAttribute":{ + "type":"string", + "enum":[ + "TestSetName", + "LastUpdatedDateTime" + ] + }, + "TestSetSortBy":{ + "type":"structure", + "required":[ + "attribute", + "order" + ], + "members":{ + "attribute":{ + "shape":"TestSetSortAttribute", + "documentation":"

      Specifies whether to sort the test sets by name or by the time they were last updated.

      " + }, + "order":{ + "shape":"SortOrder", + "documentation":"

      Specifies whether to sort in ascending or descending order.

      " + } + }, + "documentation":"

      Contains information about the methods by which to sort the test set.

      " }, - "SubSlotSpecificationMap":{ - "type":"map", - "key":{"shape":"Name"}, - "value":{"shape":"Specifications"}, - "max":6, - "min":0 + "TestSetStatus":{ + "type":"string", + "enum":[ + "Importing", + "PendingAnnotation", + "Deleting", + "ValidationError", + "Ready" + ] }, - "SubSlotTypeComposition":{ + "TestSetStorageLocation":{ "type":"structure", "required":[ - "name", - "slotTypeId" + "s3BucketName", + "s3Path" ], "members":{ - "name":{ - "shape":"Name", - "documentation":"

      Name of a constituent sub slot inside a composite slot.

      " + "s3BucketName":{ + "shape":"S3BucketName", + "documentation":"

      The name of the Amazon S3 bucket in which the test set is stored.

      " }, - "slotTypeId":{ - "shape":"BuiltInOrCustomSlotTypeId", - "documentation":"

      The unique identifier assigned to a slot type. This refers to either a built-in slot type or the unique slotTypeId of a custom slot type.

      " + "s3Path":{ + "shape":"S3ObjectPath", + "documentation":"

      The path inside the Amazon S3 bucket where the test set is stored.

      " + }, + "kmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

      The Amazon Resource Name (ARN) of an Amazon Web Services Key Management Service (KMS) key for encrypting the test set.

      " } }, - "documentation":"

      Subslot type composition.

      " - }, - "SubSlotTypeList":{ - "type":"list", - "member":{"shape":"SubSlotTypeComposition"}, - "max":6, - "min":0 + "documentation":"

      Contains information about the location in which the test set is stored.

      " }, - "SubSlotValueElicitationSetting":{ + "TestSetSummary":{ "type":"structure", - "required":["promptSpecification"], "members":{ - "defaultValueSpecification":{"shape":"SlotDefaultValueSpecification"}, - "promptSpecification":{"shape":"PromptSpecification"}, - "sampleUtterances":{ - "shape":"SampleUtterancesList", - "documentation":"

      If you know a specific pattern that users might respond to an Amazon Lex request for a sub slot value, you can provide those utterances to improve accuracy. This is optional. In most cases Amazon Lex is capable of understanding user utterances. This is similar to SampleUtterances for slots.

      " + "testSetId":{ + "shape":"Id", + "documentation":"

      The unique identifier of the test set.

      " }, - "waitAndContinueSpecification":{"shape":"WaitAndContinueSpecification"} + "testSetName":{ + "shape":"Name", + "documentation":"

      The name of the test set.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      The description of the test set.

      " + }, + "modality":{ + "shape":"TestSetModality", + "documentation":"

      Specifies whether the test set contains written or spoken data.

      " + }, + "status":{ + "shape":"TestSetStatus", + "documentation":"

      The status of the test set.

      " + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

      The Amazon Resource Name (ARN) of an IAM role that has permission to access the test set.

      " + }, + "numTurns":{ + "shape":"Count", + "documentation":"

      The number of turns in the test set.

      " + }, + "storageLocation":{ + "shape":"TestSetStorageLocation", + "documentation":"

      Contains information about the location at which the test set is stored.

      " + }, + "creationDateTime":{ + "shape":"Timestamp", + "documentation":"

      The date and time at which the test set was created.

      " + }, + "lastUpdatedDateTime":{ + "shape":"Timestamp", + "documentation":"

      The date and time at which the test set was last updated.

      " + } }, - "documentation":"

      Subslot elicitation settings.

      DefaultValueSpecification is a list of default values for a constituent sub slot in a composite slot. Default values are used when Amazon Lex hasn't determined a value for a slot. You can specify default values from context variables, session attributes, and defined values. This is similar to DefaultValueSpecification for slots.

      PromptSpecification is the prompt that Amazon Lex uses to elicit the sub slot value from the user. This is similar to PromptSpecification for slots.

      " - }, - "SynonymList":{ - "type":"list", - "member":{"shape":"SampleValue"}, - "max":10000, - "min":1 + "documentation":"

      Contains summary information about the test set.

      " }, - "TagKey":{ - "type":"string", - "max":128, - "min":1 - }, - "TagKeyList":{ + "TestSetSummaryList":{ "type":"list", - "member":{"shape":"TagKey"}, - "max":200, - "min":0 - }, - "TagMap":{ - "type":"map", - "key":{"shape":"TagKey"}, - "value":{"shape":"TagValue"}, - "max":200, - "min":0 + "member":{"shape":"TestSetSummary"} }, - "TagResourceRequest":{ + "TestSetTurnRecord":{ "type":"structure", "required":[ - "resourceARN", - "tags" + "recordNumber", + "turnSpecification" ], "members":{ - "resourceARN":{ - "shape":"AmazonResourceName", - "documentation":"

      The Amazon Resource Name (ARN) of the bot, bot alias, or bot channel to tag.

      ", - "location":"uri", - "locationName":"resourceARN" + "recordNumber":{ + "shape":"RecordNumber", + "documentation":"

      The record number associated with the turn.

      " }, - "tags":{ - "shape":"TagMap", - "documentation":"

      A list of tag keys to add to the resource. If a tag key already exists, the existing value is replaced with the new value.

      " + "conversationId":{ + "shape":"TestSetConversationId", + "documentation":"

      The unique identifier for the conversation associated with the turn.

      " + }, + "turnNumber":{ + "shape":"TurnNumber", + "documentation":"

      The number of turns that have elapsed up to that turn.

      " + }, + "turnSpecification":{ + "shape":"TurnSpecification", + "documentation":"

      Contains information about the agent or user turn depending upon type of turn.

      " } - } + }, + "documentation":"

      Contains information about a turn in a test set.

      " }, - "TagResourceResponse":{ + "TestSetTurnRecordList":{ + "type":"list", + "member":{"shape":"TestSetTurnRecord"} + }, + "TestSetTurnResult":{ "type":"structure", "members":{ - } + "agent":{ + "shape":"AgentTurnResult", + "documentation":"

      Contains information about the agent messages in the turn.

      " + }, + "user":{ + "shape":"UserTurnResult", + "documentation":"

      Contains information about the user messages in the turn.

      " + } + }, + "documentation":"

      Contains information about the results of the analysis of a turn in the test set.

      " }, - "TagValue":{ + "TestSetUtteranceText":{ "type":"string", - "max":256, - "min":0 + "max":1024, + "min":1 }, "TextInputSpecification":{ "type":"structure", @@ -8739,6 +10672,25 @@ }, "documentation":"

      Indicates the setting of the location where the transcript is stored.

      " }, + "TurnNumber":{ + "type":"integer", + "max":30, + "min":0 + }, + "TurnSpecification":{ + "type":"structure", + "members":{ + "agentTurn":{ + "shape":"AgentTurnSpecification", + "documentation":"

      Contains information about the agent messages in the turn.

      " + }, + "userTurn":{ + "shape":"UserTurnSpecification", + "documentation":"

      Contains information about the user messages in the turn.

      " + } + }, + "documentation":"

      Contains information about the messages in the turn.

      " + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -9259,7 +11211,7 @@ }, "initialResponseSetting":{ "shape":"InitialResponseSetting", - "documentation":"

      " + "documentation":"

      Configuration settings for a response sent to the user before Amazon Lex starts eliciting slots.

      " } } }, @@ -9340,7 +11292,7 @@ }, "initialResponseSetting":{ "shape":"InitialResponseSetting", - "documentation":"

      " + "documentation":"

      Configuration settings for a response sent to the user before Amazon Lex starts eliciting slots.

      " } } }, @@ -9487,7 +11439,7 @@ }, "botVersion":{ "shape":"DraftBotVersion", - "documentation":"

      The identifier of the slot version that contains the slot. Will always be DRAFT.

      " + "documentation":"

      The version of the bot that contains the slot. Will always be DRAFT.

      " }, "localeId":{ "shape":"LocaleId", @@ -9630,6 +11582,215 @@ } } }, + "UpdateTestSetRequest":{ + "type":"structure", + "required":[ + "testSetId", + "testSetName" + ], + "members":{ + "testSetId":{ + "shape":"Id", + "documentation":"

      The unique identifier of the test set for which the update operation is to be performed.

      ", + "location":"uri", + "locationName":"testSetId" + }, + "testSetName":{ + "shape":"Name", + "documentation":"

      The new test set name.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      The new test set description.

      " + } + } + }, + "UpdateTestSetResponse":{ + "type":"structure", + "members":{ + "testSetId":{ + "shape":"Id", + "documentation":"

      The unique identifier of the test set for which the update operation was performed.

      " + }, + "testSetName":{ + "shape":"Name", + "documentation":"

      The test set name for the updated test set.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      The test set description for the updated test set.

      " + }, + "modality":{ + "shape":"TestSetModality", + "documentation":"

      Indicates whether audio or text is used for the updated test set.

      " + }, + "status":{ + "shape":"TestSetStatus", + "documentation":"

      The status for the updated test set.

      " + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

      The roleARN used for any operation in the test set to access resources in the Amazon Web Services account.

      " + }, + "numTurns":{ + "shape":"Count", + "documentation":"

      The number of conversation turns from the updated test set.

      " + }, + "storageLocation":{ + "shape":"TestSetStorageLocation", + "documentation":"

      The Amazon S3 storage location for the updated test set.

      " + }, + "creationDateTime":{ + "shape":"Timestamp", + "documentation":"

      The creation date and time for the updated test set.

      " + }, + "lastUpdatedDateTime":{ + "shape":"Timestamp", + "documentation":"

      The date and time of the last update for the updated test set.

      " + } + } + }, + "UserTurnInputSpecification":{ + "type":"structure", + "required":["utteranceInput"], + "members":{ + "utteranceInput":{ + "shape":"UtteranceInputSpecification", + "documentation":"

      The utterance input in the user turn.

      " + }, + "requestAttributes":{ + "shape":"StringMap", + "documentation":"

      Request attributes of the user turn.

      " + }, + "sessionState":{ + "shape":"InputSessionStateSpecification", + "documentation":"

      Contains information about the session state in the input.

      " + } + }, + "documentation":"

      Contains information about the user messages in the turn in the input.

      " + }, + "UserTurnIntentOutput":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"Name", + "documentation":"

      The name of the intent.

      " + }, + "slots":{ + "shape":"UserTurnSlotOutputMap", + "documentation":"

      The slots associated with the intent.

      " + } + }, + "documentation":"

      Contains information about the intent that is output for the turn by the test execution.

      " + }, + "UserTurnOutputSpecification":{ + "type":"structure", + "required":["intent"], + "members":{ + "intent":{ + "shape":"UserTurnIntentOutput", + "documentation":"

      Contains information about the intent.

      " + }, + "activeContexts":{ + "shape":"ActiveContextList", + "documentation":"

      The contexts that are active in the turn.

      " + }, + "transcript":{ + "shape":"TestSetUtteranceText", + "documentation":"

      The transcript that is output for the user turn by the test execution.

      " + } + }, + "documentation":"

      Contains results that are output for the user turn by the test execution.

      " + }, + "UserTurnResult":{ + "type":"structure", + "required":[ + "input", + "expectedOutput" + ], + "members":{ + "input":{ + "shape":"UserTurnInputSpecification", + "documentation":"

      Contains information about the user messages in the turn in the input.

      " + }, + "expectedOutput":{ + "shape":"UserTurnOutputSpecification", + "documentation":"

      Contains information about the expected output for the user turn.

      " + }, + "actualOutput":{ + "shape":"UserTurnOutputSpecification", + "documentation":"

      Contains information about the actual output for the user turn.

      " + }, + "errorDetails":{"shape":"ExecutionErrorDetails"}, + "endToEndResult":{ + "shape":"TestResultMatchStatus", + "documentation":"

      Specifies whether the expected and actual outputs match or not, or if there is an error in execution.

      " + }, + "intentMatchResult":{ + "shape":"TestResultMatchStatus", + "documentation":"

      Specifies whether the expected and actual intents match or not.

      " + }, + "slotMatchResult":{ + "shape":"TestResultMatchStatus", + "documentation":"

      Specifies whether the expected and actual slots match or not.

      " + }, + "speechTranscriptionResult":{ + "shape":"TestResultMatchStatus", + "documentation":"

      Specifies whether the expected and actual speech transcriptions match or not, or if there is an error in execution.

      " + }, + "conversationLevelResult":{ + "shape":"ConversationLevelResultDetail", + "documentation":"

      Contains information about the results related to the conversation associated with the user turn.

      " + } + }, + "documentation":"

      Contains the results for the user turn by the test execution.

      " + }, + "UserTurnSlotOutput":{ + "type":"structure", + "members":{ + "value":{ + "shape":"NonEmptyString", + "documentation":"

      The value output by the slot recognition.

      " + }, + "values":{ + "shape":"UserTurnSlotOutputList", + "documentation":"

      Values that are output by the slot recognition.

      " + }, + "subSlots":{ + "shape":"UserTurnSlotOutputMap", + "documentation":"

      A list of items mapping the name of the subslots to information about those subslots.

      " + } + }, + "documentation":"

      Contains information about a slot output by the test set execution.

      " + }, + "UserTurnSlotOutputList":{ + "type":"list", + "member":{"shape":"UserTurnSlotOutput"} + }, + "UserTurnSlotOutputMap":{ + "type":"map", + "key":{"shape":"Name"}, + "value":{"shape":"UserTurnSlotOutput"} + }, + "UserTurnSpecification":{ + "type":"structure", + "required":[ + "input", + "expected" + ], + "members":{ + "input":{ + "shape":"UserTurnInputSpecification", + "documentation":"

      Contains information about the user messages in the turn in the input.

      " + }, + "expected":{ + "shape":"UserTurnOutputSpecification", + "documentation":"

      Contains results about the expected output for the user turn.

      " + } + }, + "documentation":"

      Contains information about the expected and input values for the user turn.

      " + }, "Utterance":{"type":"string"}, "UtteranceAggregationDuration":{ "type":"structure", @@ -9642,6 +11803,68 @@ }, "documentation":"

      Provides parameters for setting the time window and duration for aggregating utterance data.

      " }, + "UtteranceAudioInputSpecification":{ + "type":"structure", + "required":["audioFileS3Location"], + "members":{ + "audioFileS3Location":{ + "shape":"AudioFileS3Location", + "documentation":"

      Amazon S3 file pointing to the audio.

      " + } + }, + "documentation":"

      Contains information about the audio for an utterance.

      " + }, + "UtteranceInputSpecification":{ + "type":"structure", + "members":{ + "textInput":{ + "shape":"TestSetUtteranceText", + "documentation":"

      A text input transcription of the utterance. It is only applicable for test-sets containing text data.

      " + }, + "audioInput":{ + "shape":"UtteranceAudioInputSpecification", + "documentation":"

      Contains information about the audio input for an utterance.

      " + } + }, + "documentation":"

      Contains information about input of an utterance.

      " + }, + "UtteranceLevelTestResultItem":{ + "type":"structure", + "required":[ + "recordNumber", + "turnResult" + ], + "members":{ + "recordNumber":{ + "shape":"RecordNumber", + "documentation":"

      The record number of the result.

      " + }, + "conversationId":{ + "shape":"TestSetConversationId", + "documentation":"

      The unique identifier for the conversation associated with the result.

      " + }, + "turnResult":{ + "shape":"TestSetTurnResult", + "documentation":"

      Contains information about the turn associated with the result.

      " + } + }, + "documentation":"

      Contains information about multiple utterances in the results of a test set execution.

      " + }, + "UtteranceLevelTestResultItemList":{ + "type":"list", + "member":{"shape":"UtteranceLevelTestResultItem"} + }, + "UtteranceLevelTestResults":{ + "type":"structure", + "required":["items"], + "members":{ + "items":{ + "shape":"UtteranceLevelTestResultItemList", + "documentation":"

      Contains information about an utterance in the results of the test set execution.

      " + } + }, + "documentation":"

      Contains information about the utterances in the results of the test set execution.

      " + }, "ValidationException":{ "type":"structure", "members":{ diff --git a/services/lexruntime/pom.xml b/services/lexruntime/pom.xml index 756379ce0c93..a3188782290c 100644 --- a/services/lexruntime/pom.xml +++ b/services/lexruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT lexruntime AWS Java SDK :: Services :: Amazon Lex Runtime diff --git a/services/lexruntimev2/pom.xml b/services/lexruntimev2/pom.xml index 90ce419e8f72..1dcf19a596b0 100644 --- a/services/lexruntimev2/pom.xml +++ b/services/lexruntimev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT lexruntimev2 AWS Java SDK :: Services :: Lex Runtime V2 diff --git a/services/licensemanager/pom.xml b/services/licensemanager/pom.xml index 2e5af71a418a..a94b618ad737 100644 --- a/services/licensemanager/pom.xml +++ b/services/licensemanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT licensemanager AWS Java SDK :: Services :: License Manager diff --git a/services/licensemanagerlinuxsubscriptions/pom.xml b/services/licensemanagerlinuxsubscriptions/pom.xml index 6c2ecf6c507b..82224ec1121a 100644 --- a/services/licensemanagerlinuxsubscriptions/pom.xml +++ b/services/licensemanagerlinuxsubscriptions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT licensemanagerlinuxsubscriptions AWS Java SDK :: Services :: License Manager Linux Subscriptions diff --git a/services/licensemanagerusersubscriptions/pom.xml b/services/licensemanagerusersubscriptions/pom.xml index 50e12de5024f..e1d278c4121f 100644 --- a/services/licensemanagerusersubscriptions/pom.xml +++ b/services/licensemanagerusersubscriptions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT licensemanagerusersubscriptions AWS Java SDK :: Services :: License Manager User Subscriptions diff --git 
a/services/lightsail/pom.xml b/services/lightsail/pom.xml index 9e4091c5dbac..ed4fec81ce3d 100644 --- a/services/lightsail/pom.xml +++ b/services/lightsail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT lightsail AWS Java SDK :: Services :: Amazon Lightsail diff --git a/services/lightsail/src/main/resources/codegen-resources/endpoint-tests.json b/services/lightsail/src/main/resources/codegen-resources/endpoint-tests.json index ef0830464521..0b3e73dea42e 100644 --- a/services/lightsail/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/lightsail/src/main/resources/codegen-resources/endpoint-tests.json @@ -325,6 +325,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { @@ -338,6 +349,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { @@ -351,6 +373,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", 
"expect": { @@ -364,6 +397,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { @@ -427,6 +471,12 @@ "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/lightsail/src/main/resources/codegen-resources/service-2.json b/services/lightsail/src/main/resources/codegen-resources/service-2.json index fba5ddf088e2..dbebbe0c225b 100644 --- a/services/lightsail/src/main/resources/codegen-resources/service-2.json +++ b/services/lightsail/src/main/resources/codegen-resources/service-2.json @@ -1311,7 +1311,7 @@ {"shape":"AccessDeniedException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

      Returns information about one or more Amazon Lightsail SSL/TLS certificates.

      To get a summary of a certificate, ommit includeCertificateDetails from your request. The response will include only the certificate Amazon Resource Name (ARN), certificate name, domain name, and tags.

      " + "documentation":"

      Returns information about one or more Amazon Lightsail SSL/TLS certificates.

      To get a summary of a certificate, omit includeCertificateDetails from your request. The response will include only the certificate Amazon Resource Name (ARN), certificate name, domain name, and tags.

      " }, "GetCloudFormationStackRecords":{ "name":"GetCloudFormationStackRecords", @@ -3960,7 +3960,7 @@ "documentation":"

      The support code. Include this code in your email to support when you have questions about your Lightsail certificate. This code enables our support team to look up your Lightsail information more easily.

      " } }, - "documentation":"

      Describes the full details of an Amazon Lightsail SSL/TLS certificate.

      To get a summary of a certificate, use the GetCertificates action and ommit includeCertificateDetails from your request. The response will include only the certificate Amazon Resource Name (ARN), certificate name, domain name, and tags.

      " + "documentation":"

      Describes the full details of an Amazon Lightsail SSL/TLS certificate.

      To get a summary of a certificate, use the GetCertificates action and omit includeCertificateDetails from your request. The response will include only the certificate Amazon Resource Name (ARN), certificate name, domain name, and tags.

      " }, "CertificateDomainValidationStatus":{ "type":"string", @@ -7139,6 +7139,10 @@ "certificateName":{ "shape":"CertificateName", "documentation":"

      The name for the certificate for which to return information.

      When omitted, the response includes all of your certificates in the Amazon Web Services Region where the request is made.

      " + }, + "pageToken":{ + "shape":"string", + "documentation":"

      The token to advance to the next page of results from your request.

      To get a page token, perform an initial GetCertificates request. If your results are paginated, the response will return a next page token that you can specify as the page token in a subsequent request.

      " } } }, @@ -7148,6 +7152,10 @@ "certificates":{ "shape":"CertificateSummaryList", "documentation":"

      An object that describes certificates.

      " + }, + "nextPageToken":{ + "shape":"string", + "documentation":"

      If NextPageToken is returned there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

      " } } }, @@ -7376,11 +7384,11 @@ }, "startTime":{ "shape":"IsoDate", - "documentation":"

      The cost estimate start time.

      Constraints:

      • Specified in Coordinated Universal Time (UTC).

      • Specified in the Unix time format.

        For example, if you wish to use a start time of October 1, 2018, at 8 PM UTC, specify 1538424000 as the start time.

      You can convert a human-friendly time to Unix time format using a converter like Epoch converter.

      " + "documentation":"

      The cost estimate start time.

      Constraints:

      • Specified in Coordinated Universal Time (UTC).

      • Specified in the Unix time format.

        For example, if you want to use a start time of October 1, 2018, at 8 PM UTC, specify 1538424000 as the start time.

      You can convert a human-friendly time to Unix time format using a converter like Epoch converter.

      " }, "endTime":{ "shape":"IsoDate", - "documentation":"

      The cost estimate end time.

      Constraints:

      • Specified in Coordinated Universal Time (UTC).

      • Specified in the Unix time format.

        For example, if you wish to use an end time of October 1, 2018, at 9 PM UTC, specify 1538427600 as the end time.

      You can convert a human-friendly time to Unix time format using a converter like Epoch converter.

      " + "documentation":"

      The cost estimate end time.

      Constraints:

      • Specified in Coordinated Universal Time (UTC).

      • Specified in the Unix time format.

        For example, if you want to use an end time of October 1, 2018, at 9 PM UTC, specify 1538427600 as the end time.

      You can convert a human-friendly time to Unix time format using a converter like Epoch converter.

      " } } }, diff --git a/services/location/pom.xml b/services/location/pom.xml index b5f8130af9c6..4eacf70b52a6 100644 --- a/services/location/pom.xml +++ b/services/location/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT location AWS Java SDK :: Services :: Location diff --git a/services/location/src/main/resources/codegen-resources/customization.config b/services/location/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..0e729acd0371 --- /dev/null +++ b/services/location/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "generateEndpointClientTests": true +} diff --git a/services/location/src/main/resources/codegen-resources/endpoint-tests.json b/services/location/src/main/resources/codegen-resources/endpoint-tests.json index 507a3285992f..2281499b47f3 100644 --- a/services/location/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/location/src/main/resources/codegen-resources/endpoint-tests.json @@ -9,8 +9,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -22,8 +22,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -35,8 +35,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -48,8 +48,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,8 +61,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -74,8 +74,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -87,8 +87,8 @@ }, "params": { "Region": 
"cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -100,8 +100,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -113,8 +113,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -126,8 +126,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -139,8 +139,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -152,8 +152,19 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -165,8 +176,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -178,8 +200,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { 
+ "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -191,8 +224,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -204,8 +248,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -217,8 +261,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -230,8 +274,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -242,8 +286,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -254,10 +298,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/location/src/main/resources/codegen-resources/service-2.json b/services/location/src/main/resources/codegen-resources/service-2.json index b68e8bf6ed55..e58db3bed5ba 100644 --- a/services/location/src/main/resources/codegen-resources/service-2.json +++ b/services/location/src/main/resources/codegen-resources/service-2.json @@ -144,7 +144,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

      Uploads position update data for one or more devices to a tracker resource. Amazon Location uses the data when it reports the last known device position and position history. Amazon Location retains location data for 30 days.

      Position updates are handled based on the PositionFiltering property of the tracker. When PositionFiltering is set to TimeBased, updates are evaluated against linked geofence collections, and location data is stored at a maximum of one position per 30 second interval. If your update frequency is more often than every 30 seconds, only one update per 30 seconds is stored for each unique device ID.

      When PositionFiltering is set to DistanceBased filtering, location data is stored and evaluated against linked geofence collections only if the device has moved more than 30 m (98.4 ft).

      When PositionFiltering is set to AccuracyBased filtering, location data is stored and evaluated against linked geofence collections only if the device has moved more than the measured accuracy. For example, if two consecutive updates from a device have a horizontal accuracy of 5 m and 10 m, the second update is neither stored or evaluated if the device has moved less than 15 m. If PositionFiltering is set to AccuracyBased filtering, Amazon Location uses the default value { \"Horizontal\": 0} when accuracy is not provided on a DevicePositionUpdate.

      ", + "documentation":"

      Uploads position update data for one or more devices to a tracker resource (up to 10 devices per batch). Amazon Location uses the data when it reports the last known device position and position history. Amazon Location retains location data for 30 days.

      Position updates are handled based on the PositionFiltering property of the tracker. When PositionFiltering is set to TimeBased, updates are evaluated against linked geofence collections, and location data is stored at a maximum of one position per 30 second interval. If your update frequency is more often than every 30 seconds, only one update per 30 seconds is stored for each unique device ID.

      When PositionFiltering is set to DistanceBased filtering, location data is stored and evaluated against linked geofence collections only if the device has moved more than 30 m (98.4 ft).

      When PositionFiltering is set to AccuracyBased filtering, location data is stored and evaluated against linked geofence collections only if the device has moved more than the measured accuracy. For example, if two consecutive updates from a device have a horizontal accuracy of 5 m and 10 m, the second update is neither stored or evaluated if the device has moved less than 15 m. If PositionFiltering is set to AccuracyBased filtering, Amazon Location uses the default value { \"Horizontal\": 0} when accuracy is not provided on a DevicePositionUpdate.

      ", "endpoint":{"hostPrefix":"tracking."} }, "CalculateRoute":{ @@ -304,6 +304,7 @@ {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, {"shape":"ThrottlingException"} ], "documentation":"

      Creates a tracker resource in your Amazon Web Services account, which lets you retrieve current and historical location of devices.

      ", @@ -1160,7 +1161,9 @@ }, "ApiKeyAction":{ "type":"string", - "pattern":"^geo:GetMap\\*$" + "max":200, + "min":5, + "pattern":"^geo:\\w*\\*?$" }, "ApiKeyFilter":{ "type":"structure", @@ -1197,7 +1200,7 @@ "ApiKeyRestrictionsAllowActionsList":{ "type":"list", "member":{"shape":"ApiKeyAction"}, - "max":1, + "max":7, "min":1 }, "ApiKeyRestrictionsAllowReferersList":{ @@ -1571,6 +1574,10 @@ "shape":"Id", "documentation":"

      The identifier for the geofence to be stored in a given geofence collection.

      " }, + "GeofenceProperties":{ + "shape":"PropertyMap", + "documentation":"

      Specifies additional user-defined properties to store with the Geofence. An array of key-value pairs.

      " + }, "Geometry":{ "shape":"GeofenceGeometry", "documentation":"

      Contains the details of the position of the geofence. Can be either a polygon or a circle. Including both will return a validation error.

      Each geofence polygon can have a maximum of 1,000 vertices.

      " @@ -1664,7 +1671,7 @@ }, "Updates":{ "shape":"BatchUpdateDevicePositionRequestUpdatesList", - "documentation":"

      Contains the position update details for each device.

      " + "documentation":"

      Contains the position update details for each device, up to 10 devices.

      " } } }, @@ -2032,6 +2039,18 @@ "type":"string", "pattern":"^[A-Z]{3}$" }, + "CountryCode3":{ + "type":"string", + "max":3, + "min":3, + "pattern":"^[A-Z]{3}$" + }, + "CountryCode3OrEmpty":{ + "type":"string", + "max":3, + "min":0, + "pattern":"^[A-Z]{3}$|^$" + }, "CountryCodeList":{ "type":"list", "member":{"shape":"CountryCode"}, @@ -2255,7 +2274,7 @@ "documentation":"

      The timestamp for when the place index resource was created in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

      " }, "IndexArn":{ - "shape":"Arn", + "shape":"GeoArn", "documentation":"

      The Amazon Resource Name (ARN) for the place index resource. Used to specify a resource across Amazon Web Services.

      • Format example: arn:aws:geo:region:account-id:place-index/ExamplePlaceIndex

      " }, "IndexName":{ @@ -2304,7 +2323,7 @@ ], "members":{ "CalculatorArn":{ - "shape":"Arn", + "shape":"GeoArn", "documentation":"

      The Amazon Resource Name (ARN) for the route calculator resource. Use the ARN when you specify a resource across all Amazon Web Services.

      • Format example: arn:aws:geo:region:account-id:route-calculator/ExampleCalculator

      " }, "CalculatorName":{ @@ -2717,7 +2736,7 @@ "documentation":"

      The optional description for the place index resource.

      " }, "IndexArn":{ - "shape":"Arn", + "shape":"GeoArn", "documentation":"

      The Amazon Resource Name (ARN) for the place index resource. Used to specify a resource across Amazon Web Services.

      • Format example: arn:aws:geo:region:account-id:place-index/ExamplePlaceIndex

      " }, "IndexName":{ @@ -2764,7 +2783,7 @@ ], "members":{ "CalculatorArn":{ - "shape":"Arn", + "shape":"GeoArn", "documentation":"

      The Amazon Resource Name (ARN) for the Route calculator resource. Use the ARN when you specify a resource across Amazon Web Services.

      • Format example: arn:aws:geo:region:account-id:route-calculator/ExampleCalculator

      " }, "CalculatorName":{ @@ -2981,6 +3000,12 @@ "type":"double", "box":true }, + "FilterPlaceCategoryList":{ + "type":"list", + "member":{"shape":"PlaceCategory"}, + "max":5, + "min":1 + }, "GeoArn":{ "type":"string", "max":1600, @@ -3152,6 +3177,10 @@ "shape":"Id", "documentation":"

      The geofence identifier.

      " }, + "GeofenceProperties":{ + "shape":"PropertyMap", + "documentation":"

      Contains additional user-defined properties stored with the geofence. An array of key-value pairs.

      " + }, "Geometry":{ "shape":"GeofenceGeometry", "documentation":"

      Contains the geofence geometry details describing a polygon or a circle.

      " @@ -3176,7 +3205,7 @@ "members":{ "FontStack":{ "shape":"String", - "documentation":"

      A comma-separated list of fonts to load glyphs from in order of preference. For example, Noto Sans Regular, Arial Unicode.

      Valid fonts stacks for Esri styles:

      • VectorEsriDarkGrayCanvas – Ubuntu Medium Italic | Ubuntu Medium | Ubuntu Italic | Ubuntu Regular | Ubuntu Bold

      • VectorEsriLightGrayCanvas – Ubuntu Italic | Ubuntu Regular | Ubuntu Light | Ubuntu Bold

      • VectorEsriTopographic – Noto Sans Italic | Noto Sans Regular | Noto Sans Bold | Noto Serif Regular | Roboto Condensed Light Italic

      • VectorEsriStreets – Arial Regular | Arial Italic | Arial Bold

      • VectorEsriNavigation – Arial Regular | Arial Italic | Arial Bold

      Valid font stacks for HERE Technologies styles:

      • VectorHereContrast – Fira GO Regular | Fira GO Bold

      • VectorHereExplore, VectorHereExploreTruck, HybridHereExploreSatellite – Fira GO Italic | Fira GO Map | Fira GO Map Bold | Noto Sans CJK JP Bold | Noto Sans CJK JP Light | Noto Sans CJK JP Regular

      Valid font stacks for GrabMaps styles:

      • VectorGrabStandardLight, VectorGrabStandardDark – Noto Sans Regular | Noto Sans Medium | Noto Sans Bold

      Valid font stacks for Open Data styles:

      • VectorOpenDataStandardLight, VectorOpenDataStandardDark, VectorOpenDataVisualizationLight, VectorOpenDataVisualizationDark – Amazon Ember Regular,Noto Sans Regular | Amazon Ember Bold,Noto Sans Bold | Amazon Ember Medium,Noto Sans Medium | Amazon Ember Regular Italic,Noto Sans Italic | Amazon Ember Condensed RC Regular,Noto Sans Regular | Amazon Ember Condensed RC Bold,Noto Sans Bold

      The fonts used by the Open Data map styles are combined fonts that use Amazon Ember for most glyphs but Noto Sans for glyphs unsupported by Amazon Ember.

      ", + "documentation":"

      A comma-separated list of fonts to load glyphs from in order of preference. For example, Noto Sans Regular, Arial Unicode.

      Valid fonts stacks for Esri styles:

      • VectorEsriDarkGrayCanvas – Ubuntu Medium Italic | Ubuntu Medium | Ubuntu Italic | Ubuntu Regular | Ubuntu Bold

      • VectorEsriLightGrayCanvas – Ubuntu Italic | Ubuntu Regular | Ubuntu Light | Ubuntu Bold

      • VectorEsriTopographic – Noto Sans Italic | Noto Sans Regular | Noto Sans Bold | Noto Serif Regular | Roboto Condensed Light Italic

      • VectorEsriStreets – Arial Regular | Arial Italic | Arial Bold

      • VectorEsriNavigation – Arial Regular | Arial Italic | Arial Bold

      Valid font stacks for HERE Technologies styles:

      • VectorHereContrast – Fira GO Regular | Fira GO Bold

      • VectorHereExplore, VectorHereExploreTruck, HybridHereExploreSatellite – Fira GO Italic | Fira GO Map | Fira GO Map Bold | Noto Sans CJK JP Bold | Noto Sans CJK JP Light | Noto Sans CJK JP Regular

      Valid font stacks for GrabMaps styles:

      • VectorGrabStandardLight, VectorGrabStandardDark – Noto Sans Regular | Noto Sans Medium | Noto Sans Bold

      Valid font stacks for Open Data styles:

      • VectorOpenDataStandardLight, VectorOpenDataStandardDark, VectorOpenDataVisualizationLight, VectorOpenDataVisualizationDark – Amazon Ember Regular,Noto Sans Regular | Amazon Ember Bold,Noto Sans Bold | Amazon Ember Medium,Noto Sans Medium | Amazon Ember Regular Italic,Noto Sans Italic | Amazon Ember Condensed RC Regular,Noto Sans Regular | Amazon Ember Condensed RC Bold,Noto Sans Bold | Amazon Ember Regular,Noto Sans Regular,Noto Sans Arabic Regular | Amazon Ember Condensed RC Bold,Noto Sans Bold,Noto Sans Arabic Condensed Bold | Amazon Ember Bold,Noto Sans Bold,Noto Sans Arabic Bold | Amazon Ember Regular Italic,Noto Sans Italic,Noto Sans Arabic Regular | Amazon Ember Condensed RC Regular,Noto Sans Regular,Noto Sans Arabic Condensed Regular | Amazon Ember Medium,Noto Sans Medium,Noto Sans Arabic Medium

      The fonts used by the Open Data map styles are combined fonts that use Amazon Ember for most glyphs but Noto Sans for glyphs unsupported by Amazon Ember.

      ", "location":"uri", "locationName":"FontStack" }, @@ -3719,6 +3748,10 @@ "shape":"Id", "documentation":"

      The geofence identifier.

      " }, + "GeofenceProperties":{ + "shape":"PropertyMap", + "documentation":"

      Contains additional user-defined properties stored with the geofence. An array of key-value pairs.

      " + }, "Geometry":{ "shape":"GeofenceGeometry", "documentation":"

      Contains the geofence geometry details describing a polygon or a circle.

      " @@ -4223,13 +4256,27 @@ "type":"structure", "required":["Style"], "members":{ + "PoliticalView":{ + "shape":"CountryCode3", + "documentation":"

      Specifies the political view for the style. Leave unset to not use a political view, or, for styles that support specific political views, you can choose a view, such as IND for the Indian view.

      Default is unset.

      Not all map resources or styles support political view styles. See Political views for more information.

      " + }, "Style":{ "shape":"MapStyle", - "documentation":"

      Specifies the map style selected from an available data provider.

      Valid Esri map styles:

      • VectorEsriDarkGrayCanvas – The Esri Dark Gray Canvas map style. A vector basemap with a dark gray, neutral background with minimal colors, labels, and features that's designed to draw attention to your thematic content.

      • RasterEsriImagery – The Esri Imagery map style. A raster basemap that provides one meter or better satellite and aerial imagery in many parts of the world and lower resolution satellite imagery worldwide.

      • VectorEsriLightGrayCanvas – The Esri Light Gray Canvas map style, which provides a detailed vector basemap with a light gray, neutral background style with minimal colors, labels, and features that's designed to draw attention to your thematic content.

      • VectorEsriTopographic – The Esri Light map style, which provides a detailed vector basemap with a classic Esri map style.

      • VectorEsriStreets – The Esri World Streets map style, which provides a detailed vector basemap for the world symbolized with a classic Esri street map style. The vector tile layer is similar in content and style to the World Street Map raster map.

      • VectorEsriNavigation – The Esri World Navigation map style, which provides a detailed basemap for the world symbolized with a custom navigation map style that's designed for use during the day in mobile devices.

      Valid HERE Technologies map styles:

      • VectorHereContrast – The HERE Contrast (Berlin) map style is a high contrast detailed base map of the world that blends 3D and 2D rendering.

        The VectorHereContrast style has been renamed from VectorHereBerlin. VectorHereBerlin has been deprecated, but will continue to work in applications that use it.

      • VectorHereExplore – A default HERE map style containing a neutral, global map and its features including roads, buildings, landmarks, and water features. It also now includes a fully designed map of Japan.

      • VectorHereExploreTruck – A global map containing truck restrictions and attributes (e.g. width / height / HAZMAT) symbolized with highlighted segments and icons on top of HERE Explore to support use cases within transport and logistics.

      • RasterHereExploreSatellite – A global map containing high resolution satellite imagery.

      • HybridHereExploreSatellite – A global map displaying the road network, street names, and city labels over satellite imagery. This style will automatically retrieve both raster and vector tiles, and your charges will be based on total tiles retrieved.

        Hybrid styles use both vector and raster tiles when rendering the map that you see. This means that more tiles are retrieved than when using either vector or raster tiles alone. Your charges will include all tiles retrieved.

      Valid GrabMaps map styles:

      • VectorGrabStandardLight – The Grab Standard Light map style provides a basemap with detailed land use coloring, area names, roads, landmarks, and points of interest covering Southeast Asia.

      • VectorGrabStandardDark – The Grab Standard Dark map style provides a dark variation of the standard basemap covering Southeast Asia.

      Grab provides maps only for countries in Southeast Asia, and is only available in the Asia Pacific (Singapore) Region (ap-southeast-1). For more information, see GrabMaps countries and area covered.

      Valid Open Data map styles:

      • VectorOpenDataStandardLight – The Open Data Standard Light map style provides a detailed basemap for the world suitable for website and mobile application use. The map includes highways major roads, minor roads, railways, water features, cities, parks, landmarks, building footprints, and administrative boundaries.

      • VectorOpenDataStandardDark – Open Data Standard Dark is a dark-themed map style that provides a detailed basemap for the world suitable for website and mobile application use. The map includes highways major roads, minor roads, railways, water features, cities, parks, landmarks, building footprints, and administrative boundaries.

      • VectorOpenDataVisualizationLight – The Open Data Visualization Light map style is a light-themed style with muted colors and fewer features that aids in understanding overlaid data.

      • VectorOpenDataVisualizationDark – The Open Data Visualization Dark map style is a dark-themed style with muted colors and fewer features that aids in understanding overlaid data.

      " + "documentation":"

      Specifies the map style selected from an available data provider.

      Valid Esri map styles:

      • VectorEsriDarkGrayCanvas – The Esri Dark Gray Canvas map style. A vector basemap with a dark gray, neutral background with minimal colors, labels, and features that's designed to draw attention to your thematic content.

      • RasterEsriImagery – The Esri Imagery map style. A raster basemap that provides one meter or better satellite and aerial imagery in many parts of the world and lower resolution satellite imagery worldwide.

      • VectorEsriLightGrayCanvas – The Esri Light Gray Canvas map style, which provides a detailed vector basemap with a light gray, neutral background style with minimal colors, labels, and features that's designed to draw attention to your thematic content.

      • VectorEsriTopographic – The Esri Light map style, which provides a detailed vector basemap with a classic Esri map style.

      • VectorEsriStreets – The Esri Street Map style, which provides a detailed vector basemap for the world symbolized with a classic Esri street map style. The vector tile layer is similar in content and style to the World Street Map raster map.

      • VectorEsriNavigation – The Esri Navigation map style, which provides a detailed basemap for the world symbolized with a custom navigation map style that's designed for use during the day in mobile devices.

      Valid HERE Technologies map styles:

      • VectorHereContrast – The HERE Contrast (Berlin) map style is a high contrast detailed base map of the world that blends 3D and 2D rendering.

        The VectorHereContrast style has been renamed from VectorHereBerlin. VectorHereBerlin has been deprecated, but will continue to work in applications that use it.

      • VectorHereExplore – A default HERE map style containing a neutral, global map and its features including roads, buildings, landmarks, and water features. It also now includes a fully designed map of Japan.

      • VectorHereExploreTruck – A global map containing truck restrictions and attributes (e.g. width / height / HAZMAT) symbolized with highlighted segments and icons on top of HERE Explore to support use cases within transport and logistics.

      • RasterHereExploreSatellite – A global map containing high resolution satellite imagery.

      • HybridHereExploreSatellite – A global map displaying the road network, street names, and city labels over satellite imagery. This style will automatically retrieve both raster and vector tiles, and your charges will be based on total tiles retrieved.

        Hybrid styles use both vector and raster tiles when rendering the map that you see. This means that more tiles are retrieved than when using either vector or raster tiles alone. Your charges will include all tiles retrieved.

      Valid GrabMaps map styles:

      • VectorGrabStandardLight – The Grab Standard Light map style provides a basemap with detailed land use coloring, area names, roads, landmarks, and points of interest covering Southeast Asia.

      • VectorGrabStandardDark – The Grab Standard Dark map style provides a dark variation of the standard basemap covering Southeast Asia.

      Grab provides maps only for countries in Southeast Asia, and is only available in the Asia Pacific (Singapore) Region (ap-southeast-1). For more information, see GrabMaps countries and area covered.

      Valid Open Data map styles:

      • VectorOpenDataStandardLight – The Open Data Standard Light map style provides a detailed basemap for the world suitable for website and mobile application use. The map includes highways, major roads, minor roads, railways, water features, cities, parks, landmarks, building footprints, and administrative boundaries.

      • VectorOpenDataStandardDark – Open Data Standard Dark is a dark-themed map style that provides a detailed basemap for the world suitable for website and mobile application use. The map includes highways, major roads, minor roads, railways, water features, cities, parks, landmarks, building footprints, and administrative boundaries.

      • VectorOpenDataVisualizationLight – The Open Data Visualization Light map style is a light-themed style with muted colors and fewer features that aids in understanding overlaid data.

      • VectorOpenDataVisualizationDark – The Open Data Visualization Dark map style is a dark-themed style with muted colors and fewer features that aids in understanding overlaid data.

      " } }, "documentation":"

      Specifies the map tile style selected from an available provider.

      " }, + "MapConfigurationUpdate":{ + "type":"structure", + "members":{ + "PoliticalView":{ + "shape":"CountryCode3OrEmpty", + "documentation":"

      Specifies the political view for the style. Set to an empty string to not use a political view, or, for styles that support specific political views, you can choose a view, such as IND for the Indian view.

      Not all map resources or styles support political view styles. See Political views for more information.

      " + } + }, + "documentation":"

      Specifies the political view for the style.

      " + }, "MapStyle":{ "type":"string", "max":100, @@ -4244,6 +4291,10 @@ "shape":"String", "documentation":"

      The numerical portion of an address, such as a building number.

      " }, + "Categories":{ + "shape":"PlaceCategoryList", + "documentation":"

      The Amazon Location categories that describe this Place.

      For more information about using categories, including a list of Amazon Location categories, see Categories and filtering, in the Amazon Location Service Developer Guide.

      " + }, "Country":{ "shape":"String", "documentation":"

      A country/region specified using ISO 3166 3-digit country/region code. For example, CAN.

      " @@ -4281,21 +4332,36 @@ "shape":"String", "documentation":"

      A county, or an area that's part of a larger region. For example, Metro Vancouver.

      " }, + "SupplementalCategories":{ + "shape":"PlaceSupplementalCategoryList", + "documentation":"

      Categories from the data provider that describe the Place that are not mapped to any Amazon Location categories.

      " + }, "TimeZone":{ "shape":"TimeZone", - "documentation":"

      The time zone in which the Place is located. Returned only when using HERE as the selected partner.

      " + "documentation":"

      The time zone in which the Place is located. Returned only when using HERE or Grab as the selected partner.

      " }, "UnitNumber":{ "shape":"String", - "documentation":"

      For addresses with multiple units, the unit identifier. Can include numbers and letters, for example 3B or Unit 123.

      Returned only for a place index that uses Esri as a data provider. Is not returned for SearchPlaceIndexForPosition.

      " + "documentation":"

      For addresses with multiple units, the unit identifier. Can include numbers and letters, for example 3B or Unit 123.

      Returned only for a place index that uses Esri or Grab as a data provider. Is not returned for SearchPlaceIndexForPosition.

      " }, "UnitType":{ "shape":"String", - "documentation":"

      For addresses with a UnitNumber, the type of unit. For example, Apartment.

      " + "documentation":"

      For addresses with a UnitNumber, the type of unit. For example, Apartment.

      Returned only for a place index that uses Esri as a data provider.

      " } }, "documentation":"

      Contains details about addresses or points of interest that match the search criteria.

      Not all details are included with all responses. Some details may only be returned by specific data partners.

      " }, + "PlaceCategory":{ + "type":"string", + "max":35, + "min":0 + }, + "PlaceCategoryList":{ + "type":"list", + "member":{"shape":"PlaceCategory"}, + "max":10, + "min":1 + }, "PlaceGeometry":{ "type":"structure", "members":{ @@ -4312,6 +4378,17 @@ "max":50, "min":1 }, + "PlaceSupplementalCategory":{ + "type":"string", + "max":35, + "min":0 + }, + "PlaceSupplementalCategoryList":{ + "type":"list", + "member":{"shape":"PlaceSupplementalCategory"}, + "max":10, + "min":1 + }, "Position":{ "type":"list", "member":{"shape":"Double"}, @@ -4390,6 +4467,10 @@ "location":"uri", "locationName":"GeofenceId" }, + "GeofenceProperties":{ + "shape":"PropertyMap", + "documentation":"

      Specifies additional user-defined properties to store with the Geofence. An array of key-value pairs.

      " + }, "Geometry":{ "shape":"GeofenceGeometry", "documentation":"

      Contains the details to specify the position of the geofence. Can be either a polygon or a circle. Including both will return a validation error.

      Each geofence polygon can have a maximum of 1,000 vertices.

      " @@ -4548,9 +4629,17 @@ "type":"structure", "required":["Text"], "members":{ + "Categories":{ + "shape":"PlaceCategoryList", + "documentation":"

      The Amazon Location categories that describe the Place.

      For more information about using categories, including a list of Amazon Location categories, see Categories and filtering, in the Amazon Location Service Developer Guide.

      " + }, "PlaceId":{ "shape":"PlaceId", - "documentation":"

      The unique identifier of the place. You can use this with the GetPlace operation to find the place again later.

      For SearchPlaceIndexForSuggestions operations, the PlaceId is returned by place indexes that use Esri, Grab, or HERE as data providers.

      " + "documentation":"

      The unique identifier of the Place. You can use this with the GetPlace operation to find the place again later, or to get full information for the Place.

      The GetPlace request must use the same PlaceIndex resource as the SearchPlaceIndexForSuggestions that generated the Place ID.

      For SearchPlaceIndexForSuggestions operations, the PlaceId is returned by place indexes that use Esri, Grab, or HERE as data providers.

      " + }, + "SupplementalCategories":{ + "shape":"PlaceSupplementalCategoryList", + "documentation":"

      Categories from the data provider that describe the Place that are not mapped to any Amazon Location categories.

      " }, "Text":{ "shape":"String", @@ -4686,6 +4775,10 @@ "shape":"BoundingBox", "documentation":"

      An optional parameter that limits the search results by returning only suggestions within a specified bounding box.

      If provided, this parameter must contain a total of four consecutive numbers in two pairs. The first pair of numbers represents the X and Y coordinates (longitude and latitude, respectively) of the southwest corner of the bounding box; the second pair of numbers represents the X and Y coordinates (longitude and latitude, respectively) of the northeast corner of the bounding box.

      For example, [-12.7935, -37.4835, -12.0684, -36.9542] represents a bounding box where the southwest corner has longitude -12.7935 and latitude -37.4835, and the northeast corner has longitude -12.0684 and latitude -36.9542.

      FilterBBox and BiasPosition are mutually exclusive. Specifying both options results in an error.

      " }, + "FilterCategories":{ + "shape":"FilterPlaceCategoryList", + "documentation":"

      A list of one or more Amazon Location categories to filter the returned places. If you include more than one category, the results will include results that match any of the categories listed.

      For more information about using categories, including a list of Amazon Location categories, see Categories and filtering, in the Amazon Location Service Developer Guide.

      " + }, "FilterCountries":{ "shape":"CountryCodeList", "documentation":"

      An optional parameter that limits the search results by returning only suggestions within the provided list of countries.

      • Use the ISO 3166 3-digit country code. For example, Australia uses three upper-case characters: AUS.

      " @@ -4758,6 +4851,10 @@ "shape":"BoundingBox", "documentation":"

      Contains the coordinates for the optional bounding box specified in the request.

      " }, + "FilterCategories":{ + "shape":"FilterPlaceCategoryList", + "documentation":"

      The optional category filter specified in the request.

      " + }, "FilterCountries":{ "shape":"CountryCodeList", "documentation":"

      Contains the optional country filter specified in the request.

      " @@ -4792,6 +4889,10 @@ "shape":"BoundingBox", "documentation":"

      An optional parameter that limits the search results by returning only places that are within the provided bounding box.

      If provided, this parameter must contain a total of four consecutive numbers in two pairs. The first pair of numbers represents the X and Y coordinates (longitude and latitude, respectively) of the southwest corner of the bounding box; the second pair of numbers represents the X and Y coordinates (longitude and latitude, respectively) of the northeast corner of the bounding box.

      For example, [-12.7935, -37.4835, -12.0684, -36.9542] represents a bounding box where the southwest corner has longitude -12.7935 and latitude -37.4835, and the northeast corner has longitude -12.0684 and latitude -36.9542.

      FilterBBox and BiasPosition are mutually exclusive. Specifying both options results in an error.

      " }, + "FilterCategories":{ + "shape":"FilterPlaceCategoryList", + "documentation":"

      A list of one or more Amazon Location categories to filter the returned places. If you include more than one category, the results will include results that match any of the categories listed.

      For more information about using categories, including a list of Amazon Location categories, see Categories and filtering, in the Amazon Location Service Developer Guide.

      " + }, "FilterCountries":{ "shape":"CountryCodeList", "documentation":"

      An optional parameter that limits the search results by returning only places that are in a specified list of countries.

      • Valid values include ISO 3166 3-digit country codes. For example, Australia uses three upper-case characters: AUS.

      " @@ -4858,6 +4959,10 @@ "shape":"BoundingBox", "documentation":"

      Contains the coordinates for the optional bounding box specified in the request.

      " }, + "FilterCategories":{ + "shape":"FilterPlaceCategoryList", + "documentation":"

      The optional category filter specified in the request.

      " + }, "FilterCountries":{ "shape":"CountryCodeList", "documentation":"

      Contains the optional country filter specified in the request.

      " @@ -5251,6 +5356,10 @@ "type":"structure", "required":["MapName"], "members":{ + "ConfigurationUpdate":{ + "shape":"MapConfigurationUpdate", + "documentation":"

      Updates the parts of the map configuration that can be updated, including the political view.

      " + }, "Description":{ "shape":"ResourceDescription", "documentation":"

      Updates the description for the map resource.

      " @@ -5326,7 +5435,7 @@ ], "members":{ "IndexArn":{ - "shape":"Arn", + "shape":"GeoArn", "documentation":"

      The Amazon Resource Name (ARN) of the updated place index resource. Used to specify a resource across Amazon Web Services.

      • Format example: arn:aws:geo:region:account-id:place-index/ExamplePlaceIndex

      " }, "IndexName":{ @@ -5370,7 +5479,7 @@ ], "members":{ "CalculatorArn":{ - "shape":"Arn", + "shape":"GeoArn", "documentation":"

      The Amazon Resource Name (ARN) of the updated route calculator resource. Used to specify a resource across AWS.

      • Format example: arn:aws:geo:region:account-id:route-calculator/ExampleCalculator

      " }, "CalculatorName":{ diff --git a/services/lookoutequipment/pom.xml b/services/lookoutequipment/pom.xml index 6e011955bb1e..e33ec7a51bc2 100644 --- a/services/lookoutequipment/pom.xml +++ b/services/lookoutequipment/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT lookoutequipment AWS Java SDK :: Services :: Lookout Equipment diff --git a/services/lookoutmetrics/pom.xml b/services/lookoutmetrics/pom.xml index d47f1c4d0060..af638a1561c7 100644 --- a/services/lookoutmetrics/pom.xml +++ b/services/lookoutmetrics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT lookoutmetrics AWS Java SDK :: Services :: Lookout Metrics diff --git a/services/lookoutvision/pom.xml b/services/lookoutvision/pom.xml index d250c1564991..d7fb105b82d5 100644 --- a/services/lookoutvision/pom.xml +++ b/services/lookoutvision/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT lookoutvision AWS Java SDK :: Services :: Lookout Vision diff --git a/services/m2/pom.xml b/services/m2/pom.xml index 1db12a98998c..4c2a7fd752d4 100644 --- a/services/m2/pom.xml +++ b/services/m2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT m2 AWS Java SDK :: Services :: M2 diff --git a/services/m2/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/m2/src/main/resources/codegen-resources/endpoint-rule-set.json index 8c0c66bd0d15..ef3146718068 100644 --- a/services/m2/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/m2/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + 
"ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,154 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", + "fn": "booleanEquals", 
"argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://m2-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://m2-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ 
+ { + "conditions": [], + "endpoint": { + "url": "https://m2-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://m2-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://m2.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -286,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://m2.{Region}.{PartitionResult#dualStackDnsSuffix}", + "url": "https://m2.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -295,28 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition 
does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://m2.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/m2/src/main/resources/codegen-resources/endpoint-tests.json b/services/m2/src/main/resources/codegen-resources/endpoint-tests.json index 935c615f33d3..46b2e6c2c5d8 100644 --- a/services/m2/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/m2/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,211 +1,248 @@ { "testCases": [ { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://m2-fips.us-isob-east-1.sc2s.sgov.gov" + "url": "https://m2.ap-southeast-2.amazonaws.com" } }, "params": { - "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://m2.us-isob-east-1.sc2s.sgov.gov" + "url": "https://m2.ca-central-1.amazonaws.com" } }, "params": { - "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "Region": "ca-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://m2-fips.us-gov-east-1.api.aws" + "url": "https://m2.eu-central-1.amazonaws.com" } }, 
"params": { - "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://m2-fips.us-gov-east-1.amazonaws.com" + "url": "https://m2.eu-west-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://m2.us-gov-east-1.api.aws" + "url": "https://m2.sa-east-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://m2.us-gov-east-1.amazonaws.com" + "url": "https://m2.us-east-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://m2.sa-east-1.amazonaws.com" + "url": "https://m2.us-west-2.amazonaws.com" } }, "params": { - "Region": "sa-east-1", - "UseDualStack": false, - "UseFIPS": false + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - 
"documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://m2.ca-central-1.amazonaws.com" + "url": "https://m2-fips.us-east-1.api.aws" } }, "params": { - "Region": "ca-central-1", - "UseDualStack": false, - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://m2.us-east-1.amazonaws.com" + "url": "https://m2-fips.us-east-1.amazonaws.com" } }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://m2.eu-central-1.amazonaws.com" + "url": "https://m2.us-east-1.api.aws" } }, "params": { - "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://m2.ap-southeast-2.amazonaws.com" + "url": "https://m2-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { 
"endpoint": { - "url": "https://m2.eu-west-1.amazonaws.com" + "url": "https://m2-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://m2.us-west-2.amazonaws.com" + "url": "https://m2.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://m2-fips.us-east-1.api.aws" + "url": "https://m2.cn-north-1.amazonaws.com.cn" } }, "params": { - "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://m2-fips.us-east-1.amazonaws.com" + "url": "https://m2-fips.us-gov-east-1.api.aws" } }, "params": { - "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://m2.us-east-1.api.aws" + "url": "https://m2-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "Region": "us-east-1", - 
"UseDualStack": true, - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://m2.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://m2.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -217,8 +254,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -230,73 +278,82 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://m2-fips.cn-north-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "cn-north-1", - "UseDualStack": true, - 
"UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://m2-fips.cn-north-1.amazonaws.com.cn" + "url": "https://m2-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://m2.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://m2.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://m2.cn-north-1.amazonaws.com.cn" + "url": "https://example.com" } }, "params": { - "Region": "cn-north-1", + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", 
"expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -307,8 +364,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -319,10 +376,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/m2/src/main/resources/codegen-resources/service-2.json b/services/m2/src/main/resources/codegen-resources/service-2.json index d2dea74eb14a..f5c8407229f3 100644 --- a/services/m2/src/main/resources/codegen-resources/service-2.json +++ b/services/m2/src/main/resources/codegen-resources/service-2.json @@ -726,6 +726,10 @@ "shape":"EntityName", "documentation":"

      The name of the application.

      " }, + "roleArn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) of the role associated with the application.

      " + }, "status":{ "shape":"ApplicationLifecycle", "documentation":"

      The status of the application.

      " @@ -782,7 +786,7 @@ }, "Arn":{ "type":"string", - "pattern":"^arn:(aws|aws-cn|aws-iso|aws-iso-[a-z]{1}|aws-us-gov):[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:([a-z]{2}-((iso[a-z]{0,1}-)|(gov-)){0,1}[a-z]+-[0-9]):[0-9]{12}:[A-Za-z0-9/][A-Za-z0-9:_/+=,@.-]{0,1023}$" + "pattern":"^arn:(aws|aws-cn|aws-iso|aws-iso-[a-z]{1}|aws-us-gov):[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:([a-z]{2}-((iso[a-z]{0,1}-)|(gov-)){0,1}[a-z]+-[0-9]|):[0-9]{12}:[A-Za-z0-9/][A-Za-z0-9:_/+=,@.-]{0,1023}$" }, "ArnList":{ "type":"list", @@ -835,7 +839,10 @@ "shape":"Identifier", "documentation":"

      The unique identifier of the application that hosts this batch job.

      " }, - "batchJobIdentifier":{"shape":"BatchJobIdentifier"}, + "batchJobIdentifier":{ + "shape":"BatchJobIdentifier", + "documentation":"

      The unique identifier of this batch job.

      " + }, "endTime":{ "shape":"Timestamp", "documentation":"

      The timestamp when this batch job execution ended.

      " @@ -858,7 +865,7 @@ }, "returnCode":{ "shape":"String", - "documentation":"

      " + "documentation":"

      The batch job return code from either the Blu Age or Micro Focus runtime engines. For more information, see Batch return codes in the IBM WebSphere Application Server documentation.

      " }, "startTime":{ "shape":"Timestamp", @@ -908,7 +915,7 @@ }, "BatchParamKey":{ "type":"string", - "documentation":"

      Parameter key: the first character must be alphabetic. Can be of up to 8 alphanumeric characters.

      ", + "documentation":"

      See https://www.ibm.com/docs/en/workload-automation/9.3.0?topic=zos-coding-variables-in-jcl to get details about limits for both keys and values: 8 for keys (variable names), 44 for values (variable values) In addition, keys will be only alphabetic characters and digits, without any space or special characters (dash, underscore, etc ...)

      Parameter key: the first character must be alphabetic. Can be of up to 8 alphanumeric characters.

      ", "max":8, "min":1, "pattern":"^[A-Za-z][A-Za-z0-9]{1,7}$" @@ -1006,6 +1013,10 @@ "shape":"EntityName", "documentation":"

      The unique identifier of the application.

      " }, + "roleArn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) of the role associated with the application.

      " + }, "tags":{ "shape":"TagMap", "documentation":"

      A list of tags to apply to the application.

      " @@ -1364,6 +1375,14 @@ "shape":"GdgDetailAttributes", "documentation":"

      The generation data group of the data set.

      " }, + "po":{ + "shape":"PoDetailAttributes", + "documentation":"

      The details of a PO type data set.

      " + }, + "ps":{ + "shape":"PsDetailAttributes", + "documentation":"

      The details of a PS type data set.

      " + }, "vsam":{ "shape":"VsamDetailAttributes", "documentation":"

      The details of a VSAM data set.

      " @@ -1379,6 +1398,14 @@ "shape":"GdgAttributes", "documentation":"

      The generation data group of the data set.

      " }, + "po":{ + "shape":"PoAttributes", + "documentation":"

      The details of a PO type data set.

      " + }, + "ps":{ + "shape":"PsAttributes", + "documentation":"

      The details of a PS type data set.

      " + }, "vsam":{ "shape":"VsamAttributes", "documentation":"

      The details of a VSAM data set.

      " @@ -1841,6 +1868,10 @@ "shape":"EntityName", "documentation":"

      The unique identifier of the application.

      " }, + "roleArn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) of the role associated with the application.

      " + }, "status":{ "shape":"ApplicationLifecycle", "documentation":"

      The status of the application.

      " @@ -1954,7 +1985,10 @@ "shape":"Identifier", "documentation":"

      The identifier of the application.

      " }, - "batchJobIdentifier":{"shape":"BatchJobIdentifier"}, + "batchJobIdentifier":{ + "shape":"BatchJobIdentifier", + "documentation":"

      The unique identifier of this batch job.

      " + }, "endTime":{ "shape":"Timestamp", "documentation":"

      The timestamp when the batch job execution ended.

      " @@ -1981,7 +2015,7 @@ }, "returnCode":{ "shape":"String", - "documentation":"

      " + "documentation":"

      The batch job return code from either the Blu Age or Micro Focus runtime engines. For more information, see Batch return codes in the IBM WebSphere Application Server documentation.

      " }, "startTime":{ "shape":"Timestamp", @@ -2800,6 +2834,46 @@ }, "documentation":"

      The scheduled maintenance for a runtime engine.

      " }, + "PoAttributes":{ + "type":"structure", + "required":[ + "format", + "memberFileExtensions" + ], + "members":{ + "encoding":{ + "shape":"String", + "documentation":"

      The character set encoding of the data set.

      " + }, + "format":{ + "shape":"String", + "documentation":"

      The format of the data set records.

      " + }, + "memberFileExtensions":{ + "shape":"String20List", + "documentation":"

      An array containing one or more filename extensions, allowing you to specify which files to be included as PDS member.

      " + } + }, + "documentation":"

      The supported properties for a PO type data set.

      " + }, + "PoDetailAttributes":{ + "type":"structure", + "required":[ + "encoding", + "format" + ], + "members":{ + "encoding":{ + "shape":"String", + "documentation":"

      The character set encoding of the data set.

      " + }, + "format":{ + "shape":"String", + "documentation":"

      The format of the data set records.

      " + } + }, + "documentation":"

      The supported properties for a PO type data set.

      " + }, "PortList":{ "type":"list", "member":{"shape":"Integer"}, @@ -2827,6 +2901,39 @@ }, "documentation":"

      The primary key for a KSDS data set.

      " }, + "PsAttributes":{ + "type":"structure", + "required":["format"], + "members":{ + "encoding":{ + "shape":"String", + "documentation":"

      The character set encoding of the data set.

      " + }, + "format":{ + "shape":"String", + "documentation":"

      The format of the data set records.

      " + } + }, + "documentation":"

      The supported properties for a PS type data set.

      " + }, + "PsDetailAttributes":{ + "type":"structure", + "required":[ + "encoding", + "format" + ], + "members":{ + "encoding":{ + "shape":"String", + "documentation":"

      The character set encoding of the data set.

      " + }, + "format":{ + "shape":"String", + "documentation":"

      The format of the data set records.

      " + } + }, + "documentation":"

      The supported properties for a PS type data set.

      " + }, "RecordLength":{ "type":"structure", "required":[ @@ -3024,6 +3131,12 @@ "type":"string", "pattern":"^\\S{1,2000}$" }, + "String20List":{ + "type":"list", + "member":{"shape":"String20"}, + "max":10, + "min":1 + }, "String50":{ "type":"string", "pattern":"^\\S{1,50}$" diff --git a/services/machinelearning/pom.xml b/services/machinelearning/pom.xml index 03595a974094..fa7af5a37ad2 100644 --- a/services/machinelearning/pom.xml +++ b/services/machinelearning/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT machinelearning AWS Java SDK :: Services :: Amazon Machine Learning diff --git a/services/macie/pom.xml b/services/macie/pom.xml index 4d5e70175e74..9fc5a8e0b0a7 100644 --- a/services/macie/pom.xml +++ b/services/macie/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT macie AWS Java SDK :: Services :: Macie diff --git a/services/macie2/pom.xml b/services/macie2/pom.xml index 8d9f8186c6e1..cc08f66e8ed5 100644 --- a/services/macie2/pom.xml +++ b/services/macie2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT macie2 AWS Java SDK :: Services :: Macie2 diff --git a/services/managedblockchain/pom.xml b/services/managedblockchain/pom.xml index 01a4d63a5669..48120d53d6ec 100644 --- a/services/managedblockchain/pom.xml +++ b/services/managedblockchain/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT managedblockchain AWS Java SDK :: Services :: ManagedBlockchain diff --git a/services/marketplacecatalog/pom.xml b/services/marketplacecatalog/pom.xml index 601a9dcd8e01..1ee45b5cc3de 100644 --- a/services/marketplacecatalog/pom.xml +++ b/services/marketplacecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT marketplacecatalog AWS Java SDK :: Services :: Marketplace Catalog diff --git 
a/services/marketplacecommerceanalytics/pom.xml b/services/marketplacecommerceanalytics/pom.xml index 577ec4db3b92..170e27c0b8d8 100644 --- a/services/marketplacecommerceanalytics/pom.xml +++ b/services/marketplacecommerceanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT marketplacecommerceanalytics AWS Java SDK :: Services :: AWS Marketplace Commerce Analytics diff --git a/services/marketplaceentitlement/pom.xml b/services/marketplaceentitlement/pom.xml index 7f1f97ae0600..6a701f8264aa 100644 --- a/services/marketplaceentitlement/pom.xml +++ b/services/marketplaceentitlement/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT marketplaceentitlement AWS Java SDK :: Services :: AWS Marketplace Entitlement diff --git a/services/marketplacemetering/pom.xml b/services/marketplacemetering/pom.xml index 5da2c9714623..b1248f056e03 100644 --- a/services/marketplacemetering/pom.xml +++ b/services/marketplacemetering/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT marketplacemetering AWS Java SDK :: Services :: AWS Marketplace Metering Service diff --git a/services/mediaconnect/pom.xml b/services/mediaconnect/pom.xml index e73a30b35f72..21168163f10d 100644 --- a/services/mediaconnect/pom.xml +++ b/services/mediaconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT mediaconnect AWS Java SDK :: Services :: MediaConnect diff --git a/services/mediaconvert/pom.xml b/services/mediaconvert/pom.xml index 07fd31a8d79d..dea3fb2e25ec 100644 --- a/services/mediaconvert/pom.xml +++ b/services/mediaconvert/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 mediaconvert diff --git a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json index 
850842fd9acf..a26677a7c5a5 100644 --- a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json +++ b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json @@ -1181,7 +1181,7 @@ }, "AacCodingMode": { "type": "string", - "documentation": "The Coding mode that you specify determines the number of audio channels and the audio channel layout metadata in your AAC output. Valid coding modes depend on the Rate control mode and Profile that you select. The following list shows the number of audio channels and channel layout for each coding mode. * 1.0 Audio Description (Receiver Mix): One channel, C. Includes audio description data from your stereo input. For more information see ETSI TS 101 154 Annex E. * 1.0 Mono: One channel, C. * 2.0 Stereo: Two channels, L, R. * 5.1 Surround: Five channels, C, L, R, Ls, Rs, LFE.", + "documentation": "The Coding mode that you specify determines the number of audio channels and the audio channel layout metadata in your AAC output. Valid coding modes depend on the Rate control mode and Profile that you select. The following list shows the number of audio channels and channel layout for each coding mode. * 1.0 Audio Description (Receiver Mix): One channel, C. Includes audio description data from your stereo input. For more information see ETSI TS 101 154 Annex E. * 1.0 Mono: One channel, C. * 2.0 Stereo: Two channels, L, R. * 5.1 Surround: Six channels, C, L, R, Ls, Rs, LFE.", "enum": [ "AD_RECEIVER_MIX", "CODING_MODE_1_0", @@ -1227,7 +1227,7 @@ "CodingMode": { "shape": "AacCodingMode", "locationName": "codingMode", - "documentation": "The Coding mode that you specify determines the number of audio channels and the audio channel layout metadata in your AAC output. Valid coding modes depend on the Rate control mode and Profile that you select. The following list shows the number of audio channels and channel layout for each coding mode. * 1.0 Audio Description (Receiver Mix): One channel, C. 
Includes audio description data from your stereo input. For more information see ETSI TS 101 154 Annex E. * 1.0 Mono: One channel, C. * 2.0 Stereo: Two channels, L, R. * 5.1 Surround: Five channels, C, L, R, Ls, Rs, LFE." + "documentation": "The Coding mode that you specify determines the number of audio channels and the audio channel layout metadata in your AAC output. Valid coding modes depend on the Rate control mode and Profile that you select. The following list shows the number of audio channels and channel layout for each coding mode. * 1.0 Audio Description (Receiver Mix): One channel, C. Includes audio description data from your stereo input. For more information see ETSI TS 101 154 Annex E. * 1.0 Mono: One channel, C. * 2.0 Stereo: Two channels, L, R. * 5.1 Surround: Six channels, C, L, R, Ls, Rs, LFE." }, "RateControlMode": { "shape": "AacRateControlMode", @@ -1438,7 +1438,7 @@ }, "AdvancedInputFilter": { "type": "string", - "documentation": "Use to remove noise, blocking, blurriness, or ringing from your input as a pre-filter step before encoding. The Advanced input filter removes more types of compression artifacts and is an improvement when compared to basic Deblock and Denoise filters. To remove video compression artifacts from your input and improve the video quality: Choose Enabled. Additionally, this filter can help increase the video quality of your output relative to its bitrate, since noisy inputs are more complex and require more bits to encode. To help restore loss of detail after applying the filter, you can optionally add texture or sharpening as an additional step.Jobs that use this feature incur pro-tier pricing. To not apply advanced input filtering: Choose Disabled. Note that you can still apply basic filtering with Deblock and Denoise.", + "documentation": "Use to remove noise, blocking, blurriness, or ringing from your input as a pre-filter step before encoding. 
The Advanced input filter removes more types of compression artifacts and is an improvement when compared to basic Deblock and Denoise filters. To remove video compression artifacts from your input and improve the video quality: Choose Enabled. Additionally, this filter can help increase the video quality of your output relative to its bitrate, since noisy inputs are more complex and require more bits to encode. To help restore loss of detail after applying the filter, you can optionally add texture or sharpening as an additional step. Jobs that use this feature incur pro-tier pricing. To not apply advanced input filtering: Choose Disabled. Note that you can still apply basic filtering with Deblock and Denoise.", "enum": [ "ENABLED", "DISABLED" @@ -2754,7 +2754,7 @@ "documentation": "If your input captions are SCC, TTML, STL, SMI, SRT, or IMSC in an xml file, specify the URI of the input captions source file. If your input captions are IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings." } }, - "documentation": "Use captions selectors to specify the captions data from your input that you use in your outputs. You can use up to 20 captions selectors per input." + "documentation": "Use captions selectors to specify the captions data from your input that you use in your outputs. You can use up to 100 captions selectors per input." }, "CaptionSourceConvertPaintOnToPopOn": { "type": "string", @@ -6251,6 +6251,11 @@ "locationName": "alternateTransferFunctionSei", "documentation": "Enables Alternate Transfer Function SEI message for outputs using Hybrid Log Gamma (HLG) Electro-Optical Transfer Function (EOTF)." }, + "BandwidthReductionFilter": { + "shape": "BandwidthReductionFilter", + "locationName": "bandwidthReductionFilter", + "documentation": "The Bandwidth reduction filter increases the video quality of your output relative to its bitrate. 
Use to lower the bitrate of your constant quality QVBR output, with little or no perceptual decrease in quality. Or, use to increase the video quality of outputs with other rate control modes relative to the bitrate that you specify. Bandwidth reduction increases further when your input is low quality or noisy. Outputs that use this feature incur pro-tier pricing. When you include Bandwidth reduction filter, you cannot include the Noise reducer preprocessor." + }, "Bitrate": { "shape": "__integerMin1000Max1466400000", "locationName": "bitrate", @@ -6891,6 +6896,11 @@ "locationName": "programDateTimePeriod", "documentation": "Period of insertion of EXT-X-PROGRAM-DATE-TIME entry, in seconds." }, + "ProgressiveWriteHlsManifest": { + "shape": "HlsProgressiveWriteHlsManifest", + "locationName": "progressiveWriteHlsManifest", + "documentation": "Specify whether MediaConvert generates HLS manifests while your job is running or when your job is complete. To generate HLS manifests while your job is running: Choose Enabled. Use if you want to play back your content as soon as it's available. MediaConvert writes the parent and child manifests after the first three media segments are written to your destination S3 bucket. It then writes new updated manifests after each additional segment is written. The parent manifest includes the latest BANDWIDTH and AVERAGE-BANDWIDTH attributes, and child manifests include the latest available media segment. When your job completes, the final child playlists include an EXT-X-ENDLIST tag. To generate HLS manifests only when your job completes: Choose Disabled." + }, "SegmentControl": { "shape": "HlsSegmentControl", "locationName": "segmentControl", @@ -6909,7 +6919,7 @@ "SegmentsPerSubdirectory": { "shape": "__integerMin1Max2147483647", "locationName": "segmentsPerSubdirectory", - "documentation": "Number of segments to write to a subdirectory before starting a new one. 
directoryStructure must be SINGLE_DIRECTORY for this setting to have an effect." + "documentation": "Specify the number of segments to write to a subdirectory before starting a new one. You must also set Directory structure to Subdirectory per stream for this setting to have an effect." }, "StreamInfResolution": { "shape": "HlsStreamInfResolution", @@ -7057,6 +7067,14 @@ "EXCLUDE" ] }, + "HlsProgressiveWriteHlsManifest": { + "type": "string", + "documentation": "Specify whether MediaConvert generates HLS manifests while your job is running or when your job is complete. To generate HLS manifests while your job is running: Choose Enabled. Use if you want to play back your content as soon as it's available. MediaConvert writes the parent and child manifests after the first three media segments are written to your destination S3 bucket. It then writes new updated manifests after each additional segment is written. The parent manifest includes the latest BANDWIDTH and AVERAGE-BANDWIDTH attributes, and child manifests include the latest available media segment. When your job completes, the final child playlists include an EXT-X-ENDLIST tag. To generate HLS manifests only when your job completes: Choose Disabled.", + "enum": [ + "ENABLED", + "DISABLED" + ] + }, "HlsRenditionGroupSettings": { "type": "structure", "members": { @@ -7251,7 +7269,7 @@ "AdvancedInputFilter": { "shape": "AdvancedInputFilter", "locationName": "advancedInputFilter", - "documentation": "Use to remove noise, blocking, blurriness, or ringing from your input as a pre-filter step before encoding. The Advanced input filter removes more types of compression artifacts and is an improvement when compared to basic Deblock and Denoise filters. To remove video compression artifacts from your input and improve the video quality: Choose Enabled. 
Additionally, this filter can help increase the video quality of your output relative to its bitrate, since noisy inputs are more complex and require more bits to encode. To help restore loss of detail after applying the filter, you can optionally add texture or sharpening as an additional step.Jobs that use this feature incur pro-tier pricing. To not apply advanced input filtering: Choose Disabled. Note that you can still apply basic filtering with Deblock and Denoise." + "documentation": "Use to remove noise, blocking, blurriness, or ringing from your input as a pre-filter step before encoding. The Advanced input filter removes more types of compression artifacts and is an improvement when compared to basic Deblock and Denoise filters. To remove video compression artifacts from your input and improve the video quality: Choose Enabled. Additionally, this filter can help increase the video quality of your output relative to its bitrate, since noisy inputs are more complex and require more bits to encode. To help restore loss of detail after applying the filter, you can optionally add texture or sharpening as an additional step. Jobs that use this feature incur pro-tier pricing. To not apply advanced input filtering: Choose Disabled. Note that you can still apply basic filtering with Deblock and Denoise." }, "AdvancedInputFilterSettings": { "shape": "AdvancedInputFilterSettings", @@ -7271,7 +7289,7 @@ "CaptionSelectors": { "shape": "__mapOfCaptionSelector", "locationName": "captionSelectors", - "documentation": "Use captions selectors to specify the captions data from your input that you use in your outputs. You can use up to 20 captions selectors per input." + "documentation": "Use captions selectors to specify the captions data from your input that you use in your outputs. You can use up to 100 captions selectors per input." 
}, "Crop": { "shape": "Rectangle", @@ -7488,7 +7506,7 @@ "AdvancedInputFilter": { "shape": "AdvancedInputFilter", "locationName": "advancedInputFilter", - "documentation": "Use to remove noise, blocking, blurriness, or ringing from your input as a pre-filter step before encoding. The Advanced input filter removes more types of compression artifacts and is an improvement when compared to basic Deblock and Denoise filters. To remove video compression artifacts from your input and improve the video quality: Choose Enabled. Additionally, this filter can help increase the video quality of your output relative to its bitrate, since noisy inputs are more complex and require more bits to encode. To help restore loss of detail after applying the filter, you can optionally add texture or sharpening as an additional step.Jobs that use this feature incur pro-tier pricing. To not apply advanced input filtering: Choose Disabled. Note that you can still apply basic filtering with Deblock and Denoise." + "documentation": "Use to remove noise, blocking, blurriness, or ringing from your input as a pre-filter step before encoding. The Advanced input filter removes more types of compression artifacts and is an improvement when compared to basic Deblock and Denoise filters. To remove video compression artifacts from your input and improve the video quality: Choose Enabled. Additionally, this filter can help increase the video quality of your output relative to its bitrate, since noisy inputs are more complex and require more bits to encode. To help restore loss of detail after applying the filter, you can optionally add texture or sharpening as an additional step. Jobs that use this feature incur pro-tier pricing. To not apply advanced input filtering: Choose Disabled. Note that you can still apply basic filtering with Deblock and Denoise." 
}, "AdvancedInputFilterSettings": { "shape": "AdvancedInputFilterSettings", @@ -7508,7 +7526,7 @@ "CaptionSelectors": { "shape": "__mapOfCaptionSelector", "locationName": "captionSelectors", - "documentation": "Use captions selectors to specify the captions data from your input that you use in your outputs. You can use up to 20 captions selectors per input." + "documentation": "Use captions selectors to specify the captions data from your input that you use in your outputs. You can use up to 100 captions selectors per input." }, "Crop": { "shape": "Rectangle", @@ -8109,7 +8127,7 @@ "documentation": "Provide your Kantar license ID number. You should get this number from Kantar." }, "KantarServerUrl": { - "shape": "__stringPatternHttpsKantarmediaComFr", + "shape": "__stringPatternHttpsKantarmedia", "locationName": "kantarServerUrl", "documentation": "Provide the HTTPS endpoint to the Kantar server. You should get this endpoint from Kantar." }, @@ -9932,7 +9950,8 @@ "D_10", "XDCAM", "OP1A", - "XAVC" + "XAVC", + "XDCAM_RDD9" ] }, "MxfSettings": { @@ -11904,7 +11923,7 @@ "Height": { "shape": "__integerMin32Max8192", "locationName": "height", - "documentation": "Use the Height (Height) setting to define the video resolution height for this output. Specify in pixels. If you don't provide a value here, the service will use the input height." + "documentation": "Use Height to define the video resolution height, in pixels, for this output. To use the same resolution as your input: Leave both Width and Height blank. To evenly scale from your input resolution: Leave Height blank and enter a value for Width. For example, if your input is 1920x1080 and you set Width to 1280, your output will be 1280x720." }, "Position": { "shape": "Rectangle", @@ -11939,7 +11958,7 @@ "Width": { "shape": "__integerMin32Max8192", "locationName": "width", - "documentation": "Use Width (Width) to define the video resolution width, in pixels, for this output. 
If you don't provide a value here, the service will use the input width." + "documentation": "Use Width to define the video resolution width, in pixels, for this output. To use the same resolution as your input: Leave both Width and Height blank. To evenly scale from your input resolution: Leave Width blank and enter a value for Height. For example, if your input is 1920x1080 and you set Height to 720, your output will be 1280x720." } }, "documentation": "Settings related to video encoding of your output. The specific video settings depend on the video codec that you choose. When you work directly in your JSON job specification, include one instance of Video description (VideoDescription) per output." @@ -13808,9 +13827,9 @@ "type": "string", "pattern": "^https:\\/\\/" }, - "__stringPatternHttpsKantarmediaComFr": { + "__stringPatternHttpsKantarmedia": { "type": "string", - "pattern": "^https:\\/\\/.*.kantarmedia.(com|fr)$" + "pattern": "^https:\\/\\/.*.kantarmedia.*$" }, "__stringPatternIdentityAZaZ26AZaZ09163": { "type": "string", diff --git a/services/medialive/pom.xml b/services/medialive/pom.xml index 5bd88c131896..c544396a0e9d 100644 --- a/services/medialive/pom.xml +++ b/services/medialive/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 medialive diff --git a/services/mediapackage/pom.xml b/services/mediapackage/pom.xml index 22150d006be7..b9c96972a4fe 100644 --- a/services/mediapackage/pom.xml +++ b/services/mediapackage/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 mediapackage diff --git a/services/mediapackagev2/pom.xml b/services/mediapackagev2/pom.xml new file mode 100644 index 000000000000..f9e89ba7026e --- /dev/null +++ b/services/mediapackagev2/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.20.93-SNAPSHOT + + mediapackagev2 + AWS Java SDK :: Services :: Media Package V2 + The AWS Java SDK for Media Package 
V2 module holds the client classes that are used for + communicating with Media Package V2. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.mediapackagev2 + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/mediapackagev2/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/mediapackagev2/src/main/resources/codegen-resources/endpoint-rule-set.json new file mode 100644 index 000000000000..78b14b1846ab --- /dev/null +++ b/services/mediapackagev2/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } 
+ ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://mediapackagev2-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://mediapackagev2-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://mediapackagev2.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "type": "tree", + 
"rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://mediapackagev2.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] + } + ] +} \ No newline at end of file diff --git a/services/mediapackagev2/src/main/resources/codegen-resources/endpoint-tests.json b/services/mediapackagev2/src/main/resources/codegen-resources/endpoint-tests.json new file mode 100644 index 000000000000..a74e6078b1d1 --- /dev/null +++ b/services/mediapackagev2/src/main/resources/codegen-resources/endpoint-tests.json @@ -0,0 +1,295 @@ +{ + "testCases": [ + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mediapackagev2-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-gov-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mediapackagev2-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-gov-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mediapackagev2.us-gov-east-1.api.aws" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-gov-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mediapackagev2.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-gov-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + 
"url": "https://mediapackagev2-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "UseFIPS": true, + "Region": "cn-north-1", + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mediapackagev2-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseFIPS": true, + "Region": "cn-north-1", + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mediapackagev2.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "UseFIPS": false, + "Region": "cn-north-1", + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mediapackagev2.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseFIPS": false, + "Region": "cn-north-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseFIPS": true, + "Region": "us-iso-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mediapackagev2-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-iso-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseFIPS": false, + "Region": "us-iso-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack 
disabled", + "expect": { + "endpoint": { + "url": "https://mediapackagev2.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-iso-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mediapackagev2-fips.us-east-1.api.aws" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mediapackagev2-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mediapackagev2.us-east-1.api.aws" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mediapackagev2.us-east-1.amazonaws.com" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseFIPS": true, + "Region": "us-isob-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mediapackagev2-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-isob-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + 
"expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseFIPS": false, + "Region": "us-isob-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mediapackagev2.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-isob-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "UseFIPS": true, + "Region": "us-east-1", + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": true, + "Endpoint": "https://example.com" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/services/mediapackagev2/src/main/resources/codegen-resources/paginators-1.json b/services/mediapackagev2/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..92079806f5dc --- /dev/null +++ b/services/mediapackagev2/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,22 @@ +{ + "pagination": { + "ListChannelGroups": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Items" + }, + "ListChannels": { + 
"input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Items" + }, + "ListOriginEndpoints": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Items" + } + } +} diff --git a/services/mediapackagev2/src/main/resources/codegen-resources/service-2.json b/services/mediapackagev2/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..a0555c730b73 --- /dev/null +++ b/services/mediapackagev2/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2492 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2022-12-25", + "endpointPrefix":"mediapackagev2", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"mediapackagev2", + "serviceFullName":"AWS Elemental MediaPackage v2", + "serviceId":"MediaPackageV2", + "signatureVersion":"v4", + "signingName":"mediapackagev2", + "uid":"mediapackagev2-2022-12-25" + }, + "operations":{ + "CreateChannel":{ + "name":"CreateChannel", + "http":{ + "method":"POST", + "requestUri":"/channelGroup/{ChannelGroupName}/channel", + "responseCode":200 + }, + "input":{"shape":"CreateChannelRequest"}, + "output":{"shape":"CreateChannelResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

      Create a channel to start receiving content streams. The channel represents the input to MediaPackage for incoming live content from an encoder such as AWS Elemental MediaLive. The channel receives content, and after packaging it, outputs it through an origin endpoint to downstream devices (such as video players or CDNs) that request the content. You can create only one channel with each request. We recommend that you spread out channels between channel groups, such as putting redundant channels in the same AWS Region in different channel groups.

      ", + "idempotent":true + }, + "CreateChannelGroup":{ + "name":"CreateChannelGroup", + "http":{ + "method":"POST", + "requestUri":"/channelGroup", + "responseCode":200 + }, + "input":{"shape":"CreateChannelGroupRequest"}, + "output":{"shape":"CreateChannelGroupResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

      Create a channel group to group your channels and origin endpoints. A channel group is the top-level resource that consists of channels and origin endpoints that are associated with it and that provides predictable URLs for stream delivery. All channels and origin endpoints within the channel group are guaranteed to share the DNS. You can create only one channel group with each request.

      ", + "idempotent":true + }, + "CreateOriginEndpoint":{ + "name":"CreateOriginEndpoint", + "http":{ + "method":"POST", + "requestUri":"/channelGroup/{ChannelGroupName}/channel/{ChannelName}/originEndpoint", + "responseCode":200 + }, + "input":{"shape":"CreateOriginEndpointRequest"}, + "output":{"shape":"CreateOriginEndpointResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

      The endpoint is attached to a channel, and represents the output of the live content. You can associate multiple endpoints to a single channel. Each endpoint gives players and downstream CDNs (such as Amazon CloudFront) access to the content for playback. Content can't be served from a channel until it has an endpoint. You can create only one endpoint with each request.

      ", + "idempotent":true + }, + "DeleteChannel":{ + "name":"DeleteChannel", + "http":{ + "method":"DELETE", + "requestUri":"/channelGroup/{ChannelGroupName}/channel/{ChannelName}/", + "responseCode":200 + }, + "input":{"shape":"DeleteChannelRequest"}, + "output":{"shape":"DeleteChannelResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Delete a channel to stop AWS Elemental MediaPackage from receiving further content. You must delete the channel's origin endpoints before you can delete the channel.

      ", + "idempotent":true + }, + "DeleteChannelGroup":{ + "name":"DeleteChannelGroup", + "http":{ + "method":"DELETE", + "requestUri":"/channelGroup/{ChannelGroupName}", + "responseCode":200 + }, + "input":{"shape":"DeleteChannelGroupRequest"}, + "output":{"shape":"DeleteChannelGroupResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Delete a channel group. You must delete the channel group's channels and origin endpoints before you can delete the channel group. If you delete a channel group, you'll lose access to the egress domain and will have to create a new channel group to replace it.

      ", + "idempotent":true + }, + "DeleteChannelPolicy":{ + "name":"DeleteChannelPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/channelGroup/{ChannelGroupName}/channel/{ChannelName}/policy", + "responseCode":200 + }, + "input":{"shape":"DeleteChannelPolicyRequest"}, + "output":{"shape":"DeleteChannelPolicyResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Delete a channel policy.

      ", + "idempotent":true + }, + "DeleteOriginEndpoint":{ + "name":"DeleteOriginEndpoint", + "http":{ + "method":"DELETE", + "requestUri":"/channelGroup/{ChannelGroupName}/channel/{ChannelName}/originEndpoint/{OriginEndpointName}", + "responseCode":200 + }, + "input":{"shape":"DeleteOriginEndpointRequest"}, + "output":{"shape":"DeleteOriginEndpointResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Origin endpoints can serve content until they're deleted. Delete the endpoint if it should no longer respond to playback requests. You must delete all endpoints from a channel before you can delete the channel.

      ", + "idempotent":true + }, + "DeleteOriginEndpointPolicy":{ + "name":"DeleteOriginEndpointPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/channelGroup/{ChannelGroupName}/channel/{ChannelName}/originEndpoint/{OriginEndpointName}/policy", + "responseCode":200 + }, + "input":{"shape":"DeleteOriginEndpointPolicyRequest"}, + "output":{"shape":"DeleteOriginEndpointPolicyResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Delete an origin endpoint policy.

      ", + "idempotent":true + }, + "GetChannel":{ + "name":"GetChannel", + "http":{ + "method":"GET", + "requestUri":"/channelGroup/{ChannelGroupName}/channel/{ChannelName}/", + "responseCode":200 + }, + "input":{"shape":"GetChannelRequest"}, + "output":{"shape":"GetChannelResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Retrieves the specified channel that's configured in AWS Elemental MediaPackage, including the origin endpoints that are associated with it.

      " + }, + "GetChannelGroup":{ + "name":"GetChannelGroup", + "http":{ + "method":"GET", + "requestUri":"/channelGroup/{ChannelGroupName}", + "responseCode":200 + }, + "input":{"shape":"GetChannelGroupRequest"}, + "output":{"shape":"GetChannelGroupResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Retrieves the specified channel group that's configured in AWS Elemental MediaPackage, including the channels and origin endpoints that are associated with it.

      " + }, + "GetChannelPolicy":{ + "name":"GetChannelPolicy", + "http":{ + "method":"GET", + "requestUri":"/channelGroup/{ChannelGroupName}/channel/{ChannelName}/policy", + "responseCode":200 + }, + "input":{"shape":"GetChannelPolicyRequest"}, + "output":{"shape":"GetChannelPolicyResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Retrieves the specified channel policy that's configured in AWS Elemental MediaPackage. With policies, you can specify who has access to AWS resources and what actions they can perform on those resources.

      " + }, + "GetOriginEndpoint":{ + "name":"GetOriginEndpoint", + "http":{ + "method":"GET", + "requestUri":"/channelGroup/{ChannelGroupName}/channel/{ChannelName}/originEndpoint/{OriginEndpointName}", + "responseCode":200 + }, + "input":{"shape":"GetOriginEndpointRequest"}, + "output":{"shape":"GetOriginEndpointResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Retrieves the specified origin endpoint that's configured in AWS Elemental MediaPackage to obtain its playback URL and to view the packaging settings that it's currently using.

      " + }, + "GetOriginEndpointPolicy":{ + "name":"GetOriginEndpointPolicy", + "http":{ + "method":"GET", + "requestUri":"/channelGroup/{ChannelGroupName}/channel/{ChannelName}/originEndpoint/{OriginEndpointName}/policy", + "responseCode":200 + }, + "input":{"shape":"GetOriginEndpointPolicyRequest"}, + "output":{"shape":"GetOriginEndpointPolicyResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Retrieves the specified origin endpoint policy that's configured in AWS Elemental MediaPackage.

      " + }, + "ListChannelGroups":{ + "name":"ListChannelGroups", + "http":{ + "method":"GET", + "requestUri":"/channelGroup", + "responseCode":200 + }, + "input":{"shape":"ListChannelGroupsRequest"}, + "output":{"shape":"ListChannelGroupsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Retrieves all channel groups that are configured in AWS Elemental MediaPackage, including the channels and origin endpoints that are associated with it.

      " + }, + "ListChannels":{ + "name":"ListChannels", + "http":{ + "method":"GET", + "requestUri":"/channelGroup/{ChannelGroupName}/channel", + "responseCode":200 + }, + "input":{"shape":"ListChannelsRequest"}, + "output":{"shape":"ListChannelsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Retrieves all channels in a specific channel group that are configured in AWS Elemental MediaPackage, including the origin endpoints that are associated with it.

      " + }, + "ListOriginEndpoints":{ + "name":"ListOriginEndpoints", + "http":{ + "method":"GET", + "requestUri":"/channelGroup/{ChannelGroupName}/channel/{ChannelName}/originEndpoint", + "responseCode":200 + }, + "input":{"shape":"ListOriginEndpointsRequest"}, + "output":{"shape":"ListOriginEndpointsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Retrieves all origin endpoints in a specific channel that are configured in AWS Elemental MediaPackage.

      " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

      Lists the tags assigned to a resource.

      " + }, + "PutChannelPolicy":{ + "name":"PutChannelPolicy", + "http":{ + "method":"PUT", + "requestUri":"/channelGroup/{ChannelGroupName}/channel/{ChannelName}/policy", + "responseCode":200 + }, + "input":{"shape":"PutChannelPolicyRequest"}, + "output":{"shape":"PutChannelPolicyResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Attaches an IAM policy to the specified channel. With policies, you can specify who has access to AWS resources and what actions they can perform on those resources. You can attach only one policy with each request.

      ", + "idempotent":true + }, + "PutOriginEndpointPolicy":{ + "name":"PutOriginEndpointPolicy", + "http":{ + "method":"POST", + "requestUri":"/channelGroup/{ChannelGroupName}/channel/{ChannelName}/originEndpoint/{OriginEndpointName}/policy", + "responseCode":200 + }, + "input":{"shape":"PutOriginEndpointPolicyRequest"}, + "output":{"shape":"PutOriginEndpointPolicyResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Attaches an IAM policy to the specified origin endpoint. You can attach only one policy with each request.

      ", + "idempotent":true + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{ResourceArn}", + "responseCode":204 + }, + "input":{"shape":"TagResourceRequest"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

      Assigns one or more tags (key-value pairs) to the specified MediaPackage resource.

      Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values. You can use the TagResource operation with a resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.

      " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{ResourceArn}", + "responseCode":204 + }, + "input":{"shape":"UntagResourceRequest"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

      Removes one or more tags from the specified resource.

      ", + "idempotent":true + }, + "UpdateChannel":{ + "name":"UpdateChannel", + "http":{ + "method":"PUT", + "requestUri":"/channelGroup/{ChannelGroupName}/channel/{ChannelName}/", + "responseCode":200 + }, + "input":{"shape":"UpdateChannelRequest"}, + "output":{"shape":"UpdateChannelResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Update the specified channel. You can edit if MediaPackage sends ingest or egress access logs to the CloudWatch log group, if content will be encrypted, the description on a channel, and your channel's policy settings. You can't edit the name of the channel or CloudFront distribution details.

      Any edits you make that impact the video output may not be reflected for a few minutes.

      ", + "idempotent":true + }, + "UpdateChannelGroup":{ + "name":"UpdateChannelGroup", + "http":{ + "method":"PUT", + "requestUri":"/channelGroup/{ChannelGroupName}", + "responseCode":200 + }, + "input":{"shape":"UpdateChannelGroupRequest"}, + "output":{"shape":"UpdateChannelGroupResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Update the specified channel group. You can edit the description on a channel group for easier identification later from the AWS Elemental MediaPackage console. You can't edit the name of the channel group.

      Any edits you make that impact the video output may not be reflected for a few minutes.

      ", + "idempotent":true + }, + "UpdateOriginEndpoint":{ + "name":"UpdateOriginEndpoint", + "http":{ + "method":"PUT", + "requestUri":"/channelGroup/{ChannelGroupName}/channel/{ChannelName}/originEndpoint/{OriginEndpointName}", + "responseCode":200 + }, + "input":{"shape":"UpdateOriginEndpointRequest"}, + "output":{"shape":"UpdateOriginEndpointResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

      Update the specified origin endpoint. Edit the packaging preferences on an endpoint to optimize the viewing experience. You can't edit the name of the endpoint.

      Any edits you make that impact the video output may not be reflected for a few minutes.

      ", + "idempotent":true + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

      You don't have permissions to perform the requested operation. The user or role that is making the request must have at least one IAM permissions policy attached that grants the required permissions. For more information, see Access Management in the IAM User Guide.

      ", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AdMarkerHls":{ + "type":"string", + "enum":["DATERANGE"] + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "ChannelGroupListConfiguration":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "Arn", + "CreatedAt", + "ModifiedAt" + ], + "members":{ + "ChannelGroupName":{ + "shape":"String", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      " + }, + "Arn":{ + "shape":"String", + "documentation":"

      The Amazon Resource Name (ARN) associated with the resource.

      " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the channel group was created.

      " + }, + "ModifiedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the channel group was modified.

      " + }, + "Description":{ + "shape":"String", + "documentation":"

      Any descriptive information that you want to add to the channel group for future identification purposes.

      " + } + }, + "documentation":"

      The configuration of the channel group.

      " + }, + "ChannelGroupsList":{ + "type":"list", + "member":{"shape":"ChannelGroupListConfiguration"} + }, + "ChannelList":{ + "type":"list", + "member":{"shape":"ChannelListConfiguration"} + }, + "ChannelListConfiguration":{ + "type":"structure", + "required":[ + "Arn", + "ChannelName", + "ChannelGroupName", + "CreatedAt", + "ModifiedAt" + ], + "members":{ + "Arn":{ + "shape":"String", + "documentation":"

      The Amazon Resource Name (ARN) associated with the resource.

      " + }, + "ChannelName":{ + "shape":"String", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      " + }, + "ChannelGroupName":{ + "shape":"String", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the channel was created.

      " + }, + "ModifiedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the channel was modified.

      " + }, + "Description":{ + "shape":"String", + "documentation":"

      Any descriptive information that you want to add to the channel for future identification purposes.

      " + } + }, + "documentation":"

      The configuration of the channel.

      " + }, + "CmafEncryptionMethod":{ + "type":"string", + "enum":[ + "CENC", + "CBCS" + ] + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "ConflictExceptionType":{ + "shape":"ConflictExceptionType", + "documentation":"

      The type of ConflictException.

      " + } + }, + "documentation":"

      Updating or deleting this resource can cause an inconsistent state.

      ", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "ConflictExceptionType":{ + "type":"string", + "enum":[ + "RESOURCE_IN_USE", + "RESOURCE_ALREADY_EXISTS", + "IDEMPOTENT_PARAMETER_MISMATCH", + "CONFLICTING_OPERATION" + ] + }, + "ContainerType":{ + "type":"string", + "enum":[ + "TS", + "CMAF" + ] + }, + "CreateChannelGroupRequest":{ + "type":"structure", + "required":["ChannelGroupName"], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region. You can't use spaces in the name. You can't change the name after you create the channel group.

      " + }, + "ClientToken":{ + "shape":"IdempotencyToken", + "documentation":"

      A unique, case-sensitive token that you provide to ensure the idempotency of the request.

      ", + "idempotencyToken":true, + "location":"header", + "locationName":"x-amzn-client-token" + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

      Enter any descriptive text that helps you to identify the channel group.

      " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      A comma-separated list of tag key:value pairs that you define. For example:

      \"Key1\": \"Value1\",

      \"Key2\": \"Value2\"

      ", + "locationName":"tags" + } + } + }, + "CreateChannelGroupResponse":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "Arn", + "EgressDomain", + "CreatedAt", + "ModifiedAt" + ], + "members":{ + "ChannelGroupName":{ + "shape":"String", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      " + }, + "Arn":{ + "shape":"String", + "documentation":"

      The Amazon Resource Name (ARN) associated with the resource.

      " + }, + "EgressDomain":{ + "shape":"String", + "documentation":"

      The output domain where the source stream should be sent. Integrate the egress domain with a downstream CDN (such as Amazon CloudFront) or playback device.

      " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the channel group was created.

      " + }, + "ModifiedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the channel group was modified.

      " + }, + "Description":{ + "shape":"String", + "documentation":"

      The description for your channel group.

      " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The comma-separated list of tag key:value pairs assigned to the channel group.

      " + } + } + }, + "CreateChannelRequest":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName" + ], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      ", + "location":"uri", + "locationName":"ChannelGroupName" + }, + "ChannelName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group. You can't change the name after you create the channel.

      " + }, + "ClientToken":{ + "shape":"IdempotencyToken", + "documentation":"

      A unique, case-sensitive token that you provide to ensure the idempotency of the request.

      ", + "idempotencyToken":true, + "location":"header", + "locationName":"x-amzn-client-token" + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

      Enter any descriptive text that helps you to identify the channel.

      " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      A comma-separated list of tag key:value pairs that you define. For example:

      \"Key1\": \"Value1\",

      \"Key2\": \"Value2\"

      ", + "locationName":"tags" + } + } + }, + "CreateChannelResponse":{ + "type":"structure", + "required":[ + "Arn", + "ChannelName", + "ChannelGroupName", + "CreatedAt", + "ModifiedAt" + ], + "members":{ + "Arn":{ + "shape":"String", + "documentation":"

      The Amazon Resource Name (ARN) associated with the resource.

      " + }, + "ChannelName":{ + "shape":"String", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      " + }, + "ChannelGroupName":{ + "shape":"String", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the channel was created.

      " + }, + "ModifiedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the channel was modified.

      " + }, + "Description":{ + "shape":"String", + "documentation":"

      The description for your channel.

      " + }, + "IngestEndpoints":{"shape":"IngestEndpointList"}, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The comma-separated list of tag key:value pairs assigned to the channel.

      " + } + } + }, + "CreateHlsManifestConfiguration":{ + "type":"structure", + "required":["ManifestName"], + "members":{ + "ManifestName":{ + "shape":"ManifestName", + "documentation":"

      A short string that's appended to the endpoint URL. The manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default manifest name, index. MediaPackage automatically inserts the format extension, such as .m3u8. You can't use the same manifest name if you use HLS manifest and low-latency HLS manifest. The manifestName on the HLSManifest object overrides the manifestName you provided on the originEndpoint object.

      " + }, + "ChildManifestName":{ + "shape":"ManifestName", + "documentation":"

      A short string that's appended to the endpoint URL. The child manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default manifest name, index, with an added suffix to distinguish it from the manifest name. The manifestName on the HLSManifest object overrides the manifestName you provided on the originEndpoint object.

      " + }, + "ScteHls":{"shape":"ScteHls"}, + "ManifestWindowSeconds":{ + "shape":"CreateHlsManifestConfigurationManifestWindowSecondsInteger", + "documentation":"

      The total duration (in seconds) of the manifest's content.

      " + }, + "ProgramDateTimeIntervalSeconds":{ + "shape":"CreateHlsManifestConfigurationProgramDateTimeIntervalSecondsInteger", + "documentation":"

      Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval, EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest. The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player. ID3Timed metadata messages generate every 5 seconds whenever the content is ingested.

      Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output.

      " + } + }, + "documentation":"

      Create an HTTP live streaming (HLS) manifest configuration.

      " + }, + "CreateHlsManifestConfigurationManifestWindowSecondsInteger":{ + "type":"integer", + "box":true, + "max":900, + "min":30 + }, + "CreateHlsManifestConfigurationProgramDateTimeIntervalSecondsInteger":{ + "type":"integer", + "box":true, + "max":1209600, + "min":1 + }, + "CreateHlsManifests":{ + "type":"list", + "member":{"shape":"CreateHlsManifestConfiguration"} + }, + "CreateLowLatencyHlsManifestConfiguration":{ + "type":"structure", + "required":["ManifestName"], + "members":{ + "ManifestName":{ + "shape":"ManifestName", + "documentation":"

      A short string that's appended to the endpoint URL. The manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default manifest name, index. MediaPackage automatically inserts the format extension, such as .m3u8. You can't use the same manifest name if you use HLS manifest and low-latency HLS manifest. The manifestName on the HLSManifest object overrides the manifestName you provided on the originEndpoint object.

      " + }, + "ChildManifestName":{ + "shape":"ManifestName", + "documentation":"

      A short string that's appended to the endpoint URL. The child manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default manifest name, index, with an added suffix to distinguish it from the manifest name. The manifestName on the HLSManifest object overrides the manifestName you provided on the originEndpoint object.

      " + }, + "ScteHls":{"shape":"ScteHls"}, + "ManifestWindowSeconds":{ + "shape":"CreateLowLatencyHlsManifestConfigurationManifestWindowSecondsInteger", + "documentation":"

      The total duration (in seconds) of the manifest's content.

      " + }, + "ProgramDateTimeIntervalSeconds":{ + "shape":"CreateLowLatencyHlsManifestConfigurationProgramDateTimeIntervalSecondsInteger", + "documentation":"

      Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval, EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest. The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player. ID3Timed metadata messages generate every 5 seconds whenever the content is ingested.

      Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output.

      " + } + }, + "documentation":"

      Create a low-latency HTTP live streaming (HLS) manifest configuration.

      " + }, + "CreateLowLatencyHlsManifestConfigurationManifestWindowSecondsInteger":{ + "type":"integer", + "box":true, + "max":900, + "min":30 + }, + "CreateLowLatencyHlsManifestConfigurationProgramDateTimeIntervalSecondsInteger":{ + "type":"integer", + "box":true, + "max":1209600, + "min":1 + }, + "CreateLowLatencyHlsManifests":{ + "type":"list", + "member":{"shape":"CreateLowLatencyHlsManifestConfiguration"} + }, + "CreateOriginEndpointRequest":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName", + "OriginEndpointName", + "ContainerType" + ], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      ", + "location":"uri", + "locationName":"ChannelGroupName" + }, + "ChannelName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      ", + "location":"uri", + "locationName":"ChannelName" + }, + "OriginEndpointName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the origin endpoint. The name is the primary identifier for the origin endpoint, and must be unique for your account in the AWS Region and channel. You can't use spaces in the name. You can't change the name after you create the endpoint.

      " + }, + "ContainerType":{ + "shape":"ContainerType", + "documentation":"

      The type of container to attach to this origin endpoint. A container type is a file format that encapsulates one or more media streams, such as audio and video, into a single file. You can't change the container type after you create the endpoint.

      " + }, + "Segment":{ + "shape":"Segment", + "documentation":"

      The segment configuration, including the segment name, duration, and other configuration values.

      " + }, + "ClientToken":{ + "shape":"IdempotencyToken", + "documentation":"

      A unique, case-sensitive token that you provide to ensure the idempotency of the request.

      ", + "idempotencyToken":true, + "location":"header", + "locationName":"x-amzn-client-token" + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

      Enter any descriptive text that helps you to identify the origin endpoint.

      " + }, + "StartoverWindowSeconds":{ + "shape":"CreateOriginEndpointRequestStartoverWindowSecondsInteger", + "documentation":"

      The size of the window (in seconds) to create a window of the live stream that's available for on-demand viewing. Viewers can start-over or catch-up on content that falls within the window. The maximum startover window is 1,209,600 seconds (14 days).

      " + }, + "HlsManifests":{ + "shape":"CreateHlsManifests", + "documentation":"

      An HTTP live streaming (HLS) manifest configuration.

      " + }, + "LowLatencyHlsManifests":{ + "shape":"CreateLowLatencyHlsManifests", + "documentation":"

      A low-latency HLS manifest configuration.

      " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      A comma-separated list of tag key:value pairs that you define. For example:

      \"Key1\": \"Value1\",

      \"Key2\": \"Value2\"

      " + } + } + }, + "CreateOriginEndpointRequestStartoverWindowSecondsInteger":{ + "type":"integer", + "box":true, + "max":1209600, + "min":60 + }, + "CreateOriginEndpointResponse":{ + "type":"structure", + "required":[ + "Arn", + "ChannelGroupName", + "ChannelName", + "OriginEndpointName", + "ContainerType", + "Segment", + "CreatedAt", + "ModifiedAt" + ], + "members":{ + "Arn":{ + "shape":"String", + "documentation":"

      The Amazon Resource Name (ARN) associated with the resource.

      " + }, + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      " + }, + "ChannelName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      " + }, + "OriginEndpointName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the origin endpoint. The name is the primary identifier for the origin endpoint, and must be unique for your account in the AWS Region and channel.

      " + }, + "ContainerType":{ + "shape":"ContainerType", + "documentation":"

      The type of container attached to this origin endpoint.

      " + }, + "Segment":{ + "shape":"Segment", + "documentation":"

      The segment configuration, including the segment name, duration, and other configuration values.

      " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the origin endpoint was created.

      " + }, + "ModifiedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the origin endpoint was modified.

      " + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

      The description for your origin endpoint.

      " + }, + "StartoverWindowSeconds":{ + "shape":"Integer", + "documentation":"

      The size of the window (in seconds) to create a window of the live stream that's available for on-demand viewing. Viewers can start-over or catch-up on content that falls within the window.

      " + }, + "HlsManifests":{ + "shape":"GetHlsManifests", + "documentation":"

      An HTTP live streaming (HLS) manifest configuration.

      " + }, + "LowLatencyHlsManifests":{ + "shape":"GetLowLatencyHlsManifests", + "documentation":"

      A low-latency HLS manifest configuration.

      " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The comma-separated list of tag key:value pairs assigned to the origin endpoint.

      " + } + } + }, + "DeleteChannelGroupRequest":{ + "type":"structure", + "required":["ChannelGroupName"], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      ", + "location":"uri", + "locationName":"ChannelGroupName" + } + } + }, + "DeleteChannelGroupResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteChannelPolicyRequest":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName" + ], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      ", + "location":"uri", + "locationName":"ChannelGroupName" + }, + "ChannelName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      ", + "location":"uri", + "locationName":"ChannelName" + } + } + }, + "DeleteChannelPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteChannelRequest":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName" + ], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      ", + "location":"uri", + "locationName":"ChannelGroupName" + }, + "ChannelName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      ", + "location":"uri", + "locationName":"ChannelName" + } + } + }, + "DeleteChannelResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteOriginEndpointPolicyRequest":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName", + "OriginEndpointName" + ], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      ", + "location":"uri", + "locationName":"ChannelGroupName" + }, + "ChannelName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      ", + "location":"uri", + "locationName":"ChannelName" + }, + "OriginEndpointName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the origin endpoint. The name is the primary identifier for the origin endpoint, and must be unique for your account in the AWS Region and channel.

      ", + "location":"uri", + "locationName":"OriginEndpointName" + } + } + }, + "DeleteOriginEndpointPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteOriginEndpointRequest":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName", + "OriginEndpointName" + ], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      ", + "location":"uri", + "locationName":"ChannelGroupName" + }, + "ChannelName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      ", + "location":"uri", + "locationName":"ChannelName" + }, + "OriginEndpointName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the origin endpoint. The name is the primary identifier for the origin endpoint, and must be unique for your account in the AWS Region and channel.

      ", + "location":"uri", + "locationName":"OriginEndpointName" + } + } + }, + "DeleteOriginEndpointResponse":{ + "type":"structure", + "members":{ + } + }, + "DrmSystem":{ + "type":"string", + "enum":[ + "CLEAR_KEY_AES_128", + "FAIRPLAY", + "PLAYREADY", + "WIDEVINE" + ] + }, + "Encryption":{ + "type":"structure", + "required":[ + "EncryptionMethod", + "SpekeKeyProvider" + ], + "members":{ + "ConstantInitializationVector":{ + "shape":"EncryptionConstantInitializationVectorString", + "documentation":"

      A 128-bit, 16-byte hex value represented by a 32-character string, used in conjunction with the key for encrypting content. If you don't specify a value, then MediaPackage creates the constant initialization vector (IV).

      " + }, + "EncryptionMethod":{ + "shape":"EncryptionMethod", + "documentation":"

      The encryption method to use.

      " + }, + "KeyRotationIntervalSeconds":{ + "shape":"EncryptionKeyRotationIntervalSecondsInteger", + "documentation":"

      The frequency (in seconds) of key changes for live workflows, in which content is streamed real time. The service retrieves content keys before the live content begins streaming, and then retrieves them as needed over the lifetime of the workflow. By default, key rotation is set to 300 seconds (5 minutes), the minimum rotation interval, which is equivalent to setting it to 300. If you don't enter an interval, content keys aren't rotated.

      The following example setting causes the service to rotate keys every thirty minutes: 1800

      " + }, + "SpekeKeyProvider":{ + "shape":"SpekeKeyProvider", + "documentation":"

      The parameters for the SPEKE key provider.

      " + } + }, + "documentation":"

      The parameters for encrypting content.

      " + }, + "EncryptionConstantInitializationVectorString":{ + "type":"string", + "max":32, + "min":32, + "pattern":"[0-9a-fA-F]+" + }, + "EncryptionContractConfiguration":{ + "type":"structure", + "required":[ + "PresetSpeke20Audio", + "PresetSpeke20Video" + ], + "members":{ + "PresetSpeke20Audio":{ + "shape":"PresetSpeke20Audio", + "documentation":"

      A collection of audio encryption presets.

      Value description:

      • PRESET-AUDIO-1 - Use one content key to encrypt all of the audio tracks in your stream.

      • PRESET-AUDIO-2 - Use one content key to encrypt all of the stereo audio tracks and one content key to encrypt all of the multichannel audio tracks.

      • PRESET-AUDIO-3 - Use one content key to encrypt all of the stereo audio tracks, one content key to encrypt all of the multichannel audio tracks with 3 to 6 channels, and one content key to encrypt all of the multichannel audio tracks with more than 6 channels.

      • SHARED - Use the same content key for all of the audio and video tracks in your stream.

      • UNENCRYPTED - Don't encrypt any of the audio tracks in your stream.

      " + }, + "PresetSpeke20Video":{ + "shape":"PresetSpeke20Video", + "documentation":"

      A collection of video encryption presets.

      Value description:

      • PRESET-VIDEO-1 - Use one content key to encrypt all of the video tracks in your stream.

      • PRESET-VIDEO-2 - Use one content key to encrypt all of the SD video tracks and one content key for all HD and higher resolutions video tracks.

      • PRESET-VIDEO-3 - Use one content key to encrypt all of the SD video tracks, one content key for HD video tracks and one content key for all UHD video tracks.

      • PRESET-VIDEO-4 - Use one content key to encrypt all of the SD video tracks, one content key for HD video tracks, one content key for all UHD1 video tracks and one content key for all UHD2 video tracks.

      • PRESET-VIDEO-5 - Use one content key to encrypt all of the SD video tracks, one content key for HD1 video tracks, one content key for HD2 video tracks, one content key for all UHD1 video tracks and one content key for all UHD2 video tracks.

      • PRESET-VIDEO-6 - Use one content key to encrypt all of the SD video tracks, one content key for HD1 video tracks, one content key for HD2 video tracks and one content key for all UHD video tracks.

      • PRESET-VIDEO-7 - Use one content key to encrypt all of the SD+HD1 video tracks, one content key for HD2 video tracks and one content key for all UHD video tracks.

      • PRESET-VIDEO-8 - Use one content key to encrypt all of the SD+HD1 video tracks, one content key for HD2 video tracks, one content key for all UHD1 video tracks and one content key for all UHD2 video tracks.

      • SHARED - Use the same content key for all of the video and audio tracks in your stream.

      • UNENCRYPTED - Don't encrypt any of the video tracks in your stream.

      " + } + }, + "documentation":"

      Configure one or more content encryption keys for your endpoints that use SPEKE Version 2.0. The encryption contract defines which content keys are used to encrypt the audio and video tracks in your stream. To configure the encryption contract, specify which audio and video encryption presets to use.

      " + }, + "EncryptionKeyRotationIntervalSecondsInteger":{ + "type":"integer", + "box":true, + "max":31536000, + "min":300 + }, + "EncryptionMethod":{ + "type":"structure", + "members":{ + "TsEncryptionMethod":{ + "shape":"TsEncryptionMethod", + "documentation":"

      The encryption method to use.

      " + }, + "CmafEncryptionMethod":{ + "shape":"CmafEncryptionMethod", + "documentation":"

      The encryption method to use.

      " + } + }, + "documentation":"

      The encryption type.

      " + }, + "GetChannelGroupRequest":{ + "type":"structure", + "required":["ChannelGroupName"], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      ", + "location":"uri", + "locationName":"ChannelGroupName" + } + } + }, + "GetChannelGroupResponse":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "Arn", + "EgressDomain", + "CreatedAt", + "ModifiedAt" + ], + "members":{ + "ChannelGroupName":{ + "shape":"String", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      " + }, + "Arn":{ + "shape":"String", + "documentation":"

      The Amazon Resource Name (ARN) associated with the resource.

      " + }, + "EgressDomain":{ + "shape":"String", + "documentation":"

      The output domain where the source stream should be sent. Integrate the domain with a downstream CDN (such as Amazon CloudFront) or playback device.

      " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the channel group was created.

      " + }, + "ModifiedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the channel group was modified.

      " + }, + "Description":{ + "shape":"String", + "documentation":"

      The description for your channel group.

      " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The comma-separated list of tag key:value pairs assigned to the channel group.

      ", + "locationName":"tags" + } + } + }, + "GetChannelPolicyRequest":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName" + ], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      ", + "location":"uri", + "locationName":"ChannelGroupName" + }, + "ChannelName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      ", + "location":"uri", + "locationName":"ChannelName" + } + } + }, + "GetChannelPolicyResponse":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName", + "Policy" + ], + "members":{ + "ChannelGroupName":{ + "shape":"String", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      " + }, + "ChannelName":{ + "shape":"String", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      " + }, + "Policy":{ + "shape":"String", + "documentation":"

      The policy assigned to the channel.

      " + } + } + }, + "GetChannelRequest":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName" + ], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      ", + "location":"uri", + "locationName":"ChannelGroupName" + }, + "ChannelName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      ", + "location":"uri", + "locationName":"ChannelName" + } + } + }, + "GetChannelResponse":{ + "type":"structure", + "required":[ + "Arn", + "ChannelName", + "ChannelGroupName", + "CreatedAt", + "ModifiedAt" + ], + "members":{ + "Arn":{ + "shape":"String", + "documentation":"

      The Amazon Resource Name (ARN) associated with the resource.

      " + }, + "ChannelName":{ + "shape":"String", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      " + }, + "ChannelGroupName":{ + "shape":"String", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the channel was created.

      " + }, + "ModifiedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the channel was modified.

      " + }, + "Description":{ + "shape":"String", + "documentation":"

      The description for your channel.

      " + }, + "IngestEndpoints":{"shape":"IngestEndpointList"}, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The comma-separated list of tag key:value pairs assigned to the channel.

      " + } + } + }, + "GetHlsManifestConfiguration":{ + "type":"structure", + "required":[ + "ManifestName", + "Url" + ], + "members":{ + "ManifestName":{ + "shape":"ResourceName", + "documentation":"

      A short string that's appended to the endpoint URL. The manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default manifest name, index. MediaPackage automatically inserts the format extension, such as .m3u8. You can't use the same manifest name if you use HLS manifest and low-latency HLS manifest. The manifestName on the HLSManifest object overrides the manifestName you provided on the originEndpoint object.

      " + }, + "Url":{ + "shape":"String", + "documentation":"

      The egress domain URL for stream delivery from MediaPackage.

      " + }, + "ChildManifestName":{ + "shape":"ResourceName", + "documentation":"

      A short string that's appended to the endpoint URL. The child manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default child manifest name, index_1. The manifestName on the HLSManifest object overrides the manifestName you provided on the originEndpoint object.

      " + }, + "ManifestWindowSeconds":{ + "shape":"Integer", + "documentation":"

      The total duration (in seconds) of the manifest's content.

      " + }, + "ProgramDateTimeIntervalSeconds":{ + "shape":"Integer", + "documentation":"

      Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval, EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest. The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player. ID3Timed metadata messages generate every 5 seconds whenever the content is ingested.

      Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output.

      " + }, + "ScteHls":{"shape":"ScteHls"} + }, + "documentation":"

      Retrieve the HTTP live streaming (HLS) manifest configuration.

      " + }, + "GetHlsManifests":{ + "type":"list", + "member":{"shape":"GetHlsManifestConfiguration"} + }, + "GetLowLatencyHlsManifestConfiguration":{ + "type":"structure", + "required":[ + "ManifestName", + "Url" + ], + "members":{ + "ManifestName":{ + "shape":"ResourceName", + "documentation":"

      A short string that's appended to the endpoint URL. The manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default manifest name, index. MediaPackage automatically inserts the format extension, such as .m3u8. You can't use the same manifest name if you use HLS manifest and low-latency HLS manifest. The manifestName on the HLSManifest object overrides the manifestName you provided on the originEndpoint object.

      " + }, + "Url":{ + "shape":"String", + "documentation":"

      The egress domain URL for stream delivery from MediaPackage.

      " + }, + "ChildManifestName":{ + "shape":"ResourceName", + "documentation":"

      A short string that's appended to the endpoint URL. The child manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default child manifest name, index_1. The manifestName on the HLSManifest object overrides the manifestName you provided on the originEndpoint object.

      " + }, + "ManifestWindowSeconds":{ + "shape":"Integer", + "documentation":"

      The total duration (in seconds) of the manifest's content.

      " + }, + "ProgramDateTimeIntervalSeconds":{ + "shape":"Integer", + "documentation":"

      Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval, EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest. The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player. ID3Timed metadata messages generate every 5 seconds whenever the content is ingested.

      Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output.

      " + }, + "ScteHls":{"shape":"ScteHls"} + }, + "documentation":"

      Retrieve the low-latency HTTP live streaming (HLS) manifest configuration.

      " + }, + "GetLowLatencyHlsManifests":{ + "type":"list", + "member":{"shape":"GetLowLatencyHlsManifestConfiguration"} + }, + "GetOriginEndpointPolicyRequest":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName", + "OriginEndpointName" + ], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      ", + "location":"uri", + "locationName":"ChannelGroupName" + }, + "ChannelName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      ", + "location":"uri", + "locationName":"ChannelName" + }, + "OriginEndpointName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the origin endpoint. The name is the primary identifier for the origin endpoint, and must be unique for your account in the AWS Region and channel.

      ", + "location":"uri", + "locationName":"OriginEndpointName" + } + } + }, + "GetOriginEndpointPolicyResponse":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName", + "OriginEndpointName", + "Policy" + ], + "members":{ + "ChannelGroupName":{ + "shape":"String", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      " + }, + "ChannelName":{ + "shape":"String", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      " + }, + "OriginEndpointName":{ + "shape":"String", + "documentation":"

      The name that describes the origin endpoint. The name is the primary identifier for the origin endpoint, and must be unique for your account in the AWS Region and channel.

      " + }, + "Policy":{ + "shape":"String", + "documentation":"

      The policy assigned to the origin endpoint.

      " + } + } + }, + "GetOriginEndpointRequest":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName", + "OriginEndpointName" + ], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      ", + "location":"uri", + "locationName":"ChannelGroupName" + }, + "ChannelName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      ", + "location":"uri", + "locationName":"ChannelName" + }, + "OriginEndpointName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the origin endpoint. The name is the primary identifier for the origin endpoint, and must be unique for your account in the AWS Region and channel.

      ", + "location":"uri", + "locationName":"OriginEndpointName" + } + } + }, + "GetOriginEndpointResponse":{ + "type":"structure", + "required":[ + "Arn", + "ChannelGroupName", + "ChannelName", + "OriginEndpointName", + "ContainerType", + "Segment", + "CreatedAt", + "ModifiedAt" + ], + "members":{ + "Arn":{ + "shape":"String", + "documentation":"

      The Amazon Resource Name (ARN) associated with the resource.

      " + }, + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      " + }, + "ChannelName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      " + }, + "OriginEndpointName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the origin endpoint. The name is the primary identifier for the origin endpoint, and must be unique for your account in the AWS Region and channel.

      " + }, + "ContainerType":{ + "shape":"ContainerType", + "documentation":"

      The type of container attached to this origin endpoint.

      " + }, + "Segment":{"shape":"Segment"}, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the origin endpoint was created.

      " + }, + "ModifiedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the origin endpoint was modified.

      " + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

      The description for your origin endpoint.

      " + }, + "StartoverWindowSeconds":{ + "shape":"Integer", + "documentation":"

      The size of the window (in seconds) to create a window of the live stream that's available for on-demand viewing. Viewers can start-over or catch-up on content that falls within the window.

      " + }, + "HlsManifests":{ + "shape":"GetHlsManifests", + "documentation":"

      An HTTP live streaming (HLS) manifest configuration.

      " + }, + "LowLatencyHlsManifests":{ + "shape":"GetLowLatencyHlsManifests", + "documentation":"

      A low-latency HLS manifest configuration.

      " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The comma-separated list of tag key:value pairs assigned to the origin endpoint.

      " + } + } + }, + "IdempotencyToken":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\S]+" + }, + "IngestEndpoint":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"String", + "documentation":"

      The system-generated unique identifier for the IngestEndpoint.

      " + }, + "Url":{ + "shape":"String", + "documentation":"

      The ingest domain URL where the source stream should be sent.

      " + } + }, + "documentation":"

      The ingest domain URL where the source stream should be sent.

      " + }, + "IngestEndpointList":{ + "type":"list", + "member":{"shape":"IngestEndpoint"}, + "documentation":"

      The list of ingest endpoints.

      " + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

      Indicates that an error from the service occurred while trying to process a request.

      ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "ListChannelGroupsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"ListResourceMaxResults", + "documentation":"

      The maximum number of results to return in the response.

      ", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "documentation":"

      The pagination token from the GET list request. Use the token to fetch the next page of results.

      ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListChannelGroupsResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"ChannelGroupsList", + "documentation":"

      The objects being returned.

      " + }, + "NextToken":{ + "shape":"String", + "documentation":"

      The pagination token from the GET list request. Use the token to fetch the next page of results.

      " + } + } + }, + "ListChannelsRequest":{ + "type":"structure", + "required":["ChannelGroupName"], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      ", + "location":"uri", + "locationName":"ChannelGroupName" + }, + "MaxResults":{ + "shape":"ListResourceMaxResults", + "documentation":"

      The maximum number of results to return in the response.

      ", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "documentation":"

      The pagination token from the GET list request. Use the token to fetch the next page of results.

      ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListChannelsResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"ChannelList", + "documentation":"

      The objects being returned.

      " + }, + "NextToken":{ + "shape":"String", + "documentation":"

      The pagination token from the GET list request.

      " + } + } + }, + "ListHlsManifestConfiguration":{ + "type":"structure", + "required":["ManifestName"], + "members":{ + "ManifestName":{ + "shape":"ResourceName", + "documentation":"

      A short string that's appended to the endpoint URL. The manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default manifest name, index. MediaPackage automatically inserts the format extension, such as .m3u8. You can't use the same manifest name if you use HLS manifest and low-latency HLS manifest. The manifestName on the HLSManifest object overrides the manifestName you provided on the originEndpoint object.

      " + }, + "ChildManifestName":{ + "shape":"ResourceName", + "documentation":"

      A short string that's appended to the endpoint URL. The child manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default child manifest name, index_1. The manifestName on the HLSManifest object overrides the manifestName you provided on the originEndpoint object.

      " + }, + "Url":{ + "shape":"String", + "documentation":"

      The egress domain URL for stream delivery from MediaPackage.

      " + } + }, + "documentation":"

      List the HTTP live streaming (HLS) manifest configuration.

      " + }, + "ListHlsManifests":{ + "type":"list", + "member":{"shape":"ListHlsManifestConfiguration"} + }, + "ListLowLatencyHlsManifestConfiguration":{ + "type":"structure", + "required":["ManifestName"], + "members":{ + "ManifestName":{ + "shape":"ResourceName", + "documentation":"

      A short string that's appended to the endpoint URL. The manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default manifest name, index. MediaPackage automatically inserts the format extension, such as .m3u8. You can't use the same manifest name if you use HLS manifest and low-latency HLS manifest. The manifestName on the HLSManifest object overrides the manifestName you provided on the originEndpoint object.

      " + }, + "ChildManifestName":{ + "shape":"ResourceName", + "documentation":"

      A short string that's appended to the endpoint URL. The child manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default child manifest name, index_1. The manifestName on the HLSManifest object overrides the manifestName you provided on the originEndpoint object.

      " + }, + "Url":{ + "shape":"String", + "documentation":"

      The egress domain URL for stream delivery from MediaPackage.

      " + } + }, + "documentation":"

      List the low-latency HTTP live streaming (HLS) manifest configuration.

      " + }, + "ListLowLatencyHlsManifests":{ + "type":"list", + "member":{"shape":"ListLowLatencyHlsManifestConfiguration"} + }, + "ListOriginEndpointsRequest":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName" + ], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      ", + "location":"uri", + "locationName":"ChannelGroupName" + }, + "ChannelName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      ", + "location":"uri", + "locationName":"ChannelName" + }, + "MaxResults":{ + "shape":"ListResourceMaxResults", + "documentation":"

      The maximum number of results to return in the response.

      ", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "documentation":"

      The pagination token from the GET list request. Use the token to fetch the next page of results.

      ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListOriginEndpointsResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"OriginEndpointsList", + "documentation":"

      The objects being returned.

      " + }, + "NextToken":{ + "shape":"String", + "documentation":"

      The pagination token from the GET list request. Use the token to fetch the next page of results.

      " + } + } + }, + "ListResourceMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"TagArn", + "documentation":"

      The ARN of the MediaPackage resource that you want to view tags for.

      ", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagMap", + "documentation":"

      Contains a map of the key-value pairs for the resource tag or tags assigned to the resource.

      ", + "locationName":"tags" + } + } + }, + "ManifestName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9-]+" + }, + "OriginEndpointListConfiguration":{ + "type":"structure", + "required":[ + "Arn", + "ChannelGroupName", + "ChannelName", + "OriginEndpointName", + "ContainerType" + ], + "members":{ + "Arn":{ + "shape":"String", + "documentation":"

      The Amazon Resource Name (ARN) associated with the resource.

      " + }, + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      " + }, + "ChannelName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      " + }, + "OriginEndpointName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the origin endpoint. The name is the primary identifier for the origin endpoint, and must be unique for your account in the AWS Region and channel.

      " + }, + "ContainerType":{ + "shape":"ContainerType", + "documentation":"

      The type of container attached to this origin endpoint. A container type is a file format that encapsulates one or more media streams, such as audio and video, into a single file.

      " + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

      Any descriptive information that you want to add to the origin endpoint for future identification purposes.

      " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the origin endpoint was created.

      " + }, + "ModifiedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the origin endpoint was modified.

      " + }, + "HlsManifests":{ + "shape":"ListHlsManifests", + "documentation":"

      An HTTP live streaming (HLS) manifest configuration.

      " + }, + "LowLatencyHlsManifests":{ + "shape":"ListLowLatencyHlsManifests", + "documentation":"

      A low-latency HLS manifest configuration.

      " + } + }, + "documentation":"

      The configuration of the origin endpoint.

      " + }, + "OriginEndpointsList":{ + "type":"list", + "member":{"shape":"OriginEndpointListConfiguration"} + }, + "PolicyText":{ + "type":"string", + "max":6144, + "min":0 + }, + "PresetSpeke20Audio":{ + "type":"string", + "enum":[ + "PRESET_AUDIO_1", + "PRESET_AUDIO_2", + "PRESET_AUDIO_3", + "SHARED", + "UNENCRYPTED" + ] + }, + "PresetSpeke20Video":{ + "type":"string", + "enum":[ + "PRESET_VIDEO_1", + "PRESET_VIDEO_2", + "PRESET_VIDEO_3", + "PRESET_VIDEO_4", + "PRESET_VIDEO_5", + "PRESET_VIDEO_6", + "PRESET_VIDEO_7", + "PRESET_VIDEO_8", + "SHARED", + "UNENCRYPTED" + ] + }, + "PutChannelPolicyRequest":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName", + "Policy" + ], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      ", + "location":"uri", + "locationName":"ChannelGroupName" + }, + "ChannelName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      ", + "location":"uri", + "locationName":"ChannelName" + }, + "Policy":{ + "shape":"PolicyText", + "documentation":"

      The policy to attach to the specified channel.

      " + } + } + }, + "PutChannelPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "PutOriginEndpointPolicyRequest":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName", + "OriginEndpointName", + "Policy" + ], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      ", + "location":"uri", + "locationName":"ChannelGroupName" + }, + "ChannelName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      ", + "location":"uri", + "locationName":"ChannelName" + }, + "OriginEndpointName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the origin endpoint. The name is the primary identifier for the origin endpoint, and must be unique for your account in the AWS Region and channel.

      ", + "location":"uri", + "locationName":"OriginEndpointName" + }, + "Policy":{ + "shape":"PolicyText", + "documentation":"

      The policy to attach to the specified origin endpoint.

      " + } + } + }, + "PutOriginEndpointPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "ResourceDescription":{ + "type":"string", + "max":1024, + "min":0 + }, + "ResourceName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "ResourceTypeNotFound":{ + "shape":"ResourceTypeNotFound", + "documentation":"

      The specified resource type wasn't found.

      " + } + }, + "documentation":"

      The specified resource doesn't exist.

      ", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResourceTypeNotFound":{ + "type":"string", + "enum":[ + "CHANNEL_GROUP", + "CHANNEL", + "ORIGIN_ENDPOINT" + ] + }, + "Scte":{ + "type":"structure", + "members":{ + "ScteFilter":{ + "shape":"ScteFilterList", + "documentation":"

      The SCTE-35 message types that you want to be treated as ad markers in the output.

      " + } + }, + "documentation":"

      The SCTE configuration.

      " + }, + "ScteFilter":{ + "type":"string", + "enum":[ + "SPLICE_INSERT", + "BREAK", + "PROVIDER_ADVERTISEMENT", + "DISTRIBUTOR_ADVERTISEMENT", + "PROVIDER_PLACEMENT_OPPORTUNITY", + "DISTRIBUTOR_PLACEMENT_OPPORTUNITY", + "PROVIDER_OVERLAY_PLACEMENT_OPPORTUNITY", + "DISTRIBUTOR_OVERLAY_PLACEMENT_OPPORTUNITY", + "PROGRAM" + ] + }, + "ScteFilterList":{ + "type":"list", + "member":{"shape":"ScteFilter"}, + "max":100, + "min":0 + }, + "ScteHls":{ + "type":"structure", + "members":{ + "AdMarkerHls":{ + "shape":"AdMarkerHls", + "documentation":"

      Ad markers indicate when ads should be inserted during playback. If you include ad markers in the content stream in your upstream encoders, then you need to inform MediaPackage what to do with the ad markers in the output. Choose what you want MediaPackage to do with the ad markers.

      Value description:

      • DATERANGE - Insert EXT-X-DATERANGE tags to signal ad and program transition events in TS and CMAF manifests. If you use DATERANGE, you must set a programDateTimeIntervalSeconds value of 1 or higher. To learn more about DATERANGE, see SCTE-35 Ad Marker EXT-X-DATERANGE.

      " + } + }, + "documentation":"

      The SCTE configuration.

      " + }, + "Segment":{ + "type":"structure", + "members":{ + "SegmentDurationSeconds":{ + "shape":"SegmentSegmentDurationSecondsInteger", + "documentation":"

      The duration (in seconds) of each segment. Enter a value equal to, or a multiple of, the input segment duration. If the value that you enter is different from the input segment duration, MediaPackage rounds segments to the nearest multiple of the input segment duration.

      " + }, + "SegmentName":{ + "shape":"SegmentSegmentNameString", + "documentation":"

      The name that describes the segment. The name is the base name of the segment used in all content manifests inside of the endpoint. You can't use spaces in the name.

      " + }, + "TsUseAudioRenditionGroup":{ + "shape":"Boolean", + "documentation":"

      When selected, MediaPackage bundles all audio tracks in a rendition group. All other tracks in the stream can be used with any audio rendition from the group.

      " + }, + "IncludeIframeOnlyStreams":{ + "shape":"Boolean", + "documentation":"

      When selected, the stream set includes an additional I-frame only stream, along with the other tracks. If false, this extra stream is not included. MediaPackage generates an I-frame only stream from the first rendition in the manifest. The service inserts EXT-I-FRAMES-ONLY tags in the output manifest, and then generates and includes an I-frames only playlist in the stream. This playlist permits player functionality like fast forward and rewind.

      " + }, + "TsIncludeDvbSubtitles":{ + "shape":"Boolean", + "documentation":"

      By default, MediaPackage excludes all digital video broadcasting (DVB) subtitles from the output. When selected, MediaPackage passes through DVB subtitles into the output.

      " + }, + "Scte":{ + "shape":"Scte", + "documentation":"

      The SCTE configuration options in the segment settings.

      " + }, + "Encryption":{"shape":"Encryption"} + }, + "documentation":"

      The segment configuration, including the segment name, duration, and other configuration values.

      " + }, + "SegmentSegmentDurationSecondsInteger":{ + "type":"integer", + "box":true, + "max":30, + "min":1 + }, + "SegmentSegmentNameString":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

      The request would cause a service quota to be exceeded.

      ", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "SpekeKeyProvider":{ + "type":"structure", + "required":[ + "EncryptionContractConfiguration", + "ResourceId", + "DrmSystems", + "RoleArn", + "Url" + ], + "members":{ + "EncryptionContractConfiguration":{ + "shape":"EncryptionContractConfiguration", + "documentation":"

      Configure one or more content encryption keys for your endpoints that use SPEKE Version 2.0. The encryption contract defines which content keys are used to encrypt the audio and video tracks in your stream. To configure the encryption contract, specify which audio and video encryption presets to use.

      " + }, + "ResourceId":{ + "shape":"SpekeKeyProviderResourceIdString", + "documentation":"

      The unique identifier for the content. The service sends this to the key server to identify the current endpoint. How unique you make this depends on how fine-grained you want access controls to be. The service does not permit you to use the same ID for two simultaneous encryption processes. The resource ID is also known as the content ID.

      The following example shows a resource ID: MovieNight20171126093045

      " + }, + "DrmSystems":{ + "shape":"SpekeKeyProviderDrmSystemsList", + "documentation":"

      The DRM solution provider you're using to protect your content during distribution.

      " + }, + "RoleArn":{ + "shape":"SpekeKeyProviderRoleArnString", + "documentation":"

      The ARN for the IAM role granted by the key provider that provides access to the key provider API. This role must have a trust policy that allows MediaPackage to assume the role, and it must have a sufficient permissions policy to allow access to the specific key retrieval URL. Get this from your DRM solution provider.

      Valid format: arn:aws:iam::{accountID}:role/{name}. The following example shows a role ARN: arn:aws:iam::444455556666:role/SpekeAccess

      " + }, + "Url":{ + "shape":"SpekeKeyProviderUrlString", + "documentation":"

      The URL of the API Gateway proxy that you set up to talk to your key server. The API Gateway proxy must reside in the same AWS Region as MediaPackage and must start with https://.

      The following example shows a URL: https://1wm2dx1f33.execute-api.us-west-2.amazonaws.com/SpekeSample/copyProtection

      " + } + }, + "documentation":"

      The parameters for the SPEKE key provider.

      " + }, + "SpekeKeyProviderDrmSystemsList":{ + "type":"list", + "member":{"shape":"DrmSystem"}, + "max":4, + "min":1 + }, + "SpekeKeyProviderResourceIdString":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[0-9a-zA-Z_-]+" + }, + "SpekeKeyProviderRoleArnString":{ + "type":"string", + "max":2048, + "min":1 + }, + "SpekeKeyProviderUrlString":{ + "type":"string", + "max":1024, + "min":1 + }, + "String":{"type":"string"}, + "TagArn":{"type":"string"}, + "TagKey":{"type":"string"}, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"} + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"TagArn", + "documentation":"

      The ARN of the MediaPackage resource that you're adding tags to.

      ", + "location":"uri", + "locationName":"ResourceArn" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      Contains a map of the key-value pairs for the resource tag or tags assigned to the resource.

      ", + "locationName":"tags" + } + } + }, + "TagValue":{"type":"string"}, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

      The request throughput limit was exceeded.

      ", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "TsEncryptionMethod":{ + "type":"string", + "enum":[ + "AES_128", + "SAMPLE_AES" + ] + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"TagArn", + "documentation":"

      The ARN of the MediaPackage resource that you're removing tags from.

      ", + "location":"uri", + "locationName":"ResourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

      The list of tag keys to remove from the resource.

      ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UpdateChannelGroupRequest":{ + "type":"structure", + "required":["ChannelGroupName"], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      ", + "location":"uri", + "locationName":"ChannelGroupName" + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

      Any descriptive information that you want to add to the channel group for future identification purposes.

      " + } + } + }, + "UpdateChannelGroupResponse":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "Arn", + "EgressDomain", + "CreatedAt", + "ModifiedAt" + ], + "members":{ + "ChannelGroupName":{ + "shape":"String", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      " + }, + "Arn":{ + "shape":"String", + "documentation":"

      The Amazon Resource Name (ARN) associated with the resource.

      " + }, + "EgressDomain":{ + "shape":"String", + "documentation":"

      The output domain where the source stream is sent. Integrate the domain with a downstream CDN (such as Amazon CloudFront) or playback device.

      " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the channel group was created.

      " + }, + "ModifiedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the channel group was modified.

      " + }, + "Description":{ + "shape":"String", + "documentation":"

      The description for your channel group.

      " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The comma-separated list of tag key:value pairs assigned to the channel group.

      ", + "locationName":"tags" + } + } + }, + "UpdateChannelRequest":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName" + ], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      ", + "location":"uri", + "locationName":"ChannelGroupName" + }, + "ChannelName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      ", + "location":"uri", + "locationName":"ChannelName" + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

      Any descriptive information that you want to add to the channel for future identification purposes.

      " + } + } + }, + "UpdateChannelResponse":{ + "type":"structure", + "required":[ + "Arn", + "ChannelName", + "ChannelGroupName", + "CreatedAt", + "ModifiedAt" + ], + "members":{ + "Arn":{ + "shape":"String", + "documentation":"

      The Amazon Resource Name (ARN) associated with the resource.

      " + }, + "ChannelName":{ + "shape":"String", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      " + }, + "ChannelGroupName":{ + "shape":"String", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the channel was created.

      " + }, + "ModifiedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the channel was modified.

      " + }, + "Description":{ + "shape":"String", + "documentation":"

      The description for your channel.

      " + }, + "IngestEndpoints":{"shape":"IngestEndpointList"}, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The comma-separated list of tag key:value pairs assigned to the channel.

      ", + "locationName":"tags" + } + } + }, + "UpdateOriginEndpointRequest":{ + "type":"structure", + "required":[ + "ChannelGroupName", + "ChannelName", + "OriginEndpointName", + "ContainerType" + ], + "members":{ + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      ", + "location":"uri", + "locationName":"ChannelGroupName" + }, + "ChannelName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      ", + "location":"uri", + "locationName":"ChannelName" + }, + "OriginEndpointName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the origin endpoint. The name is the primary identifier for the origin endpoint, and must be unique for your account in the AWS Region and channel.

      ", + "location":"uri", + "locationName":"OriginEndpointName" + }, + "ContainerType":{ + "shape":"ContainerType", + "documentation":"

      The type of container attached to this origin endpoint. A container type is a file format that encapsulates one or more media streams, such as audio and video, into a single file.

      " + }, + "Segment":{ + "shape":"Segment", + "documentation":"

      The segment configuration, including the segment name, duration, and other configuration values.

      " + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

      Any descriptive information that you want to add to the origin endpoint for future identification purposes.

      " + }, + "StartoverWindowSeconds":{ + "shape":"UpdateOriginEndpointRequestStartoverWindowSecondsInteger", + "documentation":"

      The size of the window (in seconds) to create a window of the live stream that's available for on-demand viewing. Viewers can start-over or catch-up on content that falls within the window. The maximum startover window is 1,209,600 seconds (14 days).

      " + }, + "HlsManifests":{ + "shape":"CreateHlsManifests", + "documentation":"

      An HTTP live streaming (HLS) manifest configuration.

      " + }, + "LowLatencyHlsManifests":{ + "shape":"CreateLowLatencyHlsManifests", + "documentation":"

      A low-latency HLS manifest configuration.

      " + } + } + }, + "UpdateOriginEndpointRequestStartoverWindowSecondsInteger":{ + "type":"integer", + "box":true, + "max":1209600, + "min":60 + }, + "UpdateOriginEndpointResponse":{ + "type":"structure", + "required":[ + "Arn", + "ChannelGroupName", + "ChannelName", + "OriginEndpointName", + "ContainerType", + "Segment", + "CreatedAt", + "ModifiedAt" + ], + "members":{ + "Arn":{ + "shape":"String", + "documentation":"

      The ARN associated with the resource.

      " + }, + "ChannelGroupName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.

      " + }, + "ChannelName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the channel. The name is the primary identifier for the channel, and must be unique for your account in the AWS Region and channel group.

      " + }, + "OriginEndpointName":{ + "shape":"ResourceName", + "documentation":"

      The name that describes the origin endpoint. The name is the primary identifier for the origin endpoint, and must be unique for your account in the AWS Region and channel.

      " + }, + "ContainerType":{ + "shape":"ContainerType", + "documentation":"

      The type of container attached to this origin endpoint.

      " + }, + "Segment":{ + "shape":"Segment", + "documentation":"

      The segment configuration, including the segment name, duration, and other configuration values.

      " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the origin endpoint was created.

      " + }, + "ModifiedAt":{ + "shape":"Timestamp", + "documentation":"

      The date and time the origin endpoint was modified.

      " + }, + "Description":{ + "shape":"ResourceDescription", + "documentation":"

      The description of the origin endpoint.

      " + }, + "StartoverWindowSeconds":{ + "shape":"Integer", + "documentation":"

      The size of the window (in seconds) to create a window of the live stream that's available for on-demand viewing. Viewers can start-over or catch-up on content that falls within the window.

      " + }, + "HlsManifests":{ + "shape":"GetHlsManifests", + "documentation":"

      An HTTP live streaming (HLS) manifest configuration.

      " + }, + "LowLatencyHlsManifests":{ + "shape":"GetLowLatencyHlsManifests", + "documentation":"

      A low-latency HLS manifest configuration.

      " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The comma-separated list of tag key:value pairs assigned to the origin endpoint.

      ", + "locationName":"tags" + } + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "ValidationExceptionType":{ + "shape":"ValidationExceptionType", + "documentation":"

      The type of ValidationException.

      " + } + }, + "documentation":"

      The input failed to meet the constraints specified by the AWS service.

      ", + "exception":true + }, + "ValidationExceptionType":{ + "type":"string", + "enum":[ + "CONTAINER_TYPE_IMMUTABLE", + "INVALID_PAGINATION_TOKEN", + "INVALID_PAGINATION_MAX_RESULTS", + "INVALID_POLICY", + "INVALID_ROLE_ARN", + "MANIFEST_NAME_COLLISION", + "ENCRYPTION_METHOD_CONTAINER_TYPE_MISMATCH", + "CENC_IV_INCOMPATIBLE", + "ENCRYPTION_CONTRACT_WITHOUT_AUDIO_RENDITION_INCOMPATIBLE", + "ENCRYPTION_CONTRACT_UNENCRYPTED", + "ENCRYPTION_CONTRACT_SHARED", + "NUM_MANIFESTS_LOW", + "NUM_MANIFESTS_HIGH", + "DRM_SYSTEMS_ENCRYPTION_METHOD_INCOMPATIBLE", + "ROLE_ARN_NOT_ASSUMABLE", + "ROLE_ARN_LENGTH_OUT_OF_RANGE", + "ROLE_ARN_INVALID_FORMAT", + "URL_INVALID", + "URL_SCHEME", + "URL_USER_INFO", + "URL_PORT", + "URL_UNKNOWN_HOST", + "URL_LOCAL_ADDRESS", + "URL_LOOPBACK_ADDRESS", + "URL_LINK_LOCAL_ADDRESS", + "URL_MULTICAST_ADDRESS", + "MEMBER_INVALID", + "MEMBER_MISSING", + "MEMBER_MIN_VALUE", + "MEMBER_MAX_VALUE", + "MEMBER_MIN_LENGTH", + "MEMBER_MAX_LENGTH", + "MEMBER_INVALID_ENUM_VALUE", + "MEMBER_DOES_NOT_MATCH_PATTERN" + ] + } + }, + "documentation":"

      This guide is intended for creating AWS Elemental MediaPackage resources in MediaPackage Version 2 (v2) starting from May 2023. To get started with MediaPackage v2, create your MediaPackage resources. There isn't an automated process to migrate your resources from MediaPackage v1 to MediaPackage v2.

      The names of the entities that you use to access this API, like URLs and ARNs, all have the versioning information added, like \"v2\", to distinguish from the prior version. If you used MediaPackage prior to this release, you can't use the MediaPackage v2 CLI or the MediaPackage v2 API to access any MediaPackage v1 resources.

      If you created resources in MediaPackage v1, use video on demand (VOD) workflows, and aren't looking to migrate to MediaPackage v2 yet, see the MediaPackage v1 Live API Reference.

      This is the AWS Elemental MediaPackage v2 Live REST API Reference. It describes all the MediaPackage API operations for live content in detail, and provides sample requests, responses, and errors for the supported web services protocols.

      We assume that you have the IAM permissions that you need to use MediaPackage via the REST API. We also assume that you are familiar with the features and operations of MediaPackage, as described in the AWS Elemental MediaPackage User Guide.

      " +} diff --git a/services/mediapackagev2/src/main/resources/codegen-resources/waiters-2.json b/services/mediapackagev2/src/main/resources/codegen-resources/waiters-2.json new file mode 100644 index 000000000000..13f60ee66be6 --- /dev/null +++ b/services/mediapackagev2/src/main/resources/codegen-resources/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/services/mediapackagevod/pom.xml b/services/mediapackagevod/pom.xml index 0ceb666eacee..575170dedc0e 100644 --- a/services/mediapackagevod/pom.xml +++ b/services/mediapackagevod/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT mediapackagevod AWS Java SDK :: Services :: MediaPackage Vod diff --git a/services/mediastore/pom.xml b/services/mediastore/pom.xml index 8fa9e20a03b6..54abe007fec2 100644 --- a/services/mediastore/pom.xml +++ b/services/mediastore/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 mediastore diff --git a/services/mediastoredata/pom.xml b/services/mediastoredata/pom.xml index 5e2797b6be04..8f85199f21e5 100644 --- a/services/mediastoredata/pom.xml +++ b/services/mediastoredata/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 mediastoredata diff --git a/services/mediatailor/pom.xml b/services/mediatailor/pom.xml index 62f6d4acf69c..6af3c2c39ee0 100644 --- a/services/mediatailor/pom.xml +++ b/services/mediatailor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT mediatailor AWS Java SDK :: Services :: MediaTailor diff --git a/services/memorydb/pom.xml b/services/memorydb/pom.xml index ab659c673182..d691552be573 100644 --- a/services/memorydb/pom.xml +++ b/services/memorydb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT memorydb AWS Java SDK :: Services :: Memory DB diff --git 
a/services/memorydb/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/memorydb/src/main/resources/codegen-resources/endpoint-rule-set.json index 012d61ed506e..4a448006958c 100644 --- a/services/memorydb/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/memorydb/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": 
"booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,98 +111,250 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://memory-db-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], 
+ "endpoint": { + "url": "https://memory-db-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://memory-db-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://memory-db.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "fips" + ] + } + ], + "endpoint": { + "url": "https://memory-db-fips.us-west-1.amazonaws.com", + "properties": { + "authSchemes": [ + { + 
"name": "sigv4", + "signingName": "memorydb", + "signingRegion": "us-west-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [], "endpoint": { - "url": "https://memory-db-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://memory-db.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -231,99 +363,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://memory-db.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "fips" - ] - } - ], - "endpoint": { - "url": "https://memory-db-fips.us-west-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://memory-db.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/memorydb/src/main/resources/codegen-resources/endpoint-tests.json b/services/memorydb/src/main/resources/codegen-resources/endpoint-tests.json index d47326f8dea9..21db4f60bb99 100644 --- 
a/services/memorydb/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/memorydb/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,666 +1,168 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://memory-db.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://memory-db.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://memory-db.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://memory-db.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://memory-db.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://memory-db.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.eu-central-1.amazonaws.com" - } - 
}, - "params": { - "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://memory-db.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://memory-db.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://memory-db.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://memory-db.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": true - } - 
}, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://memory-db.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://memory-db.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://memory-db.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://memory-db.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - 
"expect": { - "endpoint": { - "url": "https://memory-db-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://memory-db.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://memory-db.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://memory-db.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://memory-db.eu-west-2.amazonaws.com" - } - }, - "params": { - 
"UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db.eu-west-1.api.aws" + "url": "https://memory-db.ap-east-1.amazonaws.com" } }, "params": { + "Region": "ap-east-1", "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db.eu-west-1.amazonaws.com" + "url": "https://memory-db.ap-northeast-1.amazonaws.com" } }, "params": { + "Region": "ap-northeast-1", "UseFIPS": false, - "Region": "eu-west-1", "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://memory-db-fips.ap-northeast-2.amazonaws.com" + "url": "https://memory-db.ap-northeast-2.amazonaws.com" } }, "params": { - "UseFIPS": true, "Region": "ap-northeast-2", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db.ap-northeast-2.api.aws" + "url": "https://memory-db.ap-south-1.amazonaws.com" } }, "params": { + "Region": "ap-south-1", "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db.ap-northeast-2.amazonaws.com" + "url": "https://memory-db.ap-southeast-1.amazonaws.com" } }, "params": { + "Region": "ap-southeast-1", "UseFIPS": false, - "Region": "ap-northeast-2", "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db-fips.ap-northeast-1.api.aws" + "url": "https://memory-db.ap-southeast-2.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": true + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db-fips.ap-northeast-1.amazonaws.com" + "url": "https://memory-db.ca-central-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", 
+ "Region": "ca-central-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db.ap-northeast-1.api.aws" + "url": "https://memory-db.eu-central-1.amazonaws.com" } }, "params": { + "Region": "eu-central-1", "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db.ap-northeast-1.amazonaws.com" + "url": "https://memory-db.eu-north-1.amazonaws.com" } }, "params": { + "Region": "eu-north-1", "UseFIPS": false, - "Region": "ap-northeast-1", "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db-fips.sa-east-1.api.aws" + "url": "https://memory-db.eu-west-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "sa-east-1", - "UseDualStack": true + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db-fips.sa-east-1.amazonaws.com" + "url": "https://memory-db.eu-west-2.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "sa-east-1", + "Region": "eu-west-2", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region fips with FIPS 
disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db.sa-east-1.api.aws" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "memorydb", + "signingRegion": "us-west-1" + } + ] + }, + "url": "https://memory-db-fips.us-west-1.amazonaws.com" } }, "params": { + "Region": "fips", "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": true + "UseDualStack": false } }, { @@ -671,99 +173,99 @@ } }, "params": { - "UseFIPS": false, "Region": "sa-east-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db-fips.ap-east-1.api.aws" + "url": "https://memory-db.us-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-east-1", - "UseDualStack": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db-fips.ap-east-1.amazonaws.com" + "url": "https://memory-db.us-east-2.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-east-1", + "Region": "us-east-2", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db.ap-east-1.api.aws" + "url": "https://memory-db.us-west-1.amazonaws.com" } }, "params": { + "Region": "us-west-1", "UseFIPS": false, - "Region": "ap-east-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + 
"documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db.ap-east-1.amazonaws.com" + "url": "https://memory-db.us-west-2.amazonaws.com" } }, "params": { + "Region": "us-west-2", "UseFIPS": false, - "Region": "ap-east-1", "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://memory-db-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://memory-db-fips.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "cn-north-1", "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db-fips.cn-north-1.amazonaws.com.cn" + "url": "https://memory-db-fips.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "cn-north-1", "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://memory-db.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://memory-db.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "cn-north-1", "UseDualStack": true } }, @@ -775,273 +277,227 @@ } }, "params": { - "UseFIPS": false, "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://memory-db-fips.ap-southeast-1.api.aws" + "url": "https://memory-db.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-1", - "UseDualStack": true + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://memory-db-fips.ap-southeast-1.amazonaws.com" + "url": "https://memory-db-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "Region": "ap-southeast-1", - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db.ap-southeast-1.api.aws" + "url": "https://memory-db-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseFIPS": false, - "Region": "ap-southeast-1", - "UseDualStack": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://memory-db.ap-southeast-1.amazonaws.com" + "url": "https://memory-db.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "Region": "ap-southeast-1", - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://memory-db-fips.ap-southeast-2.api.aws" + "url": 
"https://memory-db-fips.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "ap-southeast-2", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db-fips.ap-southeast-2.amazonaws.com" + "url": "https://memory-db-fips.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "ap-southeast-2", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://memory-db.ap-southeast-2.api.aws" + "url": "https://memory-db.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "ap-southeast-2", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db.ap-southeast-2.amazonaws.com" + "url": "https://memory-db.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "ap-southeast-2", "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://memory-db-fips.us-east-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-east-1", "UseDualStack": true } }, { - "documentation": "For region us-east-1 with 
FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db-fips.us-east-1.amazonaws.com" + "url": "https://memory-db-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-east-1", "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://memory-db.us-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db.us-east-1.amazonaws.com" + "url": "https://memory-db.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://memory-db-fips.us-east-2.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-east-2", "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db-fips.us-east-2.amazonaws.com" + "url": 
"https://memory-db-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-east-2", "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://memory-db.us-east-2.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-east-2", "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://memory-db.us-east-2.amazonaws.com" + "url": "https://memory-db.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-east-2", - "UseDualStack": false - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseFIPS": true, - "Region": "cn-northwest-1", - "UseDualStack": true - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://memory-db-fips.cn-northwest-1.amazonaws.com.cn" - } - }, - "params": { - "UseFIPS": true, - "Region": "cn-northwest-1", "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://memory-db.cn-northwest-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseFIPS": false, - "Region": "cn-northwest-1", - "UseDualStack": true - } - }, - { - "documentation": "For region cn-northwest-1 
with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://memory-db.cn-northwest-1.amazonaws.com.cn" + "url": "https://example.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "cn-northwest-1", - "UseDualStack": false + "UseDualStack": false, + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -1049,7 +505,6 @@ }, "params": { "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false, "Endpoint": "https://example.com" } @@ -1060,8 +515,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -1072,11 +527,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/memorydb/src/main/resources/codegen-resources/service-2.json b/services/memorydb/src/main/resources/codegen-resources/service-2.json index 52e785b4f6f6..7dfa0a3786c7 100644 --- a/services/memorydb/src/main/resources/codegen-resources/service-2.json +++ b/services/memorydb/src/main/resources/codegen-resources/service-2.json @@ -833,7 +833,8 @@ "type":"string", "enum":[ "password", - "no-password" + "no-password", + "iam" ] }, "AvailabilityZone":{ @@ -2111,7 +2112,10 @@ }, "InputAuthenticationType":{ "type":"string", - "enum":["password"] 
+ "enum":[ + "password", + "iam" + ] }, "InsufficientClusterCapacityFault":{ "type":"structure", diff --git a/services/mgn/pom.xml b/services/mgn/pom.xml index 64b830ca49a0..184e6299d430 100644 --- a/services/mgn/pom.xml +++ b/services/mgn/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT mgn AWS Java SDK :: Services :: Mgn diff --git a/services/migrationhub/pom.xml b/services/migrationhub/pom.xml index e49850a37592..c5a8a2295422 100644 --- a/services/migrationhub/pom.xml +++ b/services/migrationhub/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 migrationhub diff --git a/services/migrationhubconfig/pom.xml b/services/migrationhubconfig/pom.xml index 8c4c8775c16c..70a8c9d3f1fe 100644 --- a/services/migrationhubconfig/pom.xml +++ b/services/migrationhubconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT migrationhubconfig AWS Java SDK :: Services :: MigrationHub Config diff --git a/services/migrationhuborchestrator/pom.xml b/services/migrationhuborchestrator/pom.xml index 7d691dadcc7e..a4d032b4967e 100644 --- a/services/migrationhuborchestrator/pom.xml +++ b/services/migrationhuborchestrator/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT migrationhuborchestrator AWS Java SDK :: Services :: Migration Hub Orchestrator diff --git a/services/migrationhubrefactorspaces/pom.xml b/services/migrationhubrefactorspaces/pom.xml index 5aadb846f928..164df339a410 100644 --- a/services/migrationhubrefactorspaces/pom.xml +++ b/services/migrationhubrefactorspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT migrationhubrefactorspaces AWS Java SDK :: Services :: Migration Hub Refactor Spaces diff --git a/services/migrationhubrefactorspaces/src/main/resources/codegen-resources/service-2.json 
b/services/migrationhubrefactorspaces/src/main/resources/codegen-resources/service-2.json index a4975e3c4bb7..49b8e17f1339 100644 --- a/services/migrationhubrefactorspaces/src/main/resources/codegen-resources/service-2.json +++ b/services/migrationhubrefactorspaces/src/main/resources/codegen-resources/service-2.json @@ -552,7 +552,7 @@ "members":{ "EndpointType":{ "shape":"ApiGatewayEndpointType", - "documentation":"

      The type of endpoint to use for the API Gateway proxy. If no value is specified in the request, the value is set to REGIONAL by default.

      If the value is set to PRIVATE in the request, this creates a private API endpoint that is isolated from the public internet. The private endpoint can only be accessed by using Amazon Virtual Private Cloud (Amazon VPC) endpoints for Amazon API Gateway that have been granted access.

      " + "documentation":"

      The type of endpoint to use for the API Gateway proxy. If no value is specified in the request, the value is set to REGIONAL by default.

      If the value is set to PRIVATE in the request, this creates a private API endpoint that is isolated from the public internet. The private endpoint can only be accessed by using Amazon Virtual Private Cloud (Amazon VPC) interface endpoints for the Amazon API Gateway that has been granted access. For more information about creating a private connection with Refactor Spaces and interface endpoint (Amazon Web Services PrivateLink) availability, see Access Refactor Spaces using an interface endpoint (Amazon Web Services PrivateLink).

      " }, "StageName":{ "shape":"StageName", @@ -1761,6 +1761,10 @@ "GetRouteResponse":{ "type":"structure", "members":{ + "AppendSourcePath":{ + "shape":"Boolean", + "documentation":"

      If set to true, this option appends the source path to the service URL endpoint.

      " + }, "ApplicationId":{ "shape":"ApplicationId", "documentation":"

      The ID of the application that the route belongs to.

      " @@ -1819,7 +1823,7 @@ }, "SourcePath":{ "shape":"UriPath", - "documentation":"

      The path to use to match traffic. Paths must start with / and are relative to the base of the application.

      " + "documentation":"

      This is the path that Refactor Spaces uses to match traffic. Paths must start with / and are relative to the base of the application. To use path parameters in the source path, add a variable in curly braces. For example, the resource path {user} represents a path parameter called 'user'.

      " }, "State":{ "shape":"RouteState", @@ -2380,6 +2384,10 @@ "RouteSummary":{ "type":"structure", "members":{ + "AppendSourcePath":{ + "shape":"Boolean", + "documentation":"

      If set to true, this option appends the source path to the service URL endpoint.

      " + }, "ApplicationId":{ "shape":"ApplicationId", "documentation":"

      The unique identifier of the application.

      " @@ -2438,7 +2446,7 @@ }, "SourcePath":{ "shape":"UriPath", - "documentation":"

      The path to use to match traffic. Paths must start with / and are relative to the base of the application.

      " + "documentation":"

      This is the path that Refactor Spaces uses to match traffic. Paths must start with / and are relative to the base of the application. To use path parameters in the source path, add a variable in curly braces. For example, the resource path {user} represents a path parameter called 'user'.

      " }, "State":{ "shape":"RouteState", @@ -2786,7 +2794,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"^(/[a-zA-Z0-9._-]+)+$" + "pattern":"^(/([a-zA-Z0-9._:-]+|\\{[a-zA-Z0-9._:-]+\\}))+$" }, "UriPathRouteInput":{ "type":"structure", @@ -2799,6 +2807,10 @@ "shape":"RouteActivationState", "documentation":"

      If set to ACTIVE, traffic is forwarded to this route’s service after the route is created.

      " }, + "AppendSourcePath":{ + "shape":"Boolean", + "documentation":"

      If set to true, this option appends the source path to the service URL endpoint.

      " + }, "IncludeChildPaths":{ "shape":"Boolean", "documentation":"

      Indicates whether to match all subpaths of the given source path. If this value is false, requests must match the source path exactly before they are forwarded to this route's service.

      " @@ -2809,7 +2821,7 @@ }, "SourcePath":{ "shape":"UriPath", - "documentation":"

      The path to use to match traffic. Paths must start with / and are relative to the base of the application.

      " + "documentation":"

      This is the path that Refactor Spaces uses to match traffic. Paths must start with / and are relative to the base of the application. To use path parameters in the source path, add a variable in curly braces. For example, the resource path {user} represents a path parameter called 'user'.

      " } }, "documentation":"

      The configuration for the URI path route type.

      " diff --git a/services/migrationhubstrategy/pom.xml b/services/migrationhubstrategy/pom.xml index b54df121aa2e..b5841d6d66fc 100644 --- a/services/migrationhubstrategy/pom.xml +++ b/services/migrationhubstrategy/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT migrationhubstrategy AWS Java SDK :: Services :: Migration Hub Strategy diff --git a/services/mobile/pom.xml b/services/mobile/pom.xml index 51a572b1bccb..6c93d9639927 100644 --- a/services/mobile/pom.xml +++ b/services/mobile/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 mobile diff --git a/services/mq/pom.xml b/services/mq/pom.xml index 8c9a48a4e036..665fe45e3af5 100644 --- a/services/mq/pom.xml +++ b/services/mq/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 mq diff --git a/services/mq/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/mq/src/main/resources/codegen-resources/endpoint-rule-set.json index 896427a06b28..d18215d80030 100644 --- a/services/mq/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/mq/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: 
FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://mq-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://mq-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://mq-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + 
"ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://mq.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -231,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://mq-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://mq.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -240,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://mq.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://mq.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/mq/src/main/resources/codegen-resources/endpoint-tests.json b/services/mq/src/main/resources/codegen-resources/endpoint-tests.json 
index 92c4cdcc2fae..58b22f3c279c 100644 --- a/services/mq/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/mq/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,250 +1,120 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq.eu-south-1.api.aws" - } - }, - 
"params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq.us-gov-east-1.api.aws" + "url": "https://mq.af-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": false + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq.us-gov-east-1.amazonaws.com" + "url": "https://mq.ap-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": false + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and 
DualStack disabled", "expect": { "endpoint": { - "url": "https://mq-fips.me-central-1.api.aws" + "url": "https://mq.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "me-central-1", - "UseFIPS": true + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq-fips.me-central-1.amazonaws.com" + "url": "https://mq.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "me-central-1", - "UseFIPS": true + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq.me-central-1.api.aws" + "url": "https://mq.ap-northeast-3.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "me-central-1", - "UseFIPS": false + "Region": "ap-northeast-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq.me-central-1.amazonaws.com" + "url": "https://mq.ap-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "me-central-1", - "UseFIPS": false + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq-fips.ca-central-1.api.aws" + "url": 
"https://mq.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": true + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq-fips.ca-central-1.amazonaws.com" + "url": "https://mq.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": true + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq.ca-central-1.api.aws" + "url": "https://mq.ap-southeast-3.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": false + "Region": "ap-southeast-3", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -255,48 +125,9 @@ } }, "params": { - "UseDualStack": false, "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq.eu-central-1.api.aws" - } - }, - 
"params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -307,1149 +138,421 @@ } }, "params": { - "UseDualStack": false, "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq-fips.us-west-2.amazonaws.com" + "url": "https://mq.eu-north-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true - } - }, 
- { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://mq-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - 
"documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://mq-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq.ap-northeast-2.amazonaws.com" - } - }, 
- "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": false 
- } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.ap-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://mq.ap-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://mq.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mq-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", 
"expect": { "endpoint": { - "url": "https://mq-fips.us-gov-west-1.amazonaws.com" + "url": "https://mq.eu-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq.us-gov-west-1.api.aws" + "url": "https://mq.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq.us-gov-west-1.amazonaws.com" + "url": "https://mq.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq-fips.ap-southeast-1.api.aws" + "url": "https://mq.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq-fips.ap-southeast-1.amazonaws.com" + "url": "https://mq.me-south-1.amazonaws.com" } }, "params": { - 
"UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq.ap-southeast-1.api.aws" + "url": "https://mq.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq.ap-southeast-1.amazonaws.com" + "url": "https://mq.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq-fips.ap-southeast-2.api.aws" + "url": "https://mq-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq-fips.ap-southeast-2.amazonaws.com" + "url": "https://mq.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, 
{ - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq.ap-southeast-2.api.aws" + "url": "https://mq-fips.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "us-east-2", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq.ap-southeast-2.amazonaws.com" + "url": "https://mq.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://mq-fips.us-west-1.amazonaws.com" + } }, "params": { - "UseDualStack": true, - "Region": "us-iso-east-1", - "UseFIPS": true + "Region": "us-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://mq.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-iso-east-1", - "UseFIPS": true + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + 
"documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://mq-fips.us-west-2.amazonaws.com" + } }, "params": { - "UseDualStack": true, - "Region": "us-iso-east-1", - "UseFIPS": false + "Region": "us-west-2", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://mq.us-iso-east-1.c2s.ic.gov" + "url": "https://mq-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "us-iso-east-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://mq-fips.ap-southeast-3.api.aws" + "url": "https://mq.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq-fips.ap-southeast-3.amazonaws.com" + "url": "https://mq.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-3", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - 
"url": "https://mq.ap-southeast-3.api.aws" + "url": "https://mq.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": false + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://mq.ap-southeast-3.amazonaws.com" + "url": "https://mq-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-3", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq-fips.us-east-1.api.aws" + "url": "https://mq-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://mq-fips.us-east-1.amazonaws.com" + "url": "https://mq.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq.us-east-1.api.aws" + "url": "https://mq.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": 
true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq.us-east-1.amazonaws.com" + "url": "https://mq-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq-fips.us-east-2.api.aws" + "url": "https://mq.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq-fips.us-east-2.amazonaws.com" + "url": "https://mq-fips.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://mq.us-east-2.api.aws" + "url": "https://mq-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region 
us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://mq.us-east-2.amazonaws.com" + "url": "https://mq.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://mq-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq-fips.cn-northwest-1.amazonaws.com.cn" + "url": "https://mq-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://mq.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region 
cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mq.cn-northwest-1.amazonaws.com.cn" + "url": "https://mq.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -1458,9 +561,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, "Region": "us-isob-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -1471,9 +574,9 @@ } }, "params": { - "UseDualStack": false, "Region": "us-isob-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -1482,9 +585,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, "Region": "us-isob-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -1495,22 +598,35 @@ } }, "params": { - "UseDualStack": false, "Region": "us-isob-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1520,9 +636,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are 
not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1532,11 +648,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/mq/src/main/resources/codegen-resources/service-2.json b/services/mq/src/main/resources/codegen-resources/service-2.json index e16cfd04b593..f10c53475ed5 100644 --- a/services/mq/src/main/resources/codegen-resources/service-2.json +++ b/services/mq/src/main/resources/codegen-resources/service-2.json @@ -41,7 +41,7 @@ "shape" : "ForbiddenException", "documentation" : "

      HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

      " } ], - "documentation" : "

      Creates a broker. Note: This API is asynchronous.

      To create a broker, you must either use the AmazonMQFullAccess IAM policy or include the following EC2 permissions in your IAM policy.

      • ec2:CreateNetworkInterface

        This permission is required to allow Amazon MQ to create an elastic network interface (ENI) on behalf of your account.

      • ec2:CreateNetworkInterfacePermission

        This permission is required to attach the ENI to the broker instance.

      • ec2:DeleteNetworkInterface

      • ec2:DeleteNetworkInterfacePermission

      • ec2:DetachNetworkInterface

      • ec2:DescribeInternetGateways

      • ec2:DescribeNetworkInterfaces

      • ec2:DescribeNetworkInterfacePermissions

      • ec2:DescribeRouteTables

      • ec2:DescribeSecurityGroups

      • ec2:DescribeSubnets

      • ec2:DescribeVpcs

      For more information, see Create an IAM User and Get Your AWS Credentials and Never Modify or Delete the Amazon MQ Elastic Network Interface in the Amazon MQ Developer Guide.

      " + "documentation" : "

      Creates a broker. Note: This API is asynchronous.

      To create a broker, you must either use the AmazonMQFullAccess IAM policy or include the following EC2 permissions in your IAM policy.

      • ec2:CreateNetworkInterface

        This permission is required to allow Amazon MQ to create an elastic network interface (ENI) on behalf of your account.

      • ec2:CreateNetworkInterfacePermission

        This permission is required to attach the ENI to the broker instance.

      • ec2:DeleteNetworkInterface

      • ec2:DeleteNetworkInterfacePermission

      • ec2:DetachNetworkInterface

      • ec2:DescribeInternetGateways

      • ec2:DescribeNetworkInterfaces

      • ec2:DescribeNetworkInterfacePermissions

      • ec2:DescribeRouteTables

      • ec2:DescribeSecurityGroups

      • ec2:DescribeSubnets

      • ec2:DescribeVpcs

      For more information, see Create an IAM User and Get Your Amazon Web Services Credentials and Never Modify or Delete the Amazon MQ Elastic Network Interface in the Amazon MQ Developer Guide.

      " }, "CreateConfiguration" : { "name" : "CreateConfiguration", @@ -127,7 +127,7 @@ "shape" : "ForbiddenException", "documentation" : "

      HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

      " } ], - "documentation" : "

      Creates an ActiveMQ user.

      " + "documentation" : "

      Creates an ActiveMQ user.

      Do not add personally identifiable information (PII) or other confidential or sensitive information in broker usernames. Broker usernames are accessible to other Amazon Web Services services, including CloudWatch Logs. Broker usernames are not intended to be used for private or sensitive data.

      " }, "DeleteBroker" : { "name" : "DeleteBroker", @@ -519,6 +519,35 @@ } ], "documentation" : "

      Returns a list of all ActiveMQ users.

      " }, + "Promote" : { + "name" : "Promote", + "http" : { + "method" : "POST", + "requestUri" : "/v1/brokers/{broker-id}/promote", + "responseCode" : 200 + }, + "input" : { + "shape" : "PromoteRequest" + }, + "output" : { + "shape" : "PromoteResponse", + "documentation" : "

      HTTP Status Code 200: OK.

      " + }, + "errors" : [ { + "shape" : "NotFoundException", + "documentation" : "

      HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

      " + }, { + "shape" : "BadRequestException", + "documentation" : "

      HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

      " + }, { + "shape" : "InternalServerErrorException", + "documentation" : "

      HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

      " + }, { + "shape" : "ForbiddenException", + "documentation" : "

      HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

      " + } ], + "documentation" : "

      Promotes a data replication replica broker to the primary broker role.

      " + }, "RebootBroker" : { "name" : "RebootBroker", "http" : { @@ -652,15 +681,15 @@ "ActionRequiredCode" : { "shape" : "__string", "locationName" : "actionRequiredCode", - "documentation" : "

      The code you can use to resolve your broker issue when the broker is in a CRITICAL_ACTION_REQUIRED state. You can find instructions by choosing the link for your code from the list of action required codes in Amazon MQ action required codes. Each code references a topic with detailed information, instructions, and recommendations for how to resolve the issue and prevent future occurrences.

      " + "documentation" : "

      The code you can use to find instructions on the action required to resolve your broker issue.

      " }, "ActionRequiredInfo" : { "shape" : "__string", "locationName" : "actionRequiredInfo", - "documentation" : "

      Information about the action required to resolve your broker issue when the broker is in a CRITICAL_ACTION_REQUIRED state.

      " + "documentation" : "

      Information about the action required to resolve your broker issue.

      " } }, - "documentation" : "

      The action required to resolve a broker issue when the broker is in a CRITICAL_ACTION_REQUIRED state.

      " + "documentation" : "

      Action required for a broker.

      " }, "AuthenticationStrategy" : { "type" : "string", @@ -818,7 +847,7 @@ "BrokerState" : { "type" : "string", "documentation" : "

      The broker's status.

      ", - "enum" : [ "CREATION_IN_PROGRESS", "CREATION_FAILED", "DELETION_IN_PROGRESS", "RUNNING", "REBOOT_IN_PROGRESS", "CRITICAL_ACTION_REQUIRED" ] + "enum" : [ "CREATION_IN_PROGRESS", "CREATION_FAILED", "DELETION_IN_PROGRESS", "RUNNING", "REBOOT_IN_PROGRESS", "CRITICAL_ACTION_REQUIRED", "REPLICA" ] }, "BrokerStorageType" : { "type" : "string", @@ -841,7 +870,7 @@ "BrokerName" : { "shape" : "__string", "locationName" : "brokerName", - "documentation" : "

      The broker's name. This value is unique in your AWS account, 1-50 characters long, and containing only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special characters.

      " + "documentation" : "

      The broker's name. This value is unique in your Amazon Web Services account, 1-50 characters long, and containing only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special characters.

      " }, "BrokerState" : { "shape" : "BrokerState", @@ -948,7 +977,7 @@ "documentation" : "

      The revision number of the configuration.

      " } }, - "documentation" : "

      A list of information about the configuration.

      Does not apply to RabbitMQ brokers.

      ", + "documentation" : "

      A list of information about the configuration.

      ", "required" : [ "Id" ] }, "ConfigurationRevision" : { @@ -1030,7 +1059,7 @@ "BrokerName" : { "shape" : "__string", "locationName" : "brokerName", - "documentation" : "

      Required. The broker's name. This value must be unique in your AWS account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special characters.

      " + "documentation" : "

      Required. The broker's name. This value must be unique in your Amazon Web Services account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special characters.

      Do not add personally identifiable information (PII) or other confidential or sensitive information in broker names. Broker names are accessible to other Amazon Web Services services, including CloudWatch Logs. Broker names are not intended to be used for private or sensitive data.

      " }, "Configuration" : { "shape" : "ConfigurationId", @@ -1040,7 +1069,7 @@ "CreatorRequestId" : { "shape" : "__string", "locationName" : "creatorRequestId", - "documentation" : "

      The unique ID that the requester receives for the created broker. Amazon MQ passes your ID with the API action. Note: We recommend using a Universally Unique Identifier (UUID) for the creatorRequestId. You may omit the creatorRequestId if your application doesn't require idempotency.

      ", + "documentation" : "

      The unique ID that the requester receives for the created broker. Amazon MQ passes your ID with the API action.

      We recommend using a Universally Unique Identifier (UUID) for the creatorRequestId. You may omit the creatorRequestId if your application doesn't require idempotency.

      ", "idempotencyToken" : true }, "DeploymentMode" : { @@ -1048,10 +1077,20 @@ "locationName" : "deploymentMode", "documentation" : "

      Required. The broker's deployment mode.

      " }, + "DataReplicationMode" : { + "shape" : "DataReplicationMode", + "locationName" : "dataReplicationMode", + "documentation" : "

      Defines whether this broker is a part of a data replication pair.

      " + }, + "DataReplicationPrimaryBrokerArn" : { + "shape" : "__string", + "locationName" : "dataReplicationPrimaryBrokerArn", + "documentation" : "

      The Amazon Resource Name (ARN) of the primary broker that is used to replicate data from in a data replication pair, and is applied to the replica broker. Must be set when dataReplicationMode is set to CRDR.

      " + }, "EncryptionOptions" : { "shape" : "EncryptionOptions", "locationName" : "encryptionOptions", - "documentation" : "

      Encryption options for the broker. Does not apply to RabbitMQ brokers.

      " + "documentation" : "

      Encryption options for the broker.

      " }, "EngineType" : { "shape" : "EngineType", @@ -1101,7 +1140,7 @@ "SubnetIds" : { "shape" : "__listOf__string", "locationName" : "subnetIds", - "documentation" : "

      The list of groups that define which subnets and IP ranges the broker can use from different Availability Zones. If you specify more than one subnet, the subnets must be in different Availability Zones. Amazon MQ will not be able to create VPC endpoints for your broker with multiple subnets in the same Availability Zone. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ Amazon MQ for ActiveMQ deployment requires two subnets. A CLUSTER_MULTI_AZ Amazon MQ for RabbitMQ deployment has no subnet requirements when deployed with public accessibility. Deployment without public accessibility requires at least one subnet.

      If you specify subnets in a shared VPC for a RabbitMQ broker, the associated VPC to which the specified subnets belong must be owned by your AWS account. Amazon MQ will not be able to create VPC endpoints in VPCs that are not owned by your AWS account.

      " + "documentation" : "

      The list of groups that define which subnets and IP ranges the broker can use from different Availability Zones. If you specify more than one subnet, the subnets must be in different Availability Zones. Amazon MQ will not be able to create VPC endpoints for your broker with multiple subnets in the same Availability Zone. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ Amazon MQ for ActiveMQ deployment requires two subnets. A CLUSTER_MULTI_AZ Amazon MQ for RabbitMQ deployment has no subnet requirements when deployed with public accessibility. Deployment without public accessibility requires at least one subnet.

      If you specify subnets in a shared VPC for a RabbitMQ broker, the associated VPC to which the specified subnets belong must be owned by your Amazon Web Services account. Amazon MQ will not be able to create VPC endpoints in VPCs that are not owned by your Amazon Web Services account.

      " }, "Tags" : { "shape" : "__mapOf__string", @@ -1111,7 +1150,7 @@ "Users" : { "shape" : "__listOfUser", "locationName" : "users", - "documentation" : "

      Required. The list of broker users (persons or applications) who can access queues and topics. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

      Amazon MQ for RabbitMQ

      When you create an Amazon MQ for RabbitMQ broker, one and only one administrative user is accepted and created when a broker is first provisioned. All subsequent broker users are created by making RabbitMQ API calls directly to brokers or via the RabbitMQ web console.

      " + "documentation" : "

      The list of broker users (persons or applications) who can access queues and topics. For Amazon MQ for RabbitMQ brokers, one and only one administrative user is accepted and created when a broker is first provisioned. All subsequent broker users are created by making RabbitMQ API calls directly to brokers or via the RabbitMQ web console.

      " } }, "documentation" : "

      Creates a broker.

      ", @@ -1149,7 +1188,7 @@ "BrokerName" : { "shape" : "__string", "locationName" : "brokerName", - "documentation" : "

      Required. The broker's name. This value must be unique in your AWS account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special characters.

      " + "documentation" : "

      Required. The broker's name. This value must be unique in your Amazon Web Services account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special characters.

      Do not add personally identifiable information (PII) or other confidential or sensitive information in broker names. Broker names are accessible to other Amazon Web Services services, including CloudWatch Logs. Broker names are not intended to be used for private or sensitive data.

      " }, "Configuration" : { "shape" : "ConfigurationId", @@ -1159,7 +1198,7 @@ "CreatorRequestId" : { "shape" : "__string", "locationName" : "creatorRequestId", - "documentation" : "

      The unique ID that the requester receives for the created broker. Amazon MQ passes your ID with the API action. Note: We recommend using a Universally Unique Identifier (UUID) for the creatorRequestId. You may omit the creatorRequestId if your application doesn't require idempotency.

      ", + "documentation" : "

      The unique ID that the requester receives for the created broker. Amazon MQ passes your ID with the API action.

      We recommend using a Universally Unique Identifier (UUID) for the creatorRequestId. You may omit the creatorRequestId if your application doesn't require idempotency.

      ", "idempotencyToken" : true }, "DeploymentMode" : { @@ -1170,7 +1209,7 @@ "EncryptionOptions" : { "shape" : "EncryptionOptions", "locationName" : "encryptionOptions", - "documentation" : "

      Encryption options for the broker. Does not apply to RabbitMQ brokers.

      " + "documentation" : "

      Encryption options for the broker.

      " }, "EngineType" : { "shape" : "EngineType", @@ -1220,7 +1259,7 @@ "SubnetIds" : { "shape" : "__listOf__string", "locationName" : "subnetIds", - "documentation" : "

      The list of groups that define which subnets and IP ranges the broker can use from different Availability Zones. If you specify more than one subnet, the subnets must be in different Availability Zones. Amazon MQ will not be able to create VPC endpoints for your broker with multiple subnets in the same Availability Zone. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ Amazon MQ for ActiveMQ deployment requires two subnets. A CLUSTER_MULTI_AZ Amazon MQ for RabbitMQ deployment has no subnet requirements when deployed with public accessibility. Deployment without public accessibility requires at least one subnet.

      If you specify subnets in a shared VPC for a RabbitMQ broker, the associated VPC to which the specified subnets belong must be owned by your AWS account. Amazon MQ will not be able to create VPC endpoints in VPCs that are not owned by your AWS account.

      " + "documentation" : "

      The list of groups that define which subnets and IP ranges the broker can use from different Availability Zones. If you specify more than one subnet, the subnets must be in different Availability Zones. Amazon MQ will not be able to create VPC endpoints for your broker with multiple subnets in the same Availability Zone. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ Amazon MQ for ActiveMQ deployment requires two subnets. A CLUSTER_MULTI_AZ Amazon MQ for RabbitMQ deployment has no subnet requirements when deployed with public accessibility. Deployment without public accessibility requires at least one subnet.

      If you specify subnets in a shared VPC for a RabbitMQ broker, the associated VPC to which the specified subnets belong must be owned by your Amazon Web Services account. Amazon MQ will not be able to create VPC endpoints in VPCs that are not owned by your Amazon Web Services account.

      " }, "Tags" : { "shape" : "__mapOf__string", @@ -1230,7 +1269,17 @@ "Users" : { "shape" : "__listOfUser", "locationName" : "users", - "documentation" : "

      Required. The list of broker users (persons or applications) who can access queues and topics. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

      Amazon MQ for RabbitMQ

      When you create an Amazon MQ for RabbitMQ broker, one and only one administrative user is accepted and created when a broker is first provisioned. All subsequent broker users are created by making RabbitMQ API calls directly to brokers or via the RabbitMQ web console.

      " + "documentation" : "

      The list of broker users (persons or applications) who can access queues and topics. For Amazon MQ for RabbitMQ brokers, one and only one administrative user is accepted and created when a broker is first provisioned. All subsequent broker users are created by making RabbitMQ API calls directly to brokers or via the RabbitMQ web console.

      " + }, + "DataReplicationMode" : { + "shape" : "DataReplicationMode", + "locationName" : "dataReplicationMode", + "documentation" : "

      Defines whether this broker is a part of a data replication pair.

      " + }, + "DataReplicationPrimaryBrokerArn" : { + "shape" : "__string", + "locationName" : "dataReplicationPrimaryBrokerArn", + "documentation" : "

      The Amazon Resource Name (ARN) of the primary broker that is used to replicate data from in a data replication pair, and is applied to the replica broker. Must be set when dataReplicationMode is set to CRDR.

      " } }, "documentation" : "

      Creates a broker using the specified properties.

      ", @@ -1422,6 +1471,11 @@ "shape" : "__string", "locationName" : "password", "documentation" : "

      Required. The password of the user. This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas, colons, or equal signs (,:=).

      " + }, + "ReplicationUser" : { + "shape" : "__boolean", + "locationName" : "replicationUser", + "documentation" : "

      Defines if this user is intended for CRDR replication purposes.

      " } }, "documentation" : "

      Creates a new ActiveMQ user.

      ", @@ -1456,6 +1510,11 @@ "location" : "uri", "locationName" : "username", "documentation" : "

      The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

      " + }, + "ReplicationUser" : { + "shape" : "__boolean", + "locationName" : "replicationUser", + "documentation" : "

      Defines if this user is intended for CRDR replication purposes.

      " } }, "documentation" : "

      Creates a new ActiveMQ user.

      ", @@ -1465,6 +1524,45 @@ "type" : "structure", "members" : { } }, + "DataReplicationCounterpart" : { + "type" : "structure", + "members" : { + "BrokerId" : { + "shape" : "__string", + "locationName" : "brokerId", + "documentation" : "

      Required. The unique broker id generated by Amazon MQ.

      " + }, + "Region" : { + "shape" : "__string", + "locationName" : "region", + "documentation" : "

      Required. The region of the broker.

      " + } + }, + "documentation" : "

      Specifies a broker in a data replication pair.

      ", + "required" : [ "BrokerId", "Region" ] + }, + "DataReplicationMetadataOutput" : { + "type" : "structure", + "members" : { + "DataReplicationCounterpart" : { + "shape" : "DataReplicationCounterpart", + "locationName" : "dataReplicationCounterpart", + "documentation" : "

      Describes the replica/primary broker. Only returned if this broker is currently set as a primary or replica in the broker's dataReplicationRole property.

      " + }, + "DataReplicationRole" : { + "shape" : "__string", + "locationName" : "dataReplicationRole", + "documentation" : "

      Defines the role of this broker in a data replication pair. When a replica broker is promoted to primary, this role is interchanged.

      " + } + }, + "documentation" : "

      The replication details of the data replication-enabled broker. Only returned if dataReplicationMode or pendingDataReplicationMode is set to CRDR.

      ", + "required" : [ "DataReplicationRole" ] + }, + "DataReplicationMode" : { + "type" : "string", + "documentation" : "

      Specifies whether a broker is a part of a data replication pair.

      ", + "enum" : [ "NONE", "CRDR" ] + }, "DayOfWeek" : { "type" : "string", "enum" : [ "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY" ] @@ -1651,7 +1749,7 @@ "ActionsRequired" : { "shape" : "__listOfActionRequired", "locationName" : "actionsRequired", - "documentation" : "

      A list of actions required for a broker.

      " + "documentation" : "

      Actions required for a broker.

      " }, "AuthenticationStrategy" : { "shape" : "AuthenticationStrategy", @@ -1681,7 +1779,7 @@ "BrokerName" : { "shape" : "__string", "locationName" : "brokerName", - "documentation" : "

      The broker's name. This value must be unique in your AWS account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special characters.

      " + "documentation" : "

      The broker's name. This value must be unique in your Amazon Web Services account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special characters.

      " }, "BrokerState" : { "shape" : "BrokerState", @@ -1703,10 +1801,20 @@ "locationName" : "deploymentMode", "documentation" : "

      The broker's deployment mode.

      " }, + "DataReplicationMetadata" : { + "shape" : "DataReplicationMetadataOutput", + "locationName" : "dataReplicationMetadata", + "documentation" : "

      The replication details of the data replication-enabled broker. Only returned if dataReplicationMode is set to CRDR.

      " + }, + "DataReplicationMode" : { + "shape" : "DataReplicationMode", + "locationName" : "dataReplicationMode", + "documentation" : "

      Describes whether this broker is a part of a data replication pair.

      " + }, "EncryptionOptions" : { "shape" : "EncryptionOptions", "locationName" : "encryptionOptions", - "documentation" : "

      Encryption options for the broker. Does not apply to RabbitMQ brokers.

      " + "documentation" : "

      Encryption options for the broker.

      " }, "EngineType" : { "shape" : "EngineType", @@ -1743,6 +1851,16 @@ "locationName" : "pendingAuthenticationStrategy", "documentation" : "

      The authentication strategy that will be applied when the broker is rebooted. The default is SIMPLE.

      " }, + "PendingDataReplicationMetadata" : { + "shape" : "DataReplicationMetadataOutput", + "locationName" : "pendingDataReplicationMetadata", + "documentation" : "

      The pending replication details of the data replication-enabled broker. Only returned if pendingDataReplicationMode is set to CRDR.

      " + }, + "PendingDataReplicationMode" : { + "shape" : "DataReplicationMode", + "locationName" : "pendingDataReplicationMode", + "documentation" : "

      Describes whether this broker will be a part of a data replication pair after reboot.

      " + }, "PendingEngineVersion" : { "shape" : "__string", "locationName" : "pendingEngineVersion", @@ -1815,7 +1933,7 @@ "ActionsRequired" : { "shape" : "__listOfActionRequired", "locationName" : "actionsRequired", - "documentation" : "

      A list of actions required for a broker.

      " + "documentation" : "

      Actions required for a broker.

      " }, "AuthenticationStrategy" : { "shape" : "AuthenticationStrategy", @@ -1845,7 +1963,7 @@ "BrokerName" : { "shape" : "__string", "locationName" : "brokerName", - "documentation" : "

      The broker's name. This value must be unique in your AWS account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special characters.

      " + "documentation" : "

      The broker's name. This value must be unique in your Amazon Web Services account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special characters.

      " }, "BrokerState" : { "shape" : "BrokerState", @@ -1870,7 +1988,7 @@ "EncryptionOptions" : { "shape" : "EncryptionOptions", "locationName" : "encryptionOptions", - "documentation" : "

      Encryption options for the broker. Does not apply to RabbitMQ brokers.

      " + "documentation" : "

      Encryption options for the broker.

      " }, "EngineType" : { "shape" : "EngineType", @@ -1956,6 +2074,26 @@ "shape" : "__listOfUserSummary", "locationName" : "users", "documentation" : "

      The list of all broker usernames for the specified broker.

      " + }, + "DataReplicationMetadata" : { + "shape" : "DataReplicationMetadataOutput", + "locationName" : "dataReplicationMetadata", + "documentation" : "

      The replication details of the data replication-enabled broker. Only returned if dataReplicationMode is set to CRDR.

      " + }, + "DataReplicationMode" : { + "shape" : "DataReplicationMode", + "locationName" : "dataReplicationMode", + "documentation" : "

      Describes whether this broker is a part of a data replication pair.

      " + }, + "PendingDataReplicationMetadata" : { + "shape" : "DataReplicationMetadataOutput", + "locationName" : "pendingDataReplicationMetadata", + "documentation" : "

      The pending replication details of the data replication-enabled broker. Only returned if pendingDataReplicationMode is set to CRDR.

      " + }, + "PendingDataReplicationMode" : { + "shape" : "DataReplicationMode", + "locationName" : "pendingDataReplicationMode", + "documentation" : "

      Describes whether this broker will be a part of a data replication pair after reboot.

      " } } }, @@ -2042,7 +2180,7 @@ "Data" : { "shape" : "__string", "locationName" : "data", - "documentation" : "

      Required. The base64-encoded XML configuration.

      " + "documentation" : "

      Amazon MQ for ActiveMQ: the base64-encoded XML configuration. Amazon MQ for RabbitMQ: base64-encoded Cuttlefish.

      " }, "Description" : { "shape" : "__string", @@ -2087,7 +2225,7 @@ "Data" : { "shape" : "__string", "locationName" : "data", - "documentation" : "

      Required. The base64-encoded XML configuration.

      " + "documentation" : "

      Amazon MQ for ActiveMQ: the base64-encoded XML configuration. Amazon MQ for RabbitMQ: base64-encoded Cuttlefish.

      " }, "Description" : { "shape" : "__string", @@ -2119,6 +2257,11 @@ "locationName" : "pending", "documentation" : "

      The status of the changes pending for the ActiveMQ user.

      " }, + "ReplicationUser" : { + "shape" : "__boolean", + "locationName" : "replicationUser", + "documentation" : "

      Describes whether the user is intended for data replication

      " + }, "Username" : { "shape" : "__string", "locationName" : "username", @@ -2173,6 +2316,11 @@ "shape" : "__string", "locationName" : "username", "documentation" : "

      Required. The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

      " + }, + "ReplicationUser" : { + "shape" : "__boolean", + "locationName" : "replicationUser", + "documentation" : "

      Describes whether the user is intended for data replication

      " } } }, @@ -2182,15 +2330,15 @@ "KmsKeyId" : { "shape" : "__string", "locationName" : "kmsKeyId", - "documentation" : "

      The customer master key (CMK) to use for the AWS Key Management Service (KMS). This key is used to encrypt your data at rest. If not provided, Amazon MQ will use a default CMK to encrypt your data.

      " + "documentation" : "

      The customer master key (CMK) to use for the Amazon Web Services Key Management Service (KMS). This key is used to encrypt your data at rest. If not provided, Amazon MQ will use a default CMK to encrypt your data.

      " }, "UseAwsOwnedKey" : { "shape" : "__boolean", "locationName" : "useAwsOwnedKey", - "documentation" : "

      Enables the use of an AWS owned CMK using AWS Key Management Service (KMS). Set to true by default, if no value is provided, for example, for RabbitMQ brokers.

      " + "documentation" : "

      Enables the use of an Amazon Web Services owned CMK using Key Management Service (KMS). Set to true by default, if no value is provided, for example, for RabbitMQ brokers.

      " } }, - "documentation" : "

      Does not apply to RabbitMQ brokers.

      Encryption options for the broker.

      ", + "documentation" : "

      Encryption options for the broker.

      ", "required" : [ "UseAwsOwnedKey" ] }, "EngineType" : { @@ -2271,7 +2419,7 @@ "Hosts" : { "shape" : "__listOf__string", "locationName" : "hosts", - "documentation" : "

      Specifies the location of the LDAP server such as AWS Directory Service for Microsoft Active Directory . Optional failover server.

      " + "documentation" : "

      Specifies the location of the LDAP server such as Directory Service for Microsoft Active Directory. Optional failover server.

      " }, "RoleBase" : { "shape" : "__string", @@ -2333,7 +2481,7 @@ "Hosts" : { "shape" : "__listOf__string", "locationName" : "hosts", - "documentation" : "

      Specifies the location of the LDAP server such as AWS Directory Service for Microsoft Active Directory . Optional failover server.

      " + "documentation" : "

      Specifies the location of the LDAP server such as Directory Service for Microsoft Active Directory. Optional failover server.

      " }, "RoleBase" : { "shape" : "__string", @@ -2397,7 +2545,8 @@ "locationName" : "nextToken", "documentation" : "

      The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.

      " } - } + }, + "documentation" : "

      A list of information about all brokers.

      " }, "ListBrokersRequest" : { "type" : "structure", @@ -2751,6 +2900,62 @@ }, "documentation" : "

      The list of information about logs to be enabled for the specified broker.

      " }, + "PromoteInput" : { + "type" : "structure", + "members" : { + "Mode" : { + "shape" : "PromoteMode", + "locationName" : "mode", + "documentation" : "

      The Promote mode requested. Note: Valid values for the parameter are SWITCHOVER, FAILOVER.

      " + } + }, + "documentation" : "

      Creates a Promote request with the properties specified.

      ", + "required" : [ "Mode" ] + }, + "PromoteMode" : { + "type" : "string", + "documentation" : "

      The Promote mode requested.

      ", + "enum" : [ "SWITCHOVER", "FAILOVER" ] + }, + "PromoteOutput" : { + "type" : "structure", + "members" : { + "BrokerId" : { + "shape" : "__string", + "locationName" : "brokerId", + "documentation" : "

      The unique ID that Amazon MQ generates for the broker.

      " + } + }, + "documentation" : "

      Returns information about the updated broker.

      " + }, + "PromoteRequest" : { + "type" : "structure", + "members" : { + "BrokerId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "broker-id", + "documentation" : "

      The unique ID that Amazon MQ generates for the broker.

      " + }, + "Mode" : { + "shape" : "PromoteMode", + "locationName" : "mode", + "documentation" : "

      The Promote mode requested. Note: Valid values for the parameter are SWITCHOVER, FAILOVER.

      " + } + }, + "documentation" : "

      Promotes a data replication replica broker to the primary broker role.

      ", + "required" : [ "BrokerId", "Mode" ] + }, + "PromoteResponse" : { + "type" : "structure", + "members" : { + "BrokerId" : { + "shape" : "__string", + "locationName" : "brokerId", + "documentation" : "

      The unique ID that Amazon MQ generates for the broker.

      " + } + } + }, "RebootBrokerRequest" : { "type" : "structure", "members" : { @@ -2773,25 +2978,25 @@ "AttributeName" : { "shape" : "__string", "locationName" : "attributeName", - "documentation" : "

      The name of the XML attribute that has been sanitized.

      " + "documentation" : "

      The name of the configuration attribute that has been sanitized.

      " }, "ElementName" : { "shape" : "__string", "locationName" : "elementName", - "documentation" : "

      The name of the XML element that has been sanitized.

      " + "documentation" : "

      The name of the configuration element that has been sanitized.

      " }, "Reason" : { "shape" : "SanitizationWarningReason", "locationName" : "reason", - "documentation" : "

      Required. The reason for which the XML elements or attributes were sanitized.

      " + "documentation" : "

      The reason for which the configuration elements or attributes were sanitized.

      " } }, - "documentation" : "

      Returns information about the XML element or attribute that was sanitized in the configuration.

      ", + "documentation" : "

      Returns information about the configuration element or attribute that was sanitized in the configuration.

      ", "required" : [ "Reason" ] }, "SanitizationWarningReason" : { "type" : "string", - "documentation" : "

      The reason for which the XML elements or attributes were sanitized.

      ", + "documentation" : "

      The reason for which the configuration elements or attributes were sanitized.

      ", "enum" : [ "DISALLOWED_ELEMENT_REMOVED", "DISALLOWED_ATTRIBUTE_REMOVED", "INVALID_ATTRIBUTE_VALUE_REMOVED" ] }, "Tags" : { @@ -2843,6 +3048,11 @@ "locationName" : "configuration", "documentation" : "

      A list of information about the configuration.

      " }, + "DataReplicationMode" : { + "shape" : "DataReplicationMode", + "locationName" : "dataReplicationMode", + "documentation" : "

      Defines whether this broker is a part of a data replication pair.

      " + }, "EngineVersion" : { "shape" : "__string", "locationName" : "engineVersion", @@ -2899,6 +3109,16 @@ "locationName" : "configuration", "documentation" : "

      The ID of the updated configuration.

      " }, + "DataReplicationMetadata" : { + "shape" : "DataReplicationMetadataOutput", + "locationName" : "dataReplicationMetadata", + "documentation" : "

      The replication details of the data replication-enabled broker. Only returned if dataReplicationMode is set to CRDR.

      " + }, + "DataReplicationMode" : { + "shape" : "DataReplicationMode", + "locationName" : "dataReplicationMode", + "documentation" : "

      Describes whether this broker is a part of a data replication pair.

      " + }, "EngineVersion" : { "shape" : "__string", "locationName" : "engineVersion", @@ -2924,6 +3144,16 @@ "locationName" : "maintenanceWindowStartTime", "documentation" : "

      The parameters that determine the WeeklyStartTime.

      " }, + "PendingDataReplicationMetadata" : { + "shape" : "DataReplicationMetadataOutput", + "locationName" : "pendingDataReplicationMetadata", + "documentation" : "

      The pending replication details of the data replication-enabled broker. Only returned if pendingDataReplicationMode is set to CRDR.

      " + }, + "PendingDataReplicationMode" : { + "shape" : "DataReplicationMode", + "locationName" : "pendingDataReplicationMode", + "documentation" : "

      Describes whether this broker will be a part of a data replication pair after reboot.

      " + }, "SecurityGroups" : { "shape" : "__listOf__string", "locationName" : "securityGroups", @@ -2986,6 +3216,11 @@ "shape" : "__listOf__string", "locationName" : "securityGroups", "documentation" : "

      The list of security groups (1 minimum, 5 maximum) that authorizes connections to brokers.

      " + }, + "DataReplicationMode" : { + "shape" : "DataReplicationMode", + "locationName" : "dataReplicationMode", + "documentation" : "

      Defines whether this broker is a part of a data replication pair.

      " } }, "documentation" : "

      Updates the broker using the specified properties.

      ", @@ -3043,6 +3278,26 @@ "shape" : "__listOf__string", "locationName" : "securityGroups", "documentation" : "

      The list of security groups (1 minimum, 5 maximum) that authorizes connections to brokers.

      " + }, + "DataReplicationMetadata" : { + "shape" : "DataReplicationMetadataOutput", + "locationName" : "dataReplicationMetadata", + "documentation" : "

      The replication details of the data replication-enabled broker. Only returned if dataReplicationMode is set to CRDR.

      " + }, + "DataReplicationMode" : { + "shape" : "DataReplicationMode", + "locationName" : "dataReplicationMode", + "documentation" : "

      Describes whether this broker is a part of a data replication pair.

      " + }, + "PendingDataReplicationMetadata" : { + "shape" : "DataReplicationMetadataOutput", + "locationName" : "pendingDataReplicationMetadata", + "documentation" : "

      The pending replication details of the data replication-enabled broker. Only returned if pendingDataReplicationMode is set to CRDR.

      " + }, + "PendingDataReplicationMode" : { + "shape" : "DataReplicationMode", + "locationName" : "pendingDataReplicationMode", + "documentation" : "

      Describes whether this broker will be a part of a data replication pair after reboot.

      " } } }, @@ -3052,7 +3307,7 @@ "Data" : { "shape" : "__string", "locationName" : "data", - "documentation" : "

      Required. The base64-encoded XML configuration.

      " + "documentation" : "

      Amazon MQ for Active MQ: The base64-encoded XML configuration. Amazon MQ for RabbitMQ: the base64-encoded Cuttlefish configuration.

      " }, "Description" : { "shape" : "__string", @@ -3069,7 +3324,7 @@ "Arn" : { "shape" : "__string", "locationName" : "arn", - "documentation" : "

      Required. The Amazon Resource Name (ARN) of the configuration.

      " + "documentation" : "

      The Amazon Resource Name (ARN) of the configuration.

      " }, "Created" : { "shape" : "__timestampIso8601", @@ -3079,7 +3334,7 @@ "Id" : { "shape" : "__string", "locationName" : "id", - "documentation" : "

      Required. The unique ID that Amazon MQ generates for the configuration.

      " + "documentation" : "

      The unique ID that Amazon MQ generates for the configuration.

      " }, "LatestRevision" : { "shape" : "ConfigurationRevision", @@ -3089,12 +3344,12 @@ "Name" : { "shape" : "__string", "locationName" : "name", - "documentation" : "

      Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.

      " + "documentation" : "

      The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.

      " }, "Warnings" : { "shape" : "__listOfSanitizationWarning", "locationName" : "warnings", - "documentation" : "

      The list of the first 20 warnings about the configuration XML elements or attributes that were sanitized.

      " + "documentation" : "

      The list of the first 20 warnings about the configuration elements or attributes that were sanitized.

      " } }, "documentation" : "

      Returns information about the updated configuration.

      ", @@ -3112,7 +3367,7 @@ "Data" : { "shape" : "__string", "locationName" : "data", - "documentation" : "

      Required. The base64-encoded XML configuration.

      " + "documentation" : "

      Amazon MQ for Active MQ: The base64-encoded XML configuration. Amazon MQ for RabbitMQ: the base64-encoded Cuttlefish configuration.

      " }, "Description" : { "shape" : "__string", @@ -3129,7 +3384,7 @@ "Arn" : { "shape" : "__string", "locationName" : "arn", - "documentation" : "

      Required. The Amazon Resource Name (ARN) of the configuration.

      " + "documentation" : "

      The Amazon Resource Name (ARN) of the configuration.

      " }, "Created" : { "shape" : "__timestampIso8601", @@ -3139,7 +3394,7 @@ "Id" : { "shape" : "__string", "locationName" : "id", - "documentation" : "

      Required. The unique ID that Amazon MQ generates for the configuration.

      " + "documentation" : "

      The unique ID that Amazon MQ generates for the configuration.

      " }, "LatestRevision" : { "shape" : "ConfigurationRevision", @@ -3149,12 +3404,12 @@ "Name" : { "shape" : "__string", "locationName" : "name", - "documentation" : "

      Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.

      " + "documentation" : "

      The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long.

      " }, "Warnings" : { "shape" : "__listOfSanitizationWarning", "locationName" : "warnings", - "documentation" : "

      The list of the first 20 warnings about the configuration XML elements or attributes that were sanitized.

      " + "documentation" : "

      The list of the first 20 warnings about the configuration elements or attributes that were sanitized.

      " } } }, @@ -3175,6 +3430,11 @@ "shape" : "__string", "locationName" : "password", "documentation" : "

      The password of the user. This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas, colons, or equal signs (,:=).

      " + }, + "ReplicationUser" : { + "shape" : "__boolean", + "locationName" : "replicationUser", + "documentation" : "

      Defines whether the user is intended for data replication.

      " } }, "documentation" : "

      Updates the information for an ActiveMQ user.

      " @@ -3208,6 +3468,11 @@ "location" : "uri", "locationName" : "username", "documentation" : "

      The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

      " + }, + "ReplicationUser" : { + "shape" : "__boolean", + "locationName" : "replicationUser", + "documentation" : "

      Defines whether the user is intended for data replication.

      " } }, "documentation" : "

      Updates the information for an ActiveMQ user.

      ", @@ -3238,10 +3503,15 @@ "Username" : { "shape" : "__string", "locationName" : "username", - "documentation" : "

      important>Amazon MQ for ActiveMQ For ActiveMQ brokers, this value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

      /important> Amazon MQ for RabbitMQ

      For RabbitMQ brokers, this value can contain only alphanumeric characters, dashes, periods, underscores (- . _). This value must not contain a tilde (~) character. Amazon MQ prohibts using guest as a valid usename. This value must be 2-100 characters long.

      " + "documentation" : "

      The username of the broker user. The following restrictions apply to broker usernames:

      • For Amazon MQ for ActiveMQ brokers, this value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.

      • For Amazon MQ for RabbitMQ brokers, this value can contain only alphanumeric characters, dashes, periods, underscores (- . _). This value must not contain a tilde (~) character. Amazon MQ prohibits using guest as a valid username. This value must be 2-100 characters long.

      Do not add personally identifiable information (PII) or other confidential or sensitive information in broker usernames. Broker usernames are accessible to other Amazon Web Services services, including CloudWatch Logs. Broker usernames are not intended to be used for private or sensitive data.

      " + }, + "ReplicationUser" : { + "shape" : "__boolean", + "locationName" : "replicationUser", + "documentation" : "

      Defines if this user is intended for CRDR replication purposes.

      " } }, - "documentation" : "

      A user associated with the broker. For RabbitMQ brokers, one and only one administrative user is accepted and created when a broker is first provisioned. All subsequent broker users are created by making RabbitMQ API calls directly to brokers or via the RabbitMQ web console.

      ", + "documentation" : "

      A user associated with the broker. For Amazon MQ for RabbitMQ brokers, one and only one administrative user is accepted and created when a broker is first provisioned. All subsequent broker users are created by making RabbitMQ API calls directly to brokers or via the RabbitMQ web console.

      ", "required" : [ "Username", "Password" ] }, "UserPendingChanges" : { @@ -3433,15 +3703,5 @@ "timestampFormat" : "unixTimestamp" } }, - "authorizers" : { - "authorization_strategy" : { - "name" : "authorization_strategy", - "type" : "provided", - "placement" : { - "location" : "header", - "name" : "Authorization" - } - } - }, "documentation" : "

      Amazon MQ is a managed message broker service for Apache ActiveMQ and RabbitMQ that makes it easy to set up and operate message brokers in the cloud. A message broker allows software applications and components to communicate using various programming languages, operating systems, and formal messaging protocols.

      " } \ No newline at end of file diff --git a/services/mturk/pom.xml b/services/mturk/pom.xml index b90f0de58994..0f805ef4503b 100644 --- a/services/mturk/pom.xml +++ b/services/mturk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT mturk AWS Java SDK :: Services :: Amazon Mechanical Turk Requester diff --git a/services/mwaa/pom.xml b/services/mwaa/pom.xml index f9a9c6213ec9..8f3f442a3b0b 100644 --- a/services/mwaa/pom.xml +++ b/services/mwaa/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT mwaa AWS Java SDK :: Services :: MWAA diff --git a/services/mwaa/src/main/resources/codegen-resources/customization.config b/services/mwaa/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..0e729acd0371 --- /dev/null +++ b/services/mwaa/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "generateEndpointClientTests": true +} diff --git a/services/mwaa/src/main/resources/codegen-resources/endpoint-tests.json b/services/mwaa/src/main/resources/codegen-resources/endpoint-tests.json index 2f79f36beac4..87bc8970e1fd 100644 --- a/services/mwaa/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/mwaa/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { + "Region": "ap-northeast-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { @@ -21,9 +21,9 @@ } }, "params": { + "Region": "ap-northeast-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { + "Region": "ap-south-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-south-1" + "UseDualStack": false } }, { @@ -47,9 +47,9 @@ } }, "params": { + "Region": "ap-southeast-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-1" + "UseDualStack": 
false } }, { @@ -60,9 +60,9 @@ } }, "params": { + "Region": "ap-southeast-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { @@ -73,9 +73,9 @@ } }, "params": { + "Region": "ca-central-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ca-central-1" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { + "Region": "eu-central-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { @@ -99,9 +99,9 @@ } }, "params": { + "Region": "eu-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-north-1" + "UseDualStack": false } }, { @@ -112,9 +112,9 @@ } }, "params": { + "Region": "eu-west-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { @@ -125,9 +125,9 @@ } }, "params": { + "Region": "eu-west-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-2" + "UseDualStack": false } }, { @@ -138,9 +138,9 @@ } }, "params": { + "Region": "eu-west-3", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-3" + "UseDualStack": false } }, { @@ -151,9 +151,9 @@ } }, "params": { + "Region": "sa-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "sa-east-1" + "UseDualStack": false } }, { @@ -164,9 +164,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -177,9 +177,9 @@ } }, "params": { + "Region": "us-east-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-2" + "UseDualStack": false } }, { @@ -190,9 +190,9 @@ } }, "params": { + "Region": "us-west-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -203,9 +203,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -216,9 +216,9 @@ } }, "params": { + "Region": 
"us-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -229,9 +229,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -242,9 +242,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -255,9 +255,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -268,9 +268,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -281,9 +281,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -294,9 +294,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -307,9 +307,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -320,9 +320,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -333,9 +333,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -344,9 +344,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { @@ -357,9 +357,9 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ 
-368,9 +368,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { @@ -381,9 +381,9 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -392,9 +392,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -405,9 +405,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -416,9 +416,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -429,9 +429,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -442,9 +442,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -467,9 +467,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -479,9 +479,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, diff --git a/services/mwaa/src/main/resources/codegen-resources/service-2.json 
b/services/mwaa/src/main/resources/codegen-resources/service-2.json index 40802ad51444..a58562a3f9b5 100644 --- a/services/mwaa/src/main/resources/codegen-resources/service-2.json +++ b/services/mwaa/src/main/resources/codegen-resources/service-2.json @@ -285,7 +285,7 @@ }, "AirflowVersion":{ "shape":"AirflowVersion", - "documentation":"

      The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version. Valid values: 1.10.12, 2.0.2, 2.2.2, and 2.4.3. For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (MWAA).

      " + "documentation":"

      The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, and 2.5.1. For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (MWAA).

      " }, "DagS3Path":{ "shape":"RelativePath", @@ -459,7 +459,7 @@ }, "AirflowVersion":{ "shape":"AirflowVersion", - "documentation":"

      The Apache Airflow version on your environment. Valid values: 1.10.12, 2.0.2, 2.2.2, and 2.4.3.

      " + "documentation":"

      The Apache Airflow version on your environment. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, and 2.5.1.

      " }, "Arn":{ "shape":"EnvironmentArn", @@ -547,7 +547,7 @@ }, "Status":{ "shape":"EnvironmentStatus", - "documentation":"

      The status of the Amazon MWAA environment. Valid values:

      • CREATING - Indicates the request to create the environment is in progress.

      • CREATE_FAILED - Indicates the request to create the environment failed, and the environment could not be created.

      • AVAILABLE - Indicates the request was successful and the environment is ready to use.

      • UPDATING - Indicates the request to update the environment is in progress.

      • DELETING - Indicates the request to delete the environment is in progress.

      • DELETED - Indicates the request to delete the environment is complete, and the environment has been deleted.

      • UNAVAILABLE - Indicates the request failed, but the environment was unable to rollback and is not in a stable state.

      • UPDATE_FAILED - Indicates the request to update the environment failed, and the environment has rolled back successfully and is ready to use.

      We recommend reviewing our troubleshooting guide for a list of common errors and their solutions. For more information, see Amazon MWAA troubleshooting.

      " + "documentation":"

      The status of the Amazon MWAA environment. Valid values:

      • CREATING - Indicates the request to create the environment is in progress.

      • CREATING_SNAPSHOT - Indicates the request to update environment details, or upgrade the environment version, is in progress and Amazon MWAA is creating a storage volume snapshot of the Amazon RDS database cluster associated with the environment. A database snapshot is a backup created at a specific point in time. Amazon MWAA uses snapshots to recover environment metadata if the process to update or upgrade an environment fails.

      • CREATE_FAILED - Indicates the request to create the environment failed, and the environment could not be created.

      • AVAILABLE - Indicates the request was successful and the environment is ready to use.

      • UPDATING - Indicates the request to update the environment is in progress.

      • ROLLING_BACK - Indicates the request to update environment details, or upgrade the environment version, failed and Amazon MWAA is restoring the environment using the latest storage volume snapshot.

      • DELETING - Indicates the request to delete the environment is in progress.

      • DELETED - Indicates the request to delete the environment is complete, and the environment has been deleted.

      • UNAVAILABLE - Indicates the request failed, but the environment was unable to rollback and is not in a stable state.

      • UPDATE_FAILED - Indicates the request to update the environment failed, and the environment has rolled back successfully and is ready to use.

      We recommend reviewing our troubleshooting guide for a list of common errors and their solutions. For more information, see Amazon MWAA troubleshooting.

      " }, "Tags":{ "shape":"TagMap", @@ -599,7 +599,9 @@ "DELETING", "DELETED", "UNAVAILABLE", - "UPDATE_FAILED" + "UPDATE_FAILED", + "ROLLING_BACK", + "CREATING_SNAPSHOT" ] }, "ErrorCode":{"type":"string"}, @@ -1139,7 +1141,7 @@ }, "AirflowVersion":{ "shape":"AirflowVersion", - "documentation":"

      The Apache Airflow version for your environment. If no value is specified, defaults to the latest version. Valid values: 1.10.12, 2.0.2, 2.2.2, and 2.4.3.

      " + "documentation":"

      The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA.

      Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment.

      Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, and 2.5.1.

      " }, "DagS3Path":{ "shape":"RelativePath", @@ -1297,5 +1299,5 @@ "pattern":"(MON|TUE|WED|THU|FRI|SAT|SUN):([01]\\d|2[0-3]):(00|30)" } }, - "documentation":"

      Amazon Managed Workflows for Apache Airflow

      This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What Is Amazon MWAA?.

      Endpoints

      Regions

      For a list of regions that Amazon MWAA supports, see Region availability in the Amazon MWAA User Guide.

      " + "documentation":"

      Amazon Managed Workflows for Apache Airflow

      This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What is Amazon MWAA?.

      Endpoints

      Regions

      For a list of regions that Amazon MWAA supports, see Region availability in the Amazon MWAA User Guide.

      " } diff --git a/services/neptune/pom.xml b/services/neptune/pom.xml index 9c1658d7c294..50c3dcbab43c 100644 --- a/services/neptune/pom.xml +++ b/services/neptune/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT neptune AWS Java SDK :: Services :: Neptune diff --git a/services/networkfirewall/pom.xml b/services/networkfirewall/pom.xml index 6bbb3e53b233..5136857ef9c3 100644 --- a/services/networkfirewall/pom.xml +++ b/services/networkfirewall/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT networkfirewall AWS Java SDK :: Services :: Network Firewall diff --git a/services/networkmanager/pom.xml b/services/networkmanager/pom.xml index aeb463263870..a2ceb4e46984 100644 --- a/services/networkmanager/pom.xml +++ b/services/networkmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT networkmanager AWS Java SDK :: Services :: NetworkManager diff --git a/services/nimble/pom.xml b/services/nimble/pom.xml index 4ce85967d5c8..5b36ff4a381c 100644 --- a/services/nimble/pom.xml +++ b/services/nimble/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT nimble AWS Java SDK :: Services :: Nimble diff --git a/services/oam/pom.xml b/services/oam/pom.xml index c33c15542747..463eedff4618 100644 --- a/services/oam/pom.xml +++ b/services/oam/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT oam AWS Java SDK :: Services :: OAM diff --git a/services/omics/pom.xml b/services/omics/pom.xml index 802f086da75e..b00341f723c3 100644 --- a/services/omics/pom.xml +++ b/services/omics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT omics AWS Java SDK :: Services :: Omics diff --git a/services/omics/src/main/resources/codegen-resources/customization.config 
b/services/omics/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..0e729acd0371 --- /dev/null +++ b/services/omics/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "generateEndpointClientTests": true +} diff --git a/services/opensearch/pom.xml b/services/opensearch/pom.xml index 37b9bb86b68d..601dcc6a55b9 100644 --- a/services/opensearch/pom.xml +++ b/services/opensearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT opensearch AWS Java SDK :: Services :: Open Search diff --git a/services/opensearch/src/main/resources/codegen-resources/service-2.json b/services/opensearch/src/main/resources/codegen-resources/service-2.json index 8198d4422e62..eb26e418a0c4 100644 --- a/services/opensearch/src/main/resources/codegen-resources/service-2.json +++ b/services/opensearch/src/main/resources/codegen-resources/service-2.json @@ -1763,7 +1763,11 @@ "members":{ "Endpoint":{ "shape":"Endpoint", - "documentation":"

      The endpoint of the remote domain.

      " + "documentation":"

      The Endpoint attribute cannot be modified.

      The endpoint of the remote domain. Applicable for VPC_ENDPOINT connection mode.

      " + }, + "CrossClusterSearch":{ + "shape":"CrossClusterSearchConnectionProperties", + "documentation":"

      The connection properties for cross cluster search.

      " } }, "documentation":"

      The connection properties of an outbound connection.

      " @@ -1880,6 +1884,10 @@ "ConnectionMode":{ "shape":"ConnectionMode", "documentation":"

      The connection mode.

      " + }, + "ConnectionProperties":{ + "shape":"ConnectionProperties", + "documentation":"

      The ConnectionProperties for the outbound connection.

      " } }, "documentation":"

      Container for the parameters to the CreateOutboundConnection operation.

      " @@ -1987,6 +1995,16 @@ } }, "CreatedAt":{"type":"timestamp"}, + "CrossClusterSearchConnectionProperties":{ + "type":"structure", + "members":{ + "SkipUnavailable":{ + "shape":"SkipUnavailableStatus", + "documentation":"

      Status of SkipUnavailable param for outbound connection.

      " + } + }, + "documentation":"

      Cross cluster search specific connection properties.

      " + }, "DeleteDomainRequest":{ "type":"structure", "required":["DomainName"], @@ -2529,8 +2547,7 @@ }, "DescribePackagesFilterValues":{ "type":"list", - "member":{"shape":"DescribePackagesFilterValue"}, - "min":1 + "member":{"shape":"DescribePackagesFilterValue"} }, "DescribePackagesRequest":{ "type":"structure", @@ -5136,6 +5153,14 @@ "type":"string", "documentation":"

      The domain endpoint to which index and search requests are submitted. For example, search-imdb-movies-oopcnjfn6ugo.eu-west-1.es.amazonaws.com or doc-imdb-movies-oopcnjfn6u.eu-west-1.es.amazonaws.com.

      " }, + "SkipUnavailableStatus":{ + "type":"string", + "documentation":"

      Status of SkipUnavailable param for outbound connection.

      • ENABLED - The SkipUnavailable param is enabled for the connection.

      • DISABLED - The SkipUnavailable param is disabled for the connection.

      ", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "SlotList":{ "type":"list", "member":{"shape":"Long"} diff --git a/services/opensearchserverless/pom.xml b/services/opensearchserverless/pom.xml index 005a9187783e..48caff19ee6b 100644 --- a/services/opensearchserverless/pom.xml +++ b/services/opensearchserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT opensearchserverless AWS Java SDK :: Services :: Open Search Serverless diff --git a/services/opsworks/pom.xml b/services/opsworks/pom.xml index c3fdfba8e09e..5fe1d812f787 100644 --- a/services/opsworks/pom.xml +++ b/services/opsworks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT opsworks AWS Java SDK :: Services :: AWS OpsWorks diff --git a/services/opsworkscm/pom.xml b/services/opsworkscm/pom.xml index 40f273aadbdc..04c5797f351e 100644 --- a/services/opsworkscm/pom.xml +++ b/services/opsworkscm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT opsworkscm AWS Java SDK :: Services :: AWS OpsWorks for Chef Automate diff --git a/services/organizations/pom.xml b/services/organizations/pom.xml index 2ec6eba09ff3..6174dde2dc43 100644 --- a/services/organizations/pom.xml +++ b/services/organizations/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT organizations AWS Java SDK :: Services :: AWS Organizations diff --git a/services/osis/pom.xml b/services/osis/pom.xml index 4c1c90c82830..9f1894f11e34 100644 --- a/services/osis/pom.xml +++ b/services/osis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT osis AWS Java SDK :: Services :: OSIS diff --git a/services/outposts/pom.xml b/services/outposts/pom.xml index b426a8f851c1..85fa75a20444 100644 --- a/services/outposts/pom.xml +++ b/services/outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 
2.20.93-SNAPSHOT outposts AWS Java SDK :: Services :: Outposts diff --git a/services/panorama/pom.xml b/services/panorama/pom.xml index fd9e1acc4d87..06a733bab578 100644 --- a/services/panorama/pom.xml +++ b/services/panorama/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT panorama AWS Java SDK :: Services :: Panorama diff --git a/services/paymentcryptography/pom.xml b/services/paymentcryptography/pom.xml new file mode 100644 index 000000000000..f281a20bddce --- /dev/null +++ b/services/paymentcryptography/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.20.93-SNAPSHOT + + paymentcryptography + AWS Java SDK :: Services :: Payment Cryptography + The AWS Java SDK for Payment Cryptography module holds the client classes that are used for + communicating with Payment Cryptography. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.paymentcryptography + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/paymentcryptography/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/paymentcryptography/src/main/resources/codegen-resources/endpoint-rule-set.json new file mode 100644 index 000000000000..ff2c3e5d3ab0 --- /dev/null +++ b/services/paymentcryptography/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. 
If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + 
{ + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://controlplane.payment-cryptography-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://controlplane.payment-cryptography-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://controlplane.payment-cryptography.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://controlplane.payment-cryptography.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] + } + ] +} \ No newline at end of file diff --git a/services/paymentcryptography/src/main/resources/codegen-resources/endpoint-tests.json b/services/paymentcryptography/src/main/resources/codegen-resources/endpoint-tests.json new file mode 100644 index 000000000000..859cd0c52420 --- /dev/null +++ b/services/paymentcryptography/src/main/resources/codegen-resources/endpoint-tests.json @@ -0,0 +1,295 @@ +{ + "testCases": [ + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://controlplane.payment-cryptography-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-gov-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://controlplane.payment-cryptography-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-gov-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://controlplane.payment-cryptography.us-gov-east-1.api.aws" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-gov-east-1", + 
"UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://controlplane.payment-cryptography.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-gov-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://controlplane.payment-cryptography-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "UseFIPS": true, + "Region": "cn-north-1", + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://controlplane.payment-cryptography-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseFIPS": true, + "Region": "cn-north-1", + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://controlplane.payment-cryptography.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "UseFIPS": false, + "Region": "cn-north-1", + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://controlplane.payment-cryptography.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseFIPS": false, + "Region": "cn-north-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseFIPS": true, + "Region": "us-iso-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + 
"url": "https://controlplane.payment-cryptography-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-iso-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseFIPS": false, + "Region": "us-iso-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://controlplane.payment-cryptography.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-iso-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://controlplane.payment-cryptography-fips.us-east-1.api.aws" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://controlplane.payment-cryptography-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://controlplane.payment-cryptography.us-east-1.api.aws" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://controlplane.payment-cryptography.us-east-1.amazonaws.com" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region 
us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseFIPS": true, + "Region": "us-isob-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://controlplane.payment-cryptography-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-isob-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseFIPS": false, + "Region": "us-isob-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://controlplane.payment-cryptography.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-isob-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "UseFIPS": true, + "Region": "us-east-1", + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "UseFIPS": 
false, + "Region": "us-east-1", + "UseDualStack": true, + "Endpoint": "https://example.com" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/services/paymentcryptography/src/main/resources/codegen-resources/paginators-1.json b/services/paymentcryptography/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..02af499b6530 --- /dev/null +++ b/services/paymentcryptography/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,22 @@ +{ + "pagination": { + "ListAliases": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Aliases" + }, + "ListKeys": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Keys" + }, + "ListTagsForResource": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Tags" + } + } +} diff --git a/services/paymentcryptography/src/main/resources/codegen-resources/service-2.json b/services/paymentcryptography/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..0a25ad5bc3f4 --- /dev/null +++ b/services/paymentcryptography/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1640 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2021-09-14", + "endpointPrefix":"controlplane.payment-cryptography", + "jsonVersion":"1.0", + "protocol":"json", + "serviceFullName":"Payment Cryptography Control Plane", + "serviceId":"Payment Cryptography", + "signatureVersion":"v4", + "signingName":"payment-cryptography", + "targetPrefix":"PaymentCryptographyControlPlane", + "uid":"payment-cryptography-2021-09-14" + }, + "operations":{ + "CreateAlias":{ + "name":"CreateAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAliasInput"}, + "output":{"shape":"CreateAliasOutput"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + 
{"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Creates an alias, or a friendly name, for an Amazon Web Services Payment Cryptography key. You can use an alias to identify a key in the console and when you call cryptographic operations such as EncryptData or DecryptData.

      You can associate the alias with any key in the same Amazon Web Services Region. Each alias is associated with only one key at a time, but a key can have multiple aliases. You can't create an alias without a key. The alias must be unique in the account and Amazon Web Services Region, but you can create another alias with the same name in a different Amazon Web Services Region.

      To change the key that's associated with the alias, call UpdateAlias. To delete the alias, call DeleteAlias. These operations don't affect the underlying key. To get the alias that you created, call ListAliases.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "CreateKey":{ + "name":"CreateKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateKeyInput"}, + "output":{"shape":"CreateKeyOutput"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Creates an Amazon Web Services Payment Cryptography key, a logical representation of a cryptographic key, that is unique in your account and Amazon Web Services Region. You use keys for cryptographic functions such as encryption and decryption.

      In addition to the key material used in cryptographic operations, an Amazon Web Services Payment Cryptography key includes metadata such as the key ARN, key usage, key origin, creation date, description, and key state.

      When you create a key, you specify both immutable and mutable data about the key. The immutable data contains key attributes that defines the scope and cryptographic operations that you can perform using the key, for example key class (example: SYMMETRIC_KEY), key algorithm (example: TDES_2KEY), key usage (example: TR31_P0_PIN_ENCRYPTION_KEY) and key modes of use (example: Encrypt). For information about valid combinations of key attributes, see Understanding key attributes in the Amazon Web Services Payment Cryptography User Guide. The mutable data contained within a key includes usage timestamp and key deletion timestamp and can be modified after creation.

      Amazon Web Services Payment Cryptography binds key attributes to keys using key blocks when you store or export them. Amazon Web Services Payment Cryptography stores the key contents wrapped and never stores or transmits them in the clear.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "DeleteAlias":{ + "name":"DeleteAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAliasInput"}, + "output":{"shape":"DeleteAliasOutput"}, + "errors":[ + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Deletes the alias, but doesn't affect the underlying key.

      Each key can have multiple aliases. To get the aliases of all keys, use the ListAliases operation. To change the alias of a key, first use DeleteAlias to delete the current alias and then use CreateAlias to create a new alias. To associate an existing alias with a different key, call UpdateAlias.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "DeleteKey":{ + "name":"DeleteKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteKeyInput"}, + "output":{"shape":"DeleteKeyOutput"}, + "errors":[ + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Deletes the key material and all metadata associated with Amazon Web Services Payment Cryptography key.

      Key deletion is irreversible. After a key is deleted, you can't perform cryptographic operations using the key. For example, you can't decrypt data that was encrypted by a deleted Amazon Web Services Payment Cryptography key, and the data may become unrecoverable. Because key deletion is destructive, Amazon Web Services Payment Cryptography has a safety mechanism to prevent accidental deletion of a key. When you call this operation, Amazon Web Services Payment Cryptography disables the specified key but doesn't delete it until after a waiting period. The default waiting period is 7 days. To set a different waiting period, set DeleteKeyInDays. During the waiting period, the KeyState is DELETE_PENDING. After the key is deleted, the KeyState is DELETE_COMPLETE.

      If you delete key material, you can use ImportKey to reimport the same key material into the Amazon Web Services Payment Cryptography key.

      You should delete a key only when you are sure that you don't need to use it anymore and no other parties are utilizing this key. If you aren't sure, consider deactivating it instead by calling StopKeyUsage.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "ExportKey":{ + "name":"ExportKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExportKeyInput"}, + "output":{"shape":"ExportKeyOutput"}, + "errors":[ + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Exports a key from Amazon Web Services Payment Cryptography using either ANSI X9 TR-34 or TR-31 key export standard.

      Amazon Web Services Payment Cryptography simplifies main or root key exchange process by eliminating the need of a paper-based key exchange process. It takes a modern and secure approach based on the ANSI X9 TR-34 key exchange standard.

      You can use ExportKey to export main or root keys such as KEK (Key Encryption Key), using asymmetric key exchange technique following ANSI X9 TR-34 standard. The ANSI X9 TR-34 standard uses asymmetric keys to establish bi-directional trust between the two parties exchanging keys. After which you can export working keys using the ANSI X9 TR-31 symmetric key exchange standard as mandated by PCI PIN. Using this operation, you can share your Amazon Web Services Payment Cryptography generated keys with other service partners to perform cryptographic operations outside of Amazon Web Services Payment Cryptography

      TR-34 key export

      Amazon Web Services Payment Cryptography uses TR-34 asymmetric key exchange standard to export main keys such as KEK. In TR-34 terminology, the sending party of the key is called Key Distribution Host (KDH) and the receiving party of the key is called Key Receiving Host (KRH). In key export process, KDH is Amazon Web Services Payment Cryptography which initiates key export. KRH is the user receiving the key. Before you initiate TR-34 key export, you must obtain an export token by calling GetParametersForExport. This operation also returns the signing key certificate that KDH uses to sign the wrapped key to generate a TR-34 wrapped key block. The export token expires after 7 days.

      Set the following parameters:

      CertificateAuthorityPublicKeyIdentifier

      The KeyARN of the certificate chain that will sign the wrapping key certificate. This must exist within Amazon Web Services Payment Cryptography before you initiate TR-34 key export. If it does not exist, you can import it by calling ImportKey for RootCertificatePublicKey.

      ExportToken

      Obtained from KDH by calling GetParametersForExport.

      WrappingKeyCertificate

      Amazon Web Services Payment Cryptography uses this to wrap the key under export.

      When this operation is successful, Amazon Web Services Payment Cryptography returns the TR-34 wrapped key block.

      TR-31 key export

      Amazon Web Services Payment Cryptography uses TR-31 symmetric key exchange standard to export working keys. In TR-31, you must use a main key such as KEK to encrypt or wrap the key under export. To establish a KEK, you can use CreateKey or ImportKey. When this operation is successful, Amazon Web Services Payment Cryptography returns a TR-31 wrapped key block.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "GetAlias":{ + "name":"GetAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAliasInput"}, + "output":{"shape":"GetAliasOutput"}, + "errors":[ + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Gets the Amazon Web Services Payment Cryptography key associated with the alias.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "GetKey":{ + "name":"GetKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetKeyInput"}, + "output":{"shape":"GetKeyOutput"}, + "errors":[ + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Gets the key material for an Amazon Web Services Payment Cryptography key, including the immutable and mutable data specified when the key was created.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "GetParametersForExport":{ + "name":"GetParametersForExport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetParametersForExportInput"}, + "output":{"shape":"GetParametersForExportOutput"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Gets the export token and the signing key certificate to initiate a TR-34 key export from Amazon Web Services Payment Cryptography.

      The signing key certificate signs the wrapped key under export within the TR-34 key payload. The export token and signing key certificate must be in place and operational before calling ExportKey. The export token expires in 7 days. You can use the same export token to export multiple keys from your service account.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "GetParametersForImport":{ + "name":"GetParametersForImport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetParametersForImportInput"}, + "output":{"shape":"GetParametersForImportOutput"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Gets the import token and the wrapping key certificate to initiate a TR-34 key import into Amazon Web Services Payment Cryptography.

      The wrapping key certificate wraps the key under import within the TR-34 key payload. The import token and wrapping key certificate must be in place and operational before calling ImportKey. The import token expires in 7 days. The same import token can be used to import multiple keys into your service account.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "GetPublicKeyCertificate":{ + "name":"GetPublicKeyCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPublicKeyCertificateInput"}, + "output":{"shape":"GetPublicKeyCertificateOutput"}, + "errors":[ + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Gets the public key certificate of the asymmetric key pair that exists within Amazon Web Services Payment Cryptography.

      Unlike the private key of an asymmetric key, which never leaves Amazon Web Services Payment Cryptography unencrypted, callers with GetPublicKeyCertificate permission can download the public key certificate of the asymmetric key. You can share the public key certificate to allow others to encrypt messages and verify signatures outside of Amazon Web Services Payment Cryptography

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      " + }, + "ImportKey":{ + "name":"ImportKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportKeyInput"}, + "output":{"shape":"ImportKeyOutput"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Imports keys and public key certificates into Amazon Web Services Payment Cryptography.

      Amazon Web Services Payment Cryptography simplifies main or root key exchange process by eliminating the need of a paper-based key exchange process. It takes a modern and secure approach based on the ANSI X9 TR-34 key exchange standard.

      You can use ImportKey to import main or root keys such as KEK (Key Encryption Key) using asymmetric key exchange technique following the ANSI X9 TR-34 standard. The ANSI X9 TR-34 standard uses asymmetric keys to establish bi-directional trust between the two parties exchanging keys.

      After you have imported a main or root key, you can import working keys to perform various cryptographic operations within Amazon Web Services Payment Cryptography using the ANSI X9 TR-31 symmetric key exchange standard as mandated by PCI PIN.

      You can also import a root public key certificate, a self-signed certificate used to sign other public key certificates, or a trusted public key certificate under an already established root public key certificate.

      To import a public root key certificate

      Using this operation, you can import the public component (in PEM certificate format) of your private root key. You can use the imported public root key certificate for digital signatures, for example signing wrapping key or signing key in TR-34, within your Amazon Web Services Payment Cryptography account.

      Set the following parameters:

      • KeyMaterial: RootCertificatePublicKey

      • KeyClass: PUBLIC_KEY

      • KeyModesOfUse: Verify

      • KeyUsage: TR31_S0_ASYMMETRIC_KEY_FOR_DIGITAL_SIGNATURE

      • PublicKeyCertificate: The certificate authority used to sign the root public key certificate.

      To import a trusted public key certificate

      The root public key certificate must be in place and operational before you import a trusted public key certificate. Set the following parameters:

      • KeyMaterial: TrustedCertificatePublicKey

      • CertificateAuthorityPublicKeyIdentifier: KeyArn of the RootCertificatePublicKey.

      • KeyModesOfUse and KeyUsage: Corresponding to the cryptographic operations such as wrap, sign, or encrypt that you will allow the trusted public key certificate to perform.

      • PublicKeyCertificate: The certificate authority used to sign the trusted public key certificate.

      Import main keys

      Amazon Web Services Payment Cryptography uses TR-34 asymmetric key exchange standard to import main keys such as KEK. In TR-34 terminology, the sending party of the key is called Key Distribution Host (KDH) and the receiving party of the key is called Key Receiving Host (KRH). During the key import process, KDH is the user who initiates the key import and KRH is Amazon Web Services Payment Cryptography who receives the key. Before initiating TR-34 key import, you must obtain an import token by calling GetParametersForImport. This operation also returns the wrapping key certificate that KDH uses to wrap the key under import to generate a TR-34 wrapped key block. The import token expires after 7 days.

      Set the following parameters:

      • CertificateAuthorityPublicKeyIdentifier: The KeyArn of the certificate chain that will sign the signing key certificate and should exist within Amazon Web Services Payment Cryptography before initiating TR-34 key import. If it does not exist, you can import it by calling ImportKey for RootCertificatePublicKey.

      • ImportToken: Obtained from KRH by calling GetParametersForImport.

      • WrappedKeyBlock: The TR-34 wrapped key block from KDH. It contains the KDH key under import, wrapped with KRH provided wrapping key certificate and signed by the KDH private signing key. This TR-34 key block is generated by the KDH Hardware Security Module (HSM) outside of Amazon Web Services Payment Cryptography.

      • SigningKeyCertificate: The public component of the private key that signed the KDH TR-34 wrapped key block. In PEM certificate format.

      TR-34 is intended primarily to exchange 3DES keys. Your ability to export AES-128 and larger AES keys may be dependent on your source system.

      Import working keys

      Amazon Web Services Payment Cryptography uses TR-31 symmetric key exchange standard to import working keys. A KEK must be established within Amazon Web Services Payment Cryptography by using TR-34 key import. To initiate a TR-31 key import, set the following parameters:

      • WrappedKeyBlock: The key under import and encrypted using KEK. The TR-31 key block generated by your HSM outside of Amazon Web Services Payment Cryptography.

      • WrappingKeyIdentifier: The KeyArn of the KEK that Amazon Web Services Payment Cryptography uses to decrypt or unwrap the key under import.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "ListAliases":{ + "name":"ListAliases", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAliasesInput"}, + "output":{"shape":"ListAliasesOutput"}, + "errors":[ + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Lists the aliases for all keys in the caller's Amazon Web Services account and Amazon Web Services Region. You can filter the list of aliases. For more information, see Using aliases in the Amazon Web Services Payment Cryptography User Guide.

      This is a paginated operation, which means that each response might contain only a subset of all the aliases. When the response contains only a subset of aliases, it includes a NextToken value. Use this value in a subsequent ListAliases request to get more aliases. When you receive a response with no NextToken (or an empty or null value), that means there are no more aliases to get.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "ListKeys":{ + "name":"ListKeys", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListKeysInput"}, + "output":{"shape":"ListKeysOutput"}, + "errors":[ + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Lists the keys in the caller's Amazon Web Services account and Amazon Web Services Region. You can filter the list of keys.

      This is a paginated operation, which means that each response might contain only a subset of all the keys. When the response contains only a subset of keys, it includes a NextToken value. Use this value in a subsequent ListKeys request to get more keys. When you receive a response with no NextToken (or an empty or null value), that means there are no more keys to get.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceInput"}, + "output":{"shape":"ListTagsForResourceOutput"}, + "errors":[ + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Lists the tags for an Amazon Web Services resource.

      This is a paginated operation, which means that each response might contain only a subset of all the tags. When the response contains only a subset of tags, it includes a NextToken value. Use this value in a subsequent ListTagsForResource request to get more tags. When you receive a response with no NextToken (or an empty or null value), that means there are no more tags to get.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "RestoreKey":{ + "name":"RestoreKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreKeyInput"}, + "output":{"shape":"RestoreKeyOutput"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Cancels a scheduled key deletion during the waiting period. Use this operation to restore a Key that is scheduled for deletion.

      During the waiting period, the KeyState is DELETE_PENDING and deletePendingTimestamp contains the date and time after which the Key will be deleted. After Key is restored, the KeyState is CREATE_COMPLETE, and the value for deletePendingTimestamp is removed.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "StartKeyUsage":{ + "name":"StartKeyUsage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartKeyUsageInput"}, + "output":{"shape":"StartKeyUsageOutput"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Enables an Amazon Web Services Payment Cryptography key, which makes it active for cryptographic operations within Amazon Web Services Payment Cryptography

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "StopKeyUsage":{ + "name":"StopKeyUsage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopKeyUsageInput"}, + "output":{"shape":"StopKeyUsageOutput"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Disables an Amazon Web Services Payment Cryptography key, which makes it inactive within Amazon Web Services Payment Cryptography.

      You can use this operation instead of DeleteKey to deactivate a key. You can enable the key in the future by calling StartKeyUsage.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceInput"}, + "output":{"shape":"TagResourceOutput"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Adds or edits tags on an Amazon Web Services Payment Cryptography key.

      Tagging or untagging an Amazon Web Services Payment Cryptography key can allow or deny permission to the key.

      Each tag consists of a tag key and a tag value, both of which are case-sensitive strings. The tag value can be an empty (null) string. To add a tag, specify a new tag key and a tag value. To edit a tag, specify an existing tag key and a new tag value. You can also add tags to an Amazon Web Services Payment Cryptography key when you create it with CreateKey.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceInput"}, + "output":{"shape":"UntagResourceOutput"}, + "errors":[ + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Deletes a tag from an Amazon Web Services Payment Cryptography key.

      Tagging or untagging an Amazon Web Services Payment Cryptography key can allow or deny permission to the key.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "UpdateAlias":{ + "name":"UpdateAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAliasInput"}, + "output":{"shape":"UpdateAliasOutput"}, + "errors":[ + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Associates an existing Amazon Web Services Payment Cryptography alias with a different key. Each alias is associated with only one Amazon Web Services Payment Cryptography key at a time, although a key can have multiple aliases. The alias and the Amazon Web Services Payment Cryptography key must be in the same Amazon Web Services account and Amazon Web Services Region

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

      You do not have sufficient access to perform this action.

      ", + "exception":true + }, + "Alias":{ + "type":"structure", + "required":["AliasName"], + "members":{ + "AliasName":{ + "shape":"AliasName", + "documentation":"

      A friendly name that you can use to refer to a key. The value must begin with alias/.

      Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

      " + }, + "KeyArn":{ + "shape":"KeyArn", + "documentation":"

      The KeyARN of the key associated with the alias.

      " + } + }, + "documentation":"

      Contains information about an alias.

      " + }, + "AliasName":{ + "type":"string", + "max":256, + "min":7, + "pattern":"^alias/[a-zA-Z0-9/_-]+$" + }, + "Aliases":{ + "type":"list", + "member":{"shape":"Alias"} + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "CertificateType":{ + "type":"string", + "max":32768, + "min":1, + "pattern":"^[^\\[;\\]<>]+$", + "sensitive":true + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

      This request can cause an inconsistent state for the resource.

      ", + "exception":true + }, + "CreateAliasInput":{ + "type":"structure", + "required":["AliasName"], + "members":{ + "AliasName":{ + "shape":"AliasName", + "documentation":"

      A friendly name that you can use to refer a key. An alias must begin with alias/ followed by a name, for example alias/ExampleAlias. It can contain only alphanumeric characters, forward slashes (/), underscores (_), and dashes (-).

      Don't include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

      " + }, + "KeyArn":{ + "shape":"KeyArn", + "documentation":"

      The KeyARN of the key to associate with the alias.

      " + } + } + }, + "CreateAliasOutput":{ + "type":"structure", + "required":["Alias"], + "members":{ + "Alias":{ + "shape":"Alias", + "documentation":"

      The alias for the key.

      " + } + } + }, + "CreateKeyInput":{ + "type":"structure", + "required":[ + "Exportable", + "KeyAttributes" + ], + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

      Specifies whether to enable the key. If the key is enabled, it is activated for use within the service. If the key not enabled, then it is created but not activated. The default value is enabled.

      " + }, + "Exportable":{ + "shape":"Boolean", + "documentation":"

      Specifies whether the key is exportable from the service.

      " + }, + "KeyAttributes":{ + "shape":"KeyAttributes", + "documentation":"

      The role of the key, the algorithm it supports, and the cryptographic operations allowed with the key. This data is immutable after the key is created.

      " + }, + "KeyCheckValueAlgorithm":{ + "shape":"KeyCheckValueAlgorithm", + "documentation":"

      The algorithm that Amazon Web Services Payment Cryptography uses to calculate the key check value (KCV) for DES and AES keys.

      For DES key, the KCV is computed by encrypting 8 bytes, each with value '00', with the key to be checked and retaining the 3 highest order bytes of the encrypted result. For AES key, the KCV is computed by encrypting 8 bytes, each with value '01', with the key to be checked and retaining the 3 highest order bytes of the encrypted result.

      " + }, + "Tags":{ + "shape":"Tags", + "documentation":"

      The tags to attach to the key. Each tag consists of a tag key and a tag value. Both the tag key and the tag value are required, but the tag value can be an empty (null) string. You can't have more than one tag on an Amazon Web Services Payment Cryptography key with the same tag key.

      To use this parameter, you must have TagResource permission.

      Don't include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

      Tagging or untagging an Amazon Web Services Payment Cryptography key can allow or deny permission to the key.

      " + } + } + }, + "CreateKeyOutput":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{ + "shape":"Key", + "documentation":"

      The key material that contains all the key attributes.

      " + } + } + }, + "DeleteAliasInput":{ + "type":"structure", + "required":["AliasName"], + "members":{ + "AliasName":{ + "shape":"AliasName", + "documentation":"

      A friendly name that you can use to refer to the Amazon Web Services Payment Cryptography key. This value must begin with alias/ followed by a name, such as alias/ExampleAlias.

      " + } + } + }, + "DeleteAliasOutput":{ + "type":"structure", + "members":{ + } + }, + "DeleteKeyInput":{ + "type":"structure", + "required":["KeyIdentifier"], + "members":{ + "DeleteKeyInDays":{ + "shape":"DeleteKeyInputDeleteKeyInDaysInteger", + "documentation":"

      The waiting period for key deletion. The default value is seven days.

      " + }, + "KeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The KeyARN of the key that is scheduled for deletion.

      " + } + } + }, + "DeleteKeyInputDeleteKeyInDaysInteger":{ + "type":"integer", + "box":true, + "max":180, + "min":3 + }, + "DeleteKeyOutput":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{ + "shape":"Key", + "documentation":"

      The KeyARN of the key that is scheduled for deletion.

      " + } + } + }, + "ExportKeyInput":{ + "type":"structure", + "required":[ + "ExportKeyIdentifier", + "KeyMaterial" + ], + "members":{ + "ExportKeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The KeyARN of the key under export from Amazon Web Services Payment Cryptography.

      " + }, + "KeyMaterial":{ + "shape":"ExportKeyMaterial", + "documentation":"

      The key block format type, for example, TR-34 or TR-31, to use during key material export.

      " + } + } + }, + "ExportKeyMaterial":{ + "type":"structure", + "members":{ + "Tr31KeyBlock":{ + "shape":"ExportTr31KeyBlock", + "documentation":"

      Parameter information for key material export using TR-31 standard.

      " + }, + "Tr34KeyBlock":{ + "shape":"ExportTr34KeyBlock", + "documentation":"

      Parameter information for key material export using TR-34 standard.

      " + } + }, + "documentation":"

      Parameter information for key material export from Amazon Web Services Payment Cryptography.

      ", + "union":true + }, + "ExportKeyOutput":{ + "type":"structure", + "members":{ + "WrappedKey":{ + "shape":"WrappedKey", + "documentation":"

      The key material under export as a TR-34 or TR-31 wrapped key block.

      " + } + } + }, + "ExportTokenId":{ + "type":"string", + "pattern":"^export-token-[0-9a-zA-Z]{16,64}$" + }, + "ExportTr31KeyBlock":{ + "type":"structure", + "required":["WrappingKeyIdentifier"], + "members":{ + "WrappingKeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The KeyARN of the wrapping key. This key encrypts or wraps the key under export for TR-31 key block generation.

      " + } + }, + "documentation":"

      Parameter information for key material export using TR-31 standard.

      " + }, + "ExportTr34KeyBlock":{ + "type":"structure", + "required":[ + "CertificateAuthorityPublicKeyIdentifier", + "ExportToken", + "KeyBlockFormat", + "WrappingKeyCertificate" + ], + "members":{ + "CertificateAuthorityPublicKeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The KeyARN of the certificate chain that signs the wrapping key certificate during TR-34 key export.

      " + }, + "ExportToken":{ + "shape":"ExportTokenId", + "documentation":"

      The export token to initiate key export from Amazon Web Services Payment Cryptography. It also contains the signing key certificate that will sign the wrapped key during TR-34 key block generation. Call GetParametersForExport to receive an export token. It expires after 7 days. You can use the same export token to export multiple keys from the same service account.

      " + }, + "KeyBlockFormat":{ + "shape":"Tr34KeyBlockFormat", + "documentation":"

      The format of key block that Amazon Web Services Payment Cryptography will use during key export.

      " + }, + "RandomNonce":{ + "shape":"HexLength16", + "documentation":"

      A random number value that is unique to the TR-34 key block generated using 2 pass. The operation will fail, if a random nonce value is not provided for a TR-34 key block generated using 2 pass.

      " + }, + "WrappingKeyCertificate":{ + "shape":"CertificateType", + "documentation":"

      The KeyARN of the wrapping key certificate. Amazon Web Services Payment Cryptography uses this certificate to wrap the key under export.

      " + } + }, + "documentation":"

      Parameter information for key material export using TR-34 standard.

      " + }, + "GetAliasInput":{ + "type":"structure", + "required":["AliasName"], + "members":{ + "AliasName":{ + "shape":"AliasName", + "documentation":"

      The alias of the Amazon Web Services Payment Cryptography key.

      " + } + } + }, + "GetAliasOutput":{ + "type":"structure", + "required":["Alias"], + "members":{ + "Alias":{ + "shape":"Alias", + "documentation":"

      The alias of the Amazon Web Services Payment Cryptography key.

      " + } + } + }, + "GetKeyInput":{ + "type":"structure", + "required":["KeyIdentifier"], + "members":{ + "KeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The KeyARN of the Amazon Web Services Payment Cryptography key.

      " + } + } + }, + "GetKeyOutput":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{ + "shape":"Key", + "documentation":"

      The key material, including the immutable and mutable data for the key.

      " + } + } + }, + "GetParametersForExportInput":{ + "type":"structure", + "required":[ + "KeyMaterialType", + "SigningKeyAlgorithm" + ], + "members":{ + "KeyMaterialType":{ + "shape":"KeyMaterialType", + "documentation":"

      The key block format type (for example, TR-34 or TR-31) to use during key material export. Export token is only required for a TR-34 key export, TR34_KEY_BLOCK. Export token is not required for TR-31 key export.

      " + }, + "SigningKeyAlgorithm":{ + "shape":"KeyAlgorithm", + "documentation":"

      The signing key algorithm to generate a signing key certificate. This certificate signs the wrapped key under export within the TR-34 key block cryptogram. RSA_2048 is the only signing key algorithm allowed.

      " + } + } + }, + "GetParametersForExportOutput":{ + "type":"structure", + "required":[ + "ExportToken", + "ParametersValidUntilTimestamp", + "SigningKeyAlgorithm", + "SigningKeyCertificate", + "SigningKeyCertificateChain" + ], + "members":{ + "ExportToken":{ + "shape":"ExportTokenId", + "documentation":"

      The export token to initiate key export from Amazon Web Services Payment Cryptography. The export token expires after 7 days. You can use the same export token to export multiple keys from the same service account.

      " + }, + "ParametersValidUntilTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The validity period of the export token.

      " + }, + "SigningKeyAlgorithm":{ + "shape":"KeyAlgorithm", + "documentation":"

      The algorithm of the signing key certificate for use in TR-34 key block generation. RSA_2048 is the only signing key algorithm allowed.

      " + }, + "SigningKeyCertificate":{ + "shape":"CertificateType", + "documentation":"

      The signing key certificate of the public key for signature within the TR-34 key block cryptogram. The certificate expires after 7 days.

      " + }, + "SigningKeyCertificateChain":{ + "shape":"CertificateType", + "documentation":"

      The certificate chain that signed the signing key certificate. This is the root certificate authority (CA) within your service account.

      " + } + } + }, + "GetParametersForImportInput":{ + "type":"structure", + "required":[ + "KeyMaterialType", + "WrappingKeyAlgorithm" + ], + "members":{ + "KeyMaterialType":{ + "shape":"KeyMaterialType", + "documentation":"

      The key block format type such as TR-34 or TR-31 to use during key material import. Import token is only required for TR-34 key import TR34_KEY_BLOCK. Import token is not required for TR-31 key import.

      " + }, + "WrappingKeyAlgorithm":{ + "shape":"KeyAlgorithm", + "documentation":"

      The wrapping key algorithm to generate a wrapping key certificate. This certificate wraps the key under import within the TR-34 key block cryptogram. RSA_2048 is the only wrapping key algorithm allowed.

      " + } + } + }, + "GetParametersForImportOutput":{ + "type":"structure", + "required":[ + "ImportToken", + "ParametersValidUntilTimestamp", + "WrappingKeyAlgorithm", + "WrappingKeyCertificate", + "WrappingKeyCertificateChain" + ], + "members":{ + "ImportToken":{ + "shape":"ImportTokenId", + "documentation":"

      The import token to initiate key import into Amazon Web Services Payment Cryptography. The import token expires after 7 days. You can use the same import token to import multiple keys to the same service account.

      " + }, + "ParametersValidUntilTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The validity period of the import token.

      " + }, + "WrappingKeyAlgorithm":{ + "shape":"KeyAlgorithm", + "documentation":"

      The algorithm of the wrapping key for use within TR-34 key block. RSA_2048 is the only wrapping key algorithm allowed.

      " + }, + "WrappingKeyCertificate":{ + "shape":"CertificateType", + "documentation":"

      The wrapping key certificate of the wrapping key for use within the TR-34 key block. The certificate expires in 7 days.

      " + }, + "WrappingKeyCertificateChain":{ + "shape":"CertificateType", + "documentation":"

      The Amazon Web Services Payment Cryptography certificate chain that signed the wrapping key certificate. This is the root certificate authority (CA) within your service account.

      " + } + } + }, + "GetPublicKeyCertificateInput":{ + "type":"structure", + "required":["KeyIdentifier"], + "members":{ + "KeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The KeyARN of the asymmetric key pair.

      " + } + } + }, + "GetPublicKeyCertificateOutput":{ + "type":"structure", + "required":[ + "KeyCertificate", + "KeyCertificateChain" + ], + "members":{ + "KeyCertificate":{ + "shape":"CertificateType", + "documentation":"

      The public key component of the asymmetric key pair in a certificate (PEM) format. It is signed by the root certificate authority (CA) within your service account. The certificate expires in 90 days.

      " + }, + "KeyCertificateChain":{ + "shape":"CertificateType", + "documentation":"

      The certificate chain that signed the public key certificate of the asymmetric key pair. This is the root certificate authority (CA) within your service account.

      " + } + } + }, + "HexLength16":{ + "type":"string", + "max":16, + "min":16, + "pattern":"^[0-9A-F]+$" + }, + "ImportKeyInput":{ + "type":"structure", + "required":["KeyMaterial"], + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

      Specifies whether import key is enabled.

      " + }, + "KeyCheckValueAlgorithm":{ + "shape":"KeyCheckValueAlgorithm", + "documentation":"

      The algorithm that Amazon Web Services Payment Cryptography uses to calculate the key check value (KCV) for DES and AES keys.

      For DES key, the KCV is computed by encrypting 8 bytes, each with value '00', with the key to be checked and retaining the 3 highest order bytes of the encrypted result. For AES key, the KCV is computed by encrypting 8 bytes, each with value '01', with the key to be checked and retaining the 3 highest order bytes of the encrypted result.

      " + }, + "KeyMaterial":{ + "shape":"ImportKeyMaterial", + "documentation":"

      The key or public key certificate type to use during key material import, for example TR-34 or RootCertificatePublicKey.

      " + }, + "Tags":{ + "shape":"Tags", + "documentation":"

      The tags to attach to the key. Each tag consists of a tag key and a tag value. Both the tag key and the tag value are required, but the tag value can be an empty (null) string. You can't have more than one tag on an Amazon Web Services Payment Cryptography key with the same tag key.

      You can't have more than one tag on an Amazon Web Services Payment Cryptography key with the same tag key. If you specify an existing tag key with a different tag value, Amazon Web Services Payment Cryptography replaces the current tag value with the specified one.

      To use this parameter, you must have TagResource permission.

      Don't include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

      Tagging or untagging an Amazon Web Services Payment Cryptography key can allow or deny permission to the key.

      " + } + } + }, + "ImportKeyMaterial":{ + "type":"structure", + "members":{ + "RootCertificatePublicKey":{ + "shape":"RootCertificatePublicKey", + "documentation":"

      Parameter information for root public key certificate import.

      " + }, + "Tr31KeyBlock":{ + "shape":"ImportTr31KeyBlock", + "documentation":"

      Parameter information for key material import using TR-31 standard.

      " + }, + "Tr34KeyBlock":{ + "shape":"ImportTr34KeyBlock", + "documentation":"

      Parameter information for key material import using TR-34 standard.

      " + }, + "TrustedCertificatePublicKey":{ + "shape":"TrustedCertificatePublicKey", + "documentation":"

      Parameter information for trusted public key certificate import.

      " + } + }, + "documentation":"

      Parameter information for key material import.

      ", + "union":true + }, + "ImportKeyOutput":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{ + "shape":"Key", + "documentation":"

      The KeyARN of the key material imported within Amazon Web Services Payment Cryptography.

      " + } + } + }, + "ImportTokenId":{ + "type":"string", + "pattern":"^import-token-[0-9a-zA-Z]{16,64}$" + }, + "ImportTr31KeyBlock":{ + "type":"structure", + "required":[ + "WrappedKeyBlock", + "WrappingKeyIdentifier" + ], + "members":{ + "WrappedKeyBlock":{ + "shape":"Tr31WrappedKeyBlock", + "documentation":"

      The TR-31 wrapped key block to import.

      " + }, + "WrappingKeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The KeyARN of the key that will decrypt or unwrap a TR-31 key block during import.

      " + } + }, + "documentation":"

      Parameter information for key material import using TR-31 standard.

      " + }, + "ImportTr34KeyBlock":{ + "type":"structure", + "required":[ + "CertificateAuthorityPublicKeyIdentifier", + "ImportToken", + "KeyBlockFormat", + "SigningKeyCertificate", + "WrappedKeyBlock" + ], + "members":{ + "CertificateAuthorityPublicKeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The KeyARN of the certificate chain that signs the signing key certificate during TR-34 key import.

      " + }, + "ImportToken":{ + "shape":"ImportTokenId", + "documentation":"

      The import token that initiates key import into Amazon Web Services Payment Cryptography. It expires after 7 days. You can use the same import token to import multiple keys to the same service account.

      " + }, + "KeyBlockFormat":{ + "shape":"Tr34KeyBlockFormat", + "documentation":"

      The key block format to use during key import. The only value allowed is X9_TR34_2012.

      " + }, + "RandomNonce":{ + "shape":"HexLength16", + "documentation":"

      A random number value that is unique to the TR-34 key block generated using 2 pass. The operation will fail, if a random nonce value is not provided for a TR-34 key block generated using 2 pass.

      " + }, + "SigningKeyCertificate":{ + "shape":"CertificateType", + "documentation":"

      The public key component in PEM certificate format of the private key that signs the KDH TR-34 wrapped key block.

      " + }, + "WrappedKeyBlock":{ + "shape":"Tr34WrappedKeyBlock", + "documentation":"

      The TR-34 wrapped key block to import.

      " + } + }, + "documentation":"

      Parameter information for key material import using TR-34 standard.

      " + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

      The request processing has failed because of an unknown error, exception, or failure.

      ", + "exception":true, + "fault":true + }, + "Key":{ + "type":"structure", + "required":[ + "CreateTimestamp", + "Enabled", + "Exportable", + "KeyArn", + "KeyAttributes", + "KeyCheckValue", + "KeyCheckValueAlgorithm", + "KeyOrigin", + "KeyState" + ], + "members":{ + "CreateTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The date and time when the key was created.

      " + }, + "DeletePendingTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The date and time after which Amazon Web Services Payment Cryptography will delete the key. This value is present only when KeyState is DELETE_PENDING and the key is scheduled for deletion.

      " + }, + "DeleteTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The date and time after which Amazon Web Services Payment Cryptography will delete the key. This value is present only when the KeyState is DELETE_COMPLETE and the Amazon Web Services Payment Cryptography key is deleted.

      " + }, + "Enabled":{ + "shape":"Boolean", + "documentation":"

      Specifies whether the key is enabled.

      " + }, + "Exportable":{ + "shape":"Boolean", + "documentation":"

      Specifies whether the key is exportable. This data is immutable after the key is created.

      " + }, + "KeyArn":{ + "shape":"KeyArn", + "documentation":"

      The Amazon Resource Name (ARN) of the key.

      " + }, + "KeyAttributes":{ + "shape":"KeyAttributes", + "documentation":"

      The role of the key, the algorithm it supports, and the cryptographic operations allowed with the key. This data is immutable after the key is created.

      " + }, + "KeyCheckValue":{ + "shape":"KeyCheckValue", + "documentation":"

      The key check value (KCV) is used to check if all parties holding a given key have the same key or to detect that a key has changed. Amazon Web Services Payment Cryptography calculates the KCV by using standard algorithms, typically by encrypting 8 or 16 bytes of \"00\" or \"01\" and then truncating the result to the first 3 bytes, or 6 hex digits, of the resulting cryptogram.

      " + }, + "KeyCheckValueAlgorithm":{ + "shape":"KeyCheckValueAlgorithm", + "documentation":"

      The algorithm used for calculating key check value (KCV) for DES and AES keys. For a DES key, Amazon Web Services Payment Cryptography computes the KCV by encrypting 8 bytes, each with value '00', with the key to be checked and retaining the 3 highest order bytes of the encrypted result. For an AES key, Amazon Web Services Payment Cryptography computes the KCV by encrypting 8 bytes, each with value '01', with the key to be checked and retaining the 3 highest order bytes of the encrypted result.

      " + }, + "KeyOrigin":{ + "shape":"KeyOrigin", + "documentation":"

      The source of the key material. For keys created within Amazon Web Services Payment Cryptography, the value is AWS_PAYMENT_CRYPTOGRAPHY. For keys imported into Amazon Web Services Payment Cryptography, the value is EXTERNAL.

      " + }, + "KeyState":{ + "shape":"KeyState", + "documentation":"

      The state of key that is being created or deleted.

      " + }, + "UsageStartTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The date and time after which Amazon Web Services Payment Cryptography will start using the key material for cryptographic operations.

      " + }, + "UsageStopTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The date and time after which Amazon Web Services Payment Cryptography will stop using the key material for cryptographic operations.

      " + } + }, + "documentation":"

      Metadata about an Amazon Web Services Payment Cryptography key.

      " + }, + "KeyAlgorithm":{ + "type":"string", + "enum":[ + "TDES_2KEY", + "TDES_3KEY", + "AES_128", + "AES_192", + "AES_256", + "RSA_2048", + "RSA_3072", + "RSA_4096" + ] + }, + "KeyArn":{ + "type":"string", + "max":150, + "min":70, + "pattern":"^arn:aws:payment-cryptography:[a-z]{2}-[a-z]{1,16}-[0-9]+:[0-9]{12}:key/[0-9a-zA-Z]{16,64}$" + }, + "KeyArnOrKeyAliasType":{ + "type":"string", + "max":322, + "min":7, + "pattern":"^arn:aws:payment-cryptography:[a-z]{2}-[a-z]{1,16}-[0-9]+:[0-9]{12}:(key/[0-9a-zA-Z]{16,64}|alias/[a-zA-Z0-9/_-]+)$|^alias/[a-zA-Z0-9/_-]+$" + }, + "KeyAttributes":{ + "type":"structure", + "required":[ + "KeyAlgorithm", + "KeyClass", + "KeyModesOfUse", + "KeyUsage" + ], + "members":{ + "KeyAlgorithm":{ + "shape":"KeyAlgorithm", + "documentation":"

      The key algorithm to be use during creation of an Amazon Web Services Payment Cryptography key.

      For symmetric keys, Amazon Web Services Payment Cryptography supports AES and TDES algorithms. For asymmetric keys, Amazon Web Services Payment Cryptography supports RSA and ECC_NIST algorithms.

      " + }, + "KeyClass":{ + "shape":"KeyClass", + "documentation":"

      The type of Amazon Web Services Payment Cryptography key to create, which determines the classification of the cryptographic method and whether Amazon Web Services Payment Cryptography key contains a symmetric key or an asymmetric key pair.

      " + }, + "KeyModesOfUse":{ + "shape":"KeyModesOfUse", + "documentation":"

      The list of cryptographic operations that you can perform using the key.

      " + }, + "KeyUsage":{ + "shape":"KeyUsage", + "documentation":"

      The cryptographic usage of an Amazon Web Services Payment Cryptography key as defined in section A.5.2 of the TR-31 spec.

      " + } + }, + "documentation":"

      The role of the key, the algorithm it supports, and the cryptographic operations allowed with the key. This data is immutable after the key is created.

      " + }, + "KeyCheckValue":{ + "type":"string", + "max":16, + "min":4, + "pattern":"^[0-9a-fA-F]+$" + }, + "KeyCheckValueAlgorithm":{ + "type":"string", + "enum":[ + "CMAC", + "ANSI_X9_24" + ] + }, + "KeyClass":{ + "type":"string", + "enum":[ + "SYMMETRIC_KEY", + "ASYMMETRIC_KEY_PAIR", + "PRIVATE_KEY", + "PUBLIC_KEY" + ] + }, + "KeyMaterial":{ + "type":"string", + "max":16384, + "min":48, + "sensitive":true + }, + "KeyMaterialType":{ + "type":"string", + "enum":[ + "TR34_KEY_BLOCK", + "TR31_KEY_BLOCK", + "ROOT_PUBLIC_KEY_CERTIFICATE", + "TRUSTED_PUBLIC_KEY_CERTIFICATE" + ] + }, + "KeyModesOfUse":{ + "type":"structure", + "members":{ + "Decrypt":{ + "shape":"PrimitiveBoolean", + "documentation":"

      Specifies whether an Amazon Web Services Payment Cryptography key can be used to decrypt data.

      " + }, + "DeriveKey":{ + "shape":"PrimitiveBoolean", + "documentation":"

      Specifies whether an Amazon Web Services Payment Cryptography key can be used to derive new keys.

      " + }, + "Encrypt":{ + "shape":"PrimitiveBoolean", + "documentation":"

      Specifies whether an Amazon Web Services Payment Cryptography key can be used to encrypt data.

      " + }, + "Generate":{ + "shape":"PrimitiveBoolean", + "documentation":"

      Specifies whether an Amazon Web Services Payment Cryptography key can be used to generate and verify other card and PIN verification keys.

      " + }, + "NoRestrictions":{ + "shape":"PrimitiveBoolean", + "documentation":"

      Specifies whether an Amazon Web Services Payment Cryptography key has no special restrictions other than the restrictions implied by KeyUsage.

      " + }, + "Sign":{ + "shape":"PrimitiveBoolean", + "documentation":"

      Specifies whether an Amazon Web Services Payment Cryptography key can be used for signing.

      " + }, + "Unwrap":{ + "shape":"PrimitiveBoolean", + "documentation":"

      Specifies whether an Amazon Web Services Payment Cryptography key can be used to unwrap other keys.

      " + }, + "Verify":{ + "shape":"PrimitiveBoolean", + "documentation":"

      Specifies whether an Amazon Web Services Payment Cryptography key can be used to verify signatures.

      " + }, + "Wrap":{ + "shape":"PrimitiveBoolean", + "documentation":"

      Specifies whether an Amazon Web Services Payment Cryptography key can be used to wrap other keys.

      " + } + }, + "documentation":"

      The list of cryptographic operations that you can perform using the key. The modes of use are defined in section A.5.3 of the TR-31 spec.

      " + }, + "KeyOrigin":{ + "type":"string", + "documentation":"

      Defines the source of a key

      ", + "enum":[ + "EXTERNAL", + "AWS_PAYMENT_CRYPTOGRAPHY" + ] + }, + "KeyState":{ + "type":"string", + "documentation":"

      Defines the state of a key

      ", + "enum":[ + "CREATE_IN_PROGRESS", + "CREATE_COMPLETE", + "DELETE_PENDING", + "DELETE_COMPLETE" + ] + }, + "KeySummary":{ + "type":"structure", + "required":[ + "Enabled", + "Exportable", + "KeyArn", + "KeyAttributes", + "KeyCheckValue", + "KeyState" + ], + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

      Specifies whether the key is enabled.

      " + }, + "Exportable":{ + "shape":"Boolean", + "documentation":"

      Specifies whether the key is exportable. This data is immutable after the key is created.

      " + }, + "KeyArn":{ + "shape":"KeyArn", + "documentation":"

      The Amazon Resource Name (ARN) of the key.

      " + }, + "KeyAttributes":{ + "shape":"KeyAttributes", + "documentation":"

      The role of the key, the algorithm it supports, and the cryptographic operations allowed with the key. This data is immutable after the key is created.

      " + }, + "KeyCheckValue":{ + "shape":"KeyCheckValue", + "documentation":"

      The key check value (KCV) is used to check if all parties holding a given key have the same key or to detect that a key has changed. Amazon Web Services Payment Cryptography calculates the KCV by using standard algorithms, typically by encrypting 8 or 16 bytes of \"00\" or \"01\" and then truncating the result to the first 3 bytes, or 6 hex digits, of the resulting cryptogram.

      " + }, + "KeyState":{ + "shape":"KeyState", + "documentation":"

      The state of an Amazon Web Services Payment Cryptography key that is being created or deleted.

      " + } + }, + "documentation":"

      Metadata about an Amazon Web Services Payment Cryptography key.

      " + }, + "KeySummaryList":{ + "type":"list", + "member":{"shape":"KeySummary"} + }, + "KeyUsage":{ + "type":"string", + "enum":[ + "TR31_B0_BASE_DERIVATION_KEY", + "TR31_C0_CARD_VERIFICATION_KEY", + "TR31_D0_SYMMETRIC_DATA_ENCRYPTION_KEY", + "TR31_D1_ASYMMETRIC_KEY_FOR_DATA_ENCRYPTION", + "TR31_E0_EMV_MKEY_APP_CRYPTOGRAMS", + "TR31_E1_EMV_MKEY_CONFIDENTIALITY", + "TR31_E2_EMV_MKEY_INTEGRITY", + "TR31_E4_EMV_MKEY_DYNAMIC_NUMBERS", + "TR31_E5_EMV_MKEY_CARD_PERSONALIZATION", + "TR31_E6_EMV_MKEY_OTHER", + "TR31_K0_KEY_ENCRYPTION_KEY", + "TR31_K1_KEY_BLOCK_PROTECTION_KEY", + "TR31_K3_ASYMMETRIC_KEY_FOR_KEY_AGREEMENT", + "TR31_M3_ISO_9797_3_MAC_KEY", + "TR31_M6_ISO_9797_5_CMAC_KEY", + "TR31_M7_HMAC_KEY", + "TR31_P0_PIN_ENCRYPTION_KEY", + "TR31_P1_PIN_GENERATION_KEY", + "TR31_S0_ASYMMETRIC_KEY_FOR_DIGITAL_SIGNATURE", + "TR31_V1_IBM3624_PIN_VERIFICATION_KEY", + "TR31_V2_VISA_PIN_VERIFICATION_KEY", + "TR31_K2_TR34_ASYMMETRIC_KEY" + ] + }, + "ListAliasesInput":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

      Use this parameter to specify the maximum number of items to return. When this value is present, Amazon Web Services Payment Cryptography does not return more than the specified number of items, but it might return fewer.

      This value is optional. If you include a value, it must be between 1 and 100, inclusive. If you do not include a value, it defaults to 50.

      " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

      Use this parameter in a subsequent request after you receive a response with truncated results. Set it to the value of NextToken from the truncated response you just received.

      " + } + } + }, + "ListAliasesOutput":{ + "type":"structure", + "required":["Aliases"], + "members":{ + "Aliases":{ + "shape":"Aliases", + "documentation":"

      The list of aliases. Each alias describes the KeyArn contained within.

      " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

      The token for the next set of results, or an empty or null value if there are no more results.

      " + } + } + }, + "ListKeysInput":{ + "type":"structure", + "members":{ + "KeyState":{ + "shape":"KeyState", + "documentation":"

      The key state of the keys you want to list.

      " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

      Use this parameter to specify the maximum number of items to return. When this value is present, Amazon Web Services Payment Cryptography does not return more than the specified number of items, but it might return fewer.

      " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

      Use this parameter in a subsequent request after you receive a response with truncated results. Set it to the value of NextToken from the truncated response you just received.

      " + } + } + }, + "ListKeysOutput":{ + "type":"structure", + "required":["Keys"], + "members":{ + "Keys":{ + "shape":"KeySummaryList", + "documentation":"

      The list of keys created within the caller's Amazon Web Services account and Amazon Web Services Region.

      " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

      The token for the next set of results, or an empty or null value if there are no more results.

      " + } + } + }, + "ListTagsForResourceInput":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

      Use this parameter to specify the maximum number of items to return. When this value is present, Amazon Web Services Payment Cryptography does not return more than the specified number of items, but it might return fewer.

      " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

      Use this parameter in a subsequent request after you receive a response with truncated results. Set it to the value of NextToken from the truncated response you just received.

      " + }, + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

      The KeyARN of the key whose tags you are getting.

      " + } + } + }, + "ListTagsForResourceOutput":{ + "type":"structure", + "required":["Tags"], + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

      The token for the next set of results, or an empty or null value if there are no more results.

      " + }, + "Tags":{ + "shape":"Tags", + "documentation":"

      The list of tags associated with a ResourceArn. Each tag will list the key-value pair contained within that tag.

      " + } + } + }, + "MaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "NextToken":{ + "type":"string", + "max":8192, + "min":1 + }, + "PrimitiveBoolean":{"type":"boolean"}, + "ResourceArn":{ + "type":"string", + "max":150, + "min":70, + "pattern":"^arn:aws:payment-cryptography:[a-z]{2}-[a-z]{1,16}-[0-9]+:[0-9]{12}:key/[0-9a-zA-Z]{16,64}$" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "ResourceId":{ + "shape":"String", + "documentation":"

      The string for the exception.

      " + } + }, + "documentation":"

      The request was denied due to an invalid resource error.

      ", + "exception":true + }, + "RestoreKeyInput":{ + "type":"structure", + "required":["KeyIdentifier"], + "members":{ + "KeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The KeyARN of the key to be restored within Amazon Web Services Payment Cryptography.

      " + } + } + }, + "RestoreKeyOutput":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{ + "shape":"Key", + "documentation":"

      The key material of the restored key. The KeyState will change to CREATE_COMPLETE and value for DeletePendingTimestamp gets removed.

      " + } + } + }, + "RootCertificatePublicKey":{ + "type":"structure", + "required":[ + "KeyAttributes", + "PublicKeyCertificate" + ], + "members":{ + "KeyAttributes":{ + "shape":"KeyAttributes", + "documentation":"

      The role of the key, the algorithm it supports, and the cryptographic operations allowed with the key. This data is immutable after the root public key is imported.

      " + }, + "PublicKeyCertificate":{ + "shape":"CertificateType", + "documentation":"

      Parameter information for root public key certificate import.

      " + } + }, + "documentation":"

      Parameter information for root public key certificate import.

      " + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

      This request would cause a service quota to be exceeded.

      ", + "exception":true + }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

      The service cannot complete the request.

      ", + "exception":true, + "fault":true + }, + "StartKeyUsageInput":{ + "type":"structure", + "required":["KeyIdentifier"], + "members":{ + "KeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The KeyArn of the key.

      " + } + } + }, + "StartKeyUsageOutput":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{ + "shape":"Key", + "documentation":"

      The KeyARN of the Amazon Web Services Payment Cryptography key activated for use.

      " + } + } + }, + "StopKeyUsageInput":{ + "type":"structure", + "required":["KeyIdentifier"], + "members":{ + "KeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The KeyArn of the key.

      " + } + } + }, + "StopKeyUsageOutput":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{ + "shape":"Key", + "documentation":"

      The KeyARN of the key.

      " + } + } + }, + "String":{"type":"string"}, + "Tag":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

      The key of the tag.

      " + }, + "Value":{ + "shape":"TagValue", + "documentation":"

      The value of the tag.

      " + } + }, + "documentation":"

      A structure that contains information about a tag.

      " + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagResourceInput":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

      The KeyARN of the key whose tags are being updated.

      " + }, + "Tags":{ + "shape":"Tags", + "documentation":"

      One or more tags. Each tag consists of a tag key and a tag value. The tag value can be an empty (null) string. You can't have more than one tag on an Amazon Web Services Payment Cryptography key with the same tag key. If you specify an existing tag key with a different tag value, Amazon Web Services Payment Cryptography replaces the current tag value with the new one.

      Don't include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

      To use this parameter, you must have TagResource permission in an IAM policy.

      " + } + } + }, + "TagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "Tags":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

      The request was denied due to request throttling.

      ", + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "Tr31WrappedKeyBlock":{ + "type":"string", + "max":9984, + "min":56, + "pattern":"^[0-9A-Z]+$" + }, + "Tr34KeyBlockFormat":{ + "type":"string", + "enum":["X9_TR34_2012"] + }, + "Tr34WrappedKeyBlock":{ + "type":"string", + "max":4096, + "min":2, + "pattern":"^[0-9A-F]+$" + }, + "TrustedCertificatePublicKey":{ + "type":"structure", + "required":[ + "CertificateAuthorityPublicKeyIdentifier", + "KeyAttributes", + "PublicKeyCertificate" + ], + "members":{ + "CertificateAuthorityPublicKeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The KeyARN of the root public key certificate or certificate chain that signs the trusted public key certificate import.

      " + }, + "KeyAttributes":{ + "shape":"KeyAttributes", + "documentation":"

      The role of the key, the algorithm it supports, and the cryptographic operations allowed with the key. This data is immutable after a trusted public key is imported.

      " + }, + "PublicKeyCertificate":{ + "shape":"CertificateType", + "documentation":"

      Parameter information for trusted public key certificate import.

      " + } + }, + "documentation":"

      Parameter information for trusted public key certificate import.

      " + }, + "UntagResourceInput":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

      The KeyARN of the key whose tags are being removed.

      " + }, + "TagKeys":{ + "shape":"TagKeys", + "documentation":"

      One or more tag keys. Don't include the tag values.

      If the Amazon Web Services Payment Cryptography key doesn't have the specified tag key, Amazon Web Services Payment Cryptography doesn't throw an exception or return a response. To confirm that the operation succeeded, use the ListTagsForResource operation.

      " + } + } + }, + "UntagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "UpdateAliasInput":{ + "type":"structure", + "required":["AliasName"], + "members":{ + "AliasName":{ + "shape":"AliasName", + "documentation":"

      The alias whose associated key is changing.

      " + }, + "KeyArn":{ + "shape":"KeyArn", + "documentation":"

      The KeyARN for the key that you are updating or removing from the alias.

      " + } + } + }, + "UpdateAliasOutput":{ + "type":"structure", + "required":["Alias"], + "members":{ + "Alias":{ + "shape":"Alias", + "documentation":"

      The alias name.

      " + } + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

      The request was denied due to an invalid request error.

      ", + "exception":true + }, + "WrappedKey":{ + "type":"structure", + "required":[ + "KeyMaterial", + "WrappedKeyMaterialFormat", + "WrappingKeyArn" + ], + "members":{ + "KeyMaterial":{ + "shape":"KeyMaterial", + "documentation":"

      Parameter information for generating a wrapped key using TR-31 or TR-34 standard.

      " + }, + "WrappedKeyMaterialFormat":{ + "shape":"WrappedKeyMaterialFormat", + "documentation":"

      The key block format of a wrapped key.

      " + }, + "WrappingKeyArn":{ + "shape":"KeyArn", + "documentation":"

      The KeyARN of the wrapped key.

      " + } + }, + "documentation":"

      Parameter information for generating a wrapped key using TR-31 or TR-34 standard.

      " + }, + "WrappedKeyMaterialFormat":{ + "type":"string", + "enum":[ + "KEY_CRYPTOGRAM", + "TR31_KEY_BLOCK", + "TR34_KEY_BLOCK" + ] + } + }, + "documentation":"

      You use the Amazon Web Services Payment Cryptography Control Plane to manage the encryption keys you use for payment-related cryptographic operations. You can create, import, export, share, manage, and delete keys. You can also manage Identity and Access Management (IAM) policies for keys. For more information, see Identity and access management in the Amazon Web Services Payment Cryptography User Guide.

      To use encryption keys for payment-related transaction processing and associated cryptographic operations, you use the Amazon Web Services Payment Cryptography Data Plane. You can encrypt, decrypt, generate, verify, and translate payment-related cryptographic operations.

      All Amazon Web Services Payment Cryptography API calls must be signed and transmitted using Transport Layer Security (TLS). We recommend you always use the latest supported TLS version for logging API requests.

      Amazon Web Services Payment Cryptography supports CloudTrail, a service that logs Amazon Web Services API calls and related events for your Amazon Web Services account and delivers them to an Amazon S3 bucket that you specify. By using the information collected by CloudTrail, you can determine what requests were made to Amazon Web Services Payment Cryptography, who made the request, when it was made, and so on. If you don't configure a trail, you can still view the most recent events in the CloudTrail console. For more information, see the CloudTrail User Guide.

      " +} diff --git a/services/paymentcryptographydata/pom.xml b/services/paymentcryptographydata/pom.xml new file mode 100644 index 000000000000..18988c44c8b0 --- /dev/null +++ b/services/paymentcryptographydata/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.20.93-SNAPSHOT + + paymentcryptographydata + AWS Java SDK :: Services :: Payment Cryptography Data + The AWS Java SDK for Payment Cryptography Data module holds the client classes that are used for + communicating with Payment Cryptography Data. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.paymentcryptographydata + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/paymentcryptographydata/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/paymentcryptographydata/src/main/resources/codegen-resources/endpoint-rule-set.json new file mode 100644 index 000000000000..0686f59b325a --- /dev/null +++ b/services/paymentcryptographydata/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } 
+ ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://dataplane.payment-cryptography-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://dataplane.payment-cryptography-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://dataplane.payment-cryptography.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + 
{ + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://dataplane.payment-cryptography.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] + } + ] +} \ No newline at end of file diff --git a/services/paymentcryptographydata/src/main/resources/codegen-resources/endpoint-tests.json b/services/paymentcryptographydata/src/main/resources/codegen-resources/endpoint-tests.json new file mode 100644 index 000000000000..b78414e06822 --- /dev/null +++ b/services/paymentcryptographydata/src/main/resources/codegen-resources/endpoint-tests.json @@ -0,0 +1,295 @@ +{ + "testCases": [ + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://dataplane.payment-cryptography-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-gov-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dataplane.payment-cryptography-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-gov-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://dataplane.payment-cryptography.us-gov-east-1.api.aws" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-gov-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dataplane.payment-cryptography.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-gov-east-1", + 
"UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://dataplane.payment-cryptography-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "UseFIPS": true, + "Region": "cn-north-1", + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dataplane.payment-cryptography-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseFIPS": true, + "Region": "cn-north-1", + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://dataplane.payment-cryptography.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "UseFIPS": false, + "Region": "cn-north-1", + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dataplane.payment-cryptography.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseFIPS": false, + "Region": "cn-north-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseFIPS": true, + "Region": "us-iso-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dataplane.payment-cryptography-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-iso-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled 
but this partition does not support DualStack" + }, + "params": { + "UseFIPS": false, + "Region": "us-iso-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dataplane.payment-cryptography.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-iso-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://dataplane.payment-cryptography-fips.us-east-1.api.aws" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dataplane.payment-cryptography-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://dataplane.payment-cryptography.us-east-1.api.aws" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dataplane.payment-cryptography.us-east-1.amazonaws.com" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseFIPS": true, + "Region": "us-isob-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack 
disabled", + "expect": { + "endpoint": { + "url": "https://dataplane.payment-cryptography-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-isob-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseFIPS": false, + "Region": "us-isob-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dataplane.payment-cryptography.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-isob-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "UseFIPS": true, + "Region": "us-east-1", + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": true, + "Endpoint": "https://example.com" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/services/paymentcryptographydata/src/main/resources/codegen-resources/paginators-1.json b/services/paymentcryptographydata/src/main/resources/codegen-resources/paginators-1.json new file 
mode 100644 index 000000000000..5677bd8e4a2d --- /dev/null +++ b/services/paymentcryptographydata/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,4 @@ +{ + "pagination": { + } +} diff --git a/services/paymentcryptographydata/src/main/resources/codegen-resources/service-2.json b/services/paymentcryptographydata/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..9c1f1d630320 --- /dev/null +++ b/services/paymentcryptographydata/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2108 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2022-02-03", + "endpointPrefix":"dataplane.payment-cryptography", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Payment Cryptography Data Plane", + "serviceId":"Payment Cryptography Data", + "signatureVersion":"v4", + "signingName":"payment-cryptography", + "uid":"payment-cryptography-data-2022-02-03" + }, + "operations":{ + "DecryptData":{ + "name":"DecryptData", + "http":{ + "method":"POST", + "requestUri":"/keys/{KeyIdentifier}/decrypt", + "responseCode":200 + }, + "input":{"shape":"DecryptDataInput"}, + "output":{"shape":"DecryptDataOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Decrypts ciphertext data to plaintext using symmetric, asymmetric, or DUKPT data encryption key. For more information, see Decrypt data in the Amazon Web Services Payment Cryptography User Guide.

      You can use an encryption key generated within Amazon Web Services Payment Cryptography, or you can import your own encryption key by calling ImportKey. For this operation, the key must have KeyModesOfUse set to Decrypt. In asymmetric decryption, Amazon Web Services Payment Cryptography decrypts the ciphertext using the private component of the asymmetric encryption key pair. For data encryption outside of Amazon Web Services Payment Cryptography, you can export the public component of the asymmetric key pair by calling GetPublicCertificate.

      For symmetric and DUKPT decryption, Amazon Web Services Payment Cryptography supports TDES and AES algorithms. For asymmetric decryption, Amazon Web Services Payment Cryptography supports RSA. When you use DUKPT, for TDES algorithm, the ciphertext data length must be a multiple of 16 bytes. For AES algorithm, the ciphertext data length must be a multiple of 32 bytes.

      For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "EncryptData":{ + "name":"EncryptData", + "http":{ + "method":"POST", + "requestUri":"/keys/{KeyIdentifier}/encrypt", + "responseCode":200 + }, + "input":{"shape":"EncryptDataInput"}, + "output":{"shape":"EncryptDataOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Encrypts plaintext data to ciphertext using symmetric, asymmetric, or DUKPT data encryption key. For more information, see Encrypt data in the Amazon Web Services Payment Cryptography User Guide.

      You can generate an encryption key within Amazon Web Services Payment Cryptography by calling CreateKey. You can import your own encryption key by calling ImportKey. For this operation, the key must have KeyModesOfUse set to Encrypt. In asymmetric encryption, plaintext is encrypted using public component. You can import the public component of an asymmetric key pair created outside Amazon Web Services Payment Cryptography by calling ImportKey.

      For symmetric and DUKPT encryption, Amazon Web Services Payment Cryptography supports TDES and AES algorithms. For asymmetric encryption, Amazon Web Services Payment Cryptography supports RSA. To encrypt using DUKPT, you must already have a DUKPT key in your account with KeyModesOfUse set to DeriveKey, or you can generate a new DUKPT key by calling CreateKey.

      For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "GenerateCardValidationData":{ + "name":"GenerateCardValidationData", + "http":{ + "method":"POST", + "requestUri":"/cardvalidationdata/generate", + "responseCode":200 + }, + "input":{"shape":"GenerateCardValidationDataInput"}, + "output":{"shape":"GenerateCardValidationDataOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Generates card-related validation data using algorithms such as Card Verification Values (CVV/CVV2), Dynamic Card Verification Values (dCVV/dCVV2), or Card Security Codes (CSC). For more information, see Generate card data in the Amazon Web Services Payment Cryptography User Guide.

      This operation generates a CVV or CSC value that is printed on a payment credit or debit card during card production. The CVV or CSC, PAN (Primary Account Number) and expiration date of the card are required to check its validity during transaction processing. To begin this operation, a CVK (Card Verification Key) encryption key is required. You can use CreateKey or ImportKey to establish a CVK within Amazon Web Services Payment Cryptography. The KeyModesOfUse should be set to Generate and Verify for a CVK encryption key.

      For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "GenerateMac":{ + "name":"GenerateMac", + "http":{ + "method":"POST", + "requestUri":"/mac/generate", + "responseCode":200 + }, + "input":{"shape":"GenerateMacInput"}, + "output":{"shape":"GenerateMacOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Generates a Message Authentication Code (MAC) cryptogram within Amazon Web Services Payment Cryptography.

      You can use this operation when keys won't be shared but mutual data is present on both ends for validation. In this case, known data values are used to generate a MAC on both ends for comparison without sending or receiving data in ciphertext or plaintext. You can use this operation to generate a DUKPT, HMAC or EMV MAC by setting generation attributes and algorithm to the associated values. The MAC generation encryption key must have valid values for KeyUsage such as TR31_M7_HMAC_KEY for HMAC generation, and the key must have KeyModesOfUse set to Generate and Verify.

      For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "GeneratePinData":{ + "name":"GeneratePinData", + "http":{ + "method":"POST", + "requestUri":"/pindata/generate", + "responseCode":200 + }, + "input":{"shape":"GeneratePinDataInput"}, + "output":{"shape":"GeneratePinDataOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Generates pin-related data such as PIN, PIN Verification Value (PVV), PIN Block, and PIN Offset during new card issuance or reissuance. For more information, see Generate PIN data in the Amazon Web Services Payment Cryptography User Guide.

      PIN data is never transmitted in clear to or from Amazon Web Services Payment Cryptography. This operation generates PIN, PVV, or PIN Offset and then encrypts it using Pin Encryption Key (PEK) to create an EncryptedPinBlock for transmission from Amazon Web Services Payment Cryptography. This operation uses a separate Pin Verification Key (PVK) for VISA PVV generation.

      For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "ReEncryptData":{ + "name":"ReEncryptData", + "http":{ + "method":"POST", + "requestUri":"/keys/{IncomingKeyIdentifier}/reencrypt", + "responseCode":200 + }, + "input":{"shape":"ReEncryptDataInput"}, + "output":{"shape":"ReEncryptDataOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Re-encrypt ciphertext using DUKPT, Symmetric and Asymmetric Data Encryption Keys.

      You can either generate an encryption key within Amazon Web Services Payment Cryptography by calling CreateKey or import your own encryption key by calling ImportKey. The KeyArn for use with this operation must be in a compatible key state with KeyModesOfUse set to Encrypt. In asymmetric encryption, ciphertext is encrypted using public component (imported by calling ImportKey) of the asymmetric key pair created outside of Amazon Web Services Payment Cryptography.

      For symmetric and DUKPT encryption, Amazon Web Services Payment Cryptography supports TDES and AES algorithms. For asymmetric encryption, Amazon Web Services Payment Cryptography supports RSA. To encrypt using DUKPT, a DUKPT key must already exist within your account with KeyModesOfUse set to DeriveKey or a new DUKPT can be generated by calling CreateKey.

      For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "TranslatePinData":{ + "name":"TranslatePinData", + "http":{ + "method":"POST", + "requestUri":"/pindata/translate", + "responseCode":200 + }, + "input":{"shape":"TranslatePinDataInput"}, + "output":{"shape":"TranslatePinDataOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Translates encrypted PIN block from and to ISO 9564 formats 0,1,3,4. For more information, see Translate PIN data in the Amazon Web Services Payment Cryptography User Guide.

      PIN block translation involves changing the encryption of PIN block from one encryption key to another encryption key and changing PIN block format from one to another without PIN block data leaving Amazon Web Services Payment Cryptography. The encryption key transformation can be from PEK (Pin Encryption Key) to BDK (Base Derivation Key) for DUKPT or from BDK for DUKPT to PEK. Amazon Web Services Payment Cryptography supports TDES and AES key derivation type for DUKPT translations. You can use this operation for P2PE (Point to Point Encryption) use cases where the encryption keys should change but the processing system either does not need to, or is not permitted to, decrypt the data.

      The allowed combinations of PIN block format translations are guided by PCI. It is important to note that not all encrypted PIN block formats (example, format 1) require PAN (Primary Account Number) as input. And as such, PIN block format that requires PAN (example, formats 0,3,4) cannot be translated to a format (format 1) that does not require a PAN for generation.

      For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide.

      At this time, Amazon Web Services Payment Cryptography does not support translations to PIN format 4.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "VerifyAuthRequestCryptogram":{ + "name":"VerifyAuthRequestCryptogram", + "http":{ + "method":"POST", + "requestUri":"/cryptogram/verify", + "responseCode":200 + }, + "input":{"shape":"VerifyAuthRequestCryptogramInput"}, + "output":{"shape":"VerifyAuthRequestCryptogramOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"VerificationFailedException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Verifies Authorization Request Cryptogram (ARQC) for an EMV chip payment card authorization. For more information, see Verify auth request cryptogram in the Amazon Web Services Payment Cryptography User Guide.

      ARQC generation is done outside of Amazon Web Services Payment Cryptography and is typically generated on a point of sale terminal for an EMV chip card to obtain payment authorization during transaction time. For ARQC verification, you must first import the ARQC generated outside of Amazon Web Services Payment Cryptography by calling ImportKey. This operation uses the imported ARQC and a major encryption key (DUKPT) created by calling CreateKey to either provide a boolean ARQC verification result or provide an ARPC (Authorization Response Cryptogram) response using Method 1 or Method 2. The ARPC_METHOD_1 uses AuthResponseCode to generate ARPC and ARPC_METHOD_2 uses CardStatusUpdate to generate ARPC.

      For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "VerifyCardValidationData":{ + "name":"VerifyCardValidationData", + "http":{ + "method":"POST", + "requestUri":"/cardvalidationdata/verify", + "responseCode":200 + }, + "input":{"shape":"VerifyCardValidationDataInput"}, + "output":{"shape":"VerifyCardValidationDataOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"VerificationFailedException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Verifies card-related validation data using algorithms such as Card Verification Values (CVV/CVV2), Dynamic Card Verification Values (dCVV/dCVV2) and Card Security Codes (CSC). For more information, see Verify card data in the Amazon Web Services Payment Cryptography User Guide.

      This operation validates the CVV or CSC codes that are printed on a payment credit or debit card during card payment transaction. The input values are typically provided as part of an inbound transaction to an issuer or supporting platform partner. Amazon Web Services Payment Cryptography uses CVV or CSC, PAN (Primary Account Number) and expiration date of the card to check its validity during transaction processing. In this operation, the CVK (Card Verification Key) encryption key for use with card data verification is the same as the one used for GenerateCardValidationData.

      For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "VerifyMac":{ + "name":"VerifyMac", + "http":{ + "method":"POST", + "requestUri":"/mac/verify", + "responseCode":200 + }, + "input":{"shape":"VerifyMacInput"}, + "output":{"shape":"VerifyMacOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"VerificationFailedException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Verifies a Message Authentication Code (MAC).

      You can use this operation when keys won't be shared but mutual data is present on both ends for validation. In this case, known data values are used to generate a MAC on both ends for verification without sending or receiving data in ciphertext or plaintext. You can use this operation to verify a DUKPT, HMAC or EMV MAC by setting generation attributes and algorithm to the associated values. Use the same encryption key for MAC verification as you use for GenerateMac.

      For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + }, + "VerifyPinData":{ + "name":"VerifyPinData", + "http":{ + "method":"POST", + "requestUri":"/pindata/verify", + "responseCode":200 + }, + "input":{"shape":"VerifyPinDataInput"}, + "output":{"shape":"VerifyPinDataOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"VerificationFailedException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Verifies pin-related data such as PIN and PIN Offset using algorithms including VISA PVV and IBM3624. For more information, see Verify PIN data in the Amazon Web Services Payment Cryptography User Guide.

      This operation verifies PIN data for user payment card. A card holder PIN data is never transmitted in clear to or from Amazon Web Services Payment Cryptography. This operation uses PIN Verification Key (PVK) for PIN or PIN Offset generation and then encrypts it using PIN Encryption Key (PEK) to create an EncryptedPinBlock for transmission from Amazon Web Services Payment Cryptography.

      For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide.

      Cross-account use: This operation can't be used across different Amazon Web Services accounts.

      Related operations:

      " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

      You do not have sufficient access to perform this action.

      ", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AmexCardSecurityCodeVersion1":{ + "type":"structure", + "required":["CardExpiryDate"], + "members":{ + "CardExpiryDate":{ + "shape":"NumberLengthEquals4", + "documentation":"

      The expiry date of a payment card.

      " + } + }, + "documentation":"

      Card data parameters that are required to generate a Card Security Code (CSC2) for an AMEX payment card.

      " + }, + "AmexCardSecurityCodeVersion2":{ + "type":"structure", + "required":[ + "CardExpiryDate", + "ServiceCode" + ], + "members":{ + "CardExpiryDate":{ + "shape":"NumberLengthEquals4", + "documentation":"

      The expiry date of a payment card.

      " + }, + "ServiceCode":{ + "shape":"NumberLengthEquals3", + "documentation":"

      The service code of the AMEX payment card. This is different from the Card Security Code (CSC).

      " + } + }, + "documentation":"

      Card data parameters that are required to generate a Card Security Code (CSC2) for an AMEX payment card.

      " + }, + "AsymmetricEncryptionAttributes":{ + "type":"structure", + "members":{ + "PaddingType":{ + "shape":"PaddingType", + "documentation":"

      The padding to be included with the data.

      " + } + }, + "documentation":"

      Parameters for plaintext encryption using asymmetric keys.

      " + }, + "CardGenerationAttributes":{ + "type":"structure", + "members":{ + "AmexCardSecurityCodeVersion1":{"shape":"AmexCardSecurityCodeVersion1"}, + "AmexCardSecurityCodeVersion2":{ + "shape":"AmexCardSecurityCodeVersion2", + "documentation":"

      Card data parameters that are required to generate a Card Security Code (CSC2) for an AMEX payment card.

      " + }, + "CardHolderVerificationValue":{ + "shape":"CardHolderVerificationValue", + "documentation":"

      Card data parameters that are required to generate a cardholder verification value for the payment card.

      " + }, + "CardVerificationValue1":{ + "shape":"CardVerificationValue1", + "documentation":"

      Card data parameters that are required to generate Card Verification Value (CVV) for the payment card.

      " + }, + "CardVerificationValue2":{ + "shape":"CardVerificationValue2", + "documentation":"

      Card data parameters that are required to generate Card Verification Value (CVV2) for the payment card.

      " + }, + "DynamicCardVerificationCode":{ + "shape":"DynamicCardVerificationCode", + "documentation":"

      Card data parameters that are required to generate Dynamic Card Verification Code (dCVC) for the payment card.

      " + }, + "DynamicCardVerificationValue":{ + "shape":"DynamicCardVerificationValue", + "documentation":"

      Card data parameters that are required to generate Dynamic Card Verification Value (dCVV) for the payment card.

      " + } + }, + "documentation":"

      Card data parameters that are required to generate Card Verification Values (CVV/CVV2), Dynamic Card Verification Values (dCVV/dCVV2), or Card Security Codes (CSC).

      ", + "union":true + }, + "CardHolderVerificationValue":{ + "type":"structure", + "required":[ + "ApplicationTransactionCounter", + "PanSequenceNumber", + "UnpredictableNumber" + ], + "members":{ + "ApplicationTransactionCounter":{ + "shape":"HexLengthBetween2And4", + "documentation":"

      The transaction counter value that comes from a point of sale terminal.

      " + }, + "PanSequenceNumber":{ + "shape":"HexLengthEquals2", + "documentation":"

      A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

      " + }, + "UnpredictableNumber":{ + "shape":"HexLengthBetween2And8", + "documentation":"

      A random number generated by the issuer.

      " + } + }, + "documentation":"

      Card data parameters that are required to generate a cardholder verification value for the payment card.

      " + }, + "CardVerificationAttributes":{ + "type":"structure", + "members":{ + "AmexCardSecurityCodeVersion1":{"shape":"AmexCardSecurityCodeVersion1"}, + "AmexCardSecurityCodeVersion2":{ + "shape":"AmexCardSecurityCodeVersion2", + "documentation":"

      Card data parameters that are required to verify a Card Security Code (CSC2) for an AMEX payment card.

      " + }, + "CardHolderVerificationValue":{ + "shape":"CardHolderVerificationValue", + "documentation":"

      Card data parameters that are required to verify a cardholder verification value for the payment card.

      " + }, + "CardVerificationValue1":{ + "shape":"CardVerificationValue1", + "documentation":"

      Card data parameters that are required to verify Card Verification Value (CVV) for the payment card.

      " + }, + "CardVerificationValue2":{ + "shape":"CardVerificationValue2", + "documentation":"

      Card data parameters that are required to verify Card Verification Value (CVV2) for the payment card.

      " + }, + "DiscoverDynamicCardVerificationCode":{ + "shape":"DiscoverDynamicCardVerificationCode", + "documentation":"

      Card data parameters that are required to verify Dynamic Card Verification Code (dCVC) for the payment card.

      " + }, + "DynamicCardVerificationCode":{ + "shape":"DynamicCardVerificationCode", + "documentation":"

      Card data parameters that are required to verify Dynamic Card Verification Code (dCVC) for the payment card.

      " + }, + "DynamicCardVerificationValue":{ + "shape":"DynamicCardVerificationValue", + "documentation":"

      Card data parameters that are required to verify Dynamic Card Verification Value (dCVV) for the payment card.

      " + } + }, + "documentation":"

      Card data parameters that are required to verify Card Verification Values (CVV/CVV2), Dynamic Card Verification Values (dCVV/dCVV2), or Card Security Codes (CSC).

      ", + "union":true + }, + "CardVerificationValue1":{ + "type":"structure", + "required":[ + "CardExpiryDate", + "ServiceCode" + ], + "members":{ + "CardExpiryDate":{ + "shape":"NumberLengthEquals4", + "documentation":"

      The expiry date of a payment card.

      " + }, + "ServiceCode":{ + "shape":"NumberLengthEquals3", + "documentation":"

      The service code of the payment card. This is different from Card Security Code (CSC).

      " + } + }, + "documentation":"

      Card data parameters that are required to verify CVV (Card Verification Value) for the payment card.

      " + }, + "CardVerificationValue2":{ + "type":"structure", + "required":["CardExpiryDate"], + "members":{ + "CardExpiryDate":{ + "shape":"NumberLengthEquals4", + "documentation":"

      The expiry date of a payment card.

      " + } + }, + "documentation":"

      Card data parameters that are required to verify Card Verification Value (CVV2) for the payment card.

      " + }, + "CryptogramAuthResponse":{ + "type":"structure", + "members":{ + "ArpcMethod1":{ + "shape":"CryptogramVerificationArpcMethod1", + "documentation":"

      Parameters that are required for ARPC response generation using method1 after ARQC verification is successful.

      " + }, + "ArpcMethod2":{ + "shape":"CryptogramVerificationArpcMethod2", + "documentation":"

      Parameters that are required for ARPC response generation using method2 after ARQC verification is successful.

      " + } + }, + "documentation":"

      Parameters that are required for Authorization Response Cryptogram (ARPC) generation after Authorization Request Cryptogram (ARQC) verification is successful.

      ", + "union":true + }, + "CryptogramVerificationArpcMethod1":{ + "type":"structure", + "required":["AuthResponseCode"], + "members":{ + "AuthResponseCode":{ + "shape":"HexLengthEquals4", + "documentation":"

      The auth code used to calculate ARPC after ARQC verification is successful. This is the same auth code used for ARQC generation outside of Amazon Web Services Payment Cryptography.

      " + } + }, + "documentation":"

      Parameters that are required for ARPC response generation using method1 after ARQC verification is successful.

      " + }, + "CryptogramVerificationArpcMethod2":{ + "type":"structure", + "required":["CardStatusUpdate"], + "members":{ + "CardStatusUpdate":{ + "shape":"HexLengthEquals8", + "documentation":"

      The data indicating whether the issuer approves or declines an online transaction using an EMV chip card.

      " + }, + "ProprietaryAuthenticationData":{ + "shape":"HexLengthBetween1And16", + "documentation":"

      The proprietary authentication data used by issuer for communication during online transaction using an EMV chip card.

      " + } + }, + "documentation":"

      Parameters that are required for ARPC response generation using method2 after ARQC verification is successful.

      " + }, + "DecryptDataInput":{ + "type":"structure", + "required":[ + "CipherText", + "DecryptionAttributes", + "KeyIdentifier" + ], + "members":{ + "CipherText":{ + "shape":"HexEvenLengthBetween16And4096", + "documentation":"

      The ciphertext to decrypt.

      " + }, + "DecryptionAttributes":{ + "shape":"EncryptionDecryptionAttributes", + "documentation":"

      The encryption key type and attributes for ciphertext decryption.

      " + }, + "KeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The keyARN of the encryption key that Amazon Web Services Payment Cryptography uses for ciphertext decryption.

      ", + "location":"uri", + "locationName":"KeyIdentifier" + } + } + }, + "DecryptDataOutput":{ + "type":"structure", + "required":[ + "KeyArn", + "KeyCheckValue", + "PlainText" + ], + "members":{ + "KeyArn":{ + "shape":"KeyArn", + "documentation":"

      The keyARN of the encryption key that Amazon Web Services Payment Cryptography uses for ciphertext decryption.

      " + }, + "KeyCheckValue":{ + "shape":"KeyCheckValue", + "documentation":"

      The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed. Amazon Web Services Payment Cryptography calculates the KCV by using standard algorithms, typically by encrypting 8 or 16 bytes of \"00\" or \"01\" and then truncating the result to the first 3 bytes, or 6 hex digits, of the resulting cryptogram.

      " + }, + "PlainText":{ + "shape":"HexEvenLengthBetween16And4096", + "documentation":"

      The decrypted plaintext data.

      " + } + } + }, + "DiscoverDynamicCardVerificationCode":{ + "type":"structure", + "required":[ + "ApplicationTransactionCounter", + "CardExpiryDate", + "UnpredictableNumber" + ], + "members":{ + "ApplicationTransactionCounter":{ + "shape":"HexLengthBetween2And4", + "documentation":"

      The transaction counter value that comes from the terminal.

      " + }, + "CardExpiryDate":{ + "shape":"NumberLengthEquals4", + "documentation":"

      The expiry date of a payment card.

      " + }, + "UnpredictableNumber":{ + "shape":"HexLengthBetween2And8", + "documentation":"

      A random number that is generated by the issuer.

      " + } + }, + "documentation":"

      Parameters that are required to generate or verify dCVC (Dynamic Card Verification Code).

      " + }, + "DukptAttributes":{ + "type":"structure", + "required":[ + "DukptDerivationType", + "KeySerialNumber" + ], + "members":{ + "DukptDerivationType":{ + "shape":"DukptDerivationType", + "documentation":"

      The key type derived using DUKPT from a Base Derivation Key (BDK) and Key Serial Number (KSN). This must be less than or equal to the strength of the BDK. For example, you can't use AES_128 as a derivation type for a BDK of AES_128 or TDES_2KEY.

      " + }, + "KeySerialNumber":{ + "shape":"HexLengthBetween10And24", + "documentation":"

      The unique identifier known as Key Serial Number (KSN) that comes from an encrypting device using DUKPT encryption method. The KSN is derived from the encrypting device unique identifier and an internal transaction counter.

      " + } + }, + "documentation":"

      Parameters that are used for Derived Unique Key Per Transaction (DUKPT) derivation algorithm.

      " + }, + "DukptDerivationAttributes":{ + "type":"structure", + "required":["KeySerialNumber"], + "members":{ + "DukptKeyDerivationType":{ + "shape":"DukptDerivationType", + "documentation":"

      The key type derived using DUKPT from a Base Derivation Key (BDK) and Key Serial Number (KSN). This must be less than or equal to the strength of the BDK. For example, you can't use AES_128 as a derivation type for a BDK of AES_128 or TDES_2KEY

      " + }, + "DukptKeyVariant":{ + "shape":"DukptKeyVariant", + "documentation":"

      The type of use of DUKPT, which can be for incoming data decryption, outgoing data encryption, or both.

      " + }, + "KeySerialNumber":{ + "shape":"HexLengthBetween10And24", + "documentation":"

      The unique identifier known as Key Serial Number (KSN) that comes from an encrypting device using DUKPT encryption method. The KSN is derived from the encrypting device unique identifier and an internal transaction counter.

      " + } + }, + "documentation":"

      Parameters required for encryption or decryption of data using DUKPT.

      " + }, + "DukptDerivationType":{ + "type":"string", + "enum":[ + "TDES_2KEY", + "TDES_3KEY", + "AES_128", + "AES_192", + "AES_256" + ] + }, + "DukptEncryptionAttributes":{ + "type":"structure", + "required":["KeySerialNumber"], + "members":{ + "DukptKeyDerivationType":{ + "shape":"DukptDerivationType", + "documentation":"

      The key type encrypted using DUKPT from a Base Derivation Key (BDK) and Key Serial Number (KSN). This must be less than or equal to the strength of the BDK. For example, you can't use AES_128 as a derivation type for a BDK of AES_128 or TDES_2KEY

      " + }, + "DukptKeyVariant":{ + "shape":"DukptKeyVariant", + "documentation":"

      The type of use of DUKPT, which can be incoming data decryption, outgoing data encryption, or both.

      " + }, + "InitializationVector":{ + "shape":"HexLength16Or32", + "documentation":"

      An input to cryptographic primitive used to provide the initial state. Typically the InitializationVector must have a random or pseudo-random value, but sometimes it only needs to be unpredictable or unique. If you don't provide a value, Amazon Web Services Payment Cryptography generates a random value.

      " + }, + "KeySerialNumber":{ + "shape":"HexLengthBetween10And24", + "documentation":"

      The unique identifier known as Key Serial Number (KSN) that comes from an encrypting device using DUKPT encryption method. The KSN is derived from the encrypting device unique identifier and an internal transaction counter.

      " + }, + "Mode":{ + "shape":"DukptEncryptionMode", + "documentation":"

      The block cipher mode of operation. Block ciphers are designed to encrypt a block of data of fixed size, for example, 128 bits. The size of the input block is usually same as the size of the encrypted output block, while the key length can be different. A mode of operation describes how to repeatedly apply a cipher's single-block operation to securely transform amounts of data larger than a block.

      The default is CBC.

      " + } + }, + "documentation":"

      Parameters that are required to encrypt plaintext data using DUKPT.

      " + }, + "DukptEncryptionMode":{ + "type":"string", + "enum":[ + "ECB", + "CBC" + ] + }, + "DukptKeyVariant":{ + "type":"string", + "enum":[ + "BIDIRECTIONAL", + "REQUEST", + "RESPONSE" + ] + }, + "DynamicCardVerificationCode":{ + "type":"structure", + "required":[ + "ApplicationTransactionCounter", + "PanSequenceNumber", + "TrackData", + "UnpredictableNumber" + ], + "members":{ + "ApplicationTransactionCounter":{ + "shape":"HexLengthBetween2And4", + "documentation":"

      The transaction counter value that comes from the terminal.

      " + }, + "PanSequenceNumber":{ + "shape":"HexLengthEquals2", + "documentation":"

      A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

      " + }, + "TrackData":{ + "shape":"HexLengthBetween2And160", + "documentation":"

      The data on the two tracks of magnetic cards used for financial transactions. This includes the cardholder name, PAN, expiration date, bank ID (BIN) and several other numbers the issuing bank uses to validate the data received.

      " + }, + "UnpredictableNumber":{ + "shape":"HexLengthBetween2And8", + "documentation":"

      A random number generated by the issuer.

      " + } + }, + "documentation":"

      Parameters that are required to generate or verify Dynamic Card Verification Value (dCVV).

      " + }, + "DynamicCardVerificationValue":{ + "type":"structure", + "required":[ + "ApplicationTransactionCounter", + "CardExpiryDate", + "PanSequenceNumber", + "ServiceCode" + ], + "members":{ + "ApplicationTransactionCounter":{ + "shape":"HexLengthBetween2And4", + "documentation":"

      The transaction counter value that comes from the terminal.

      " + }, + "CardExpiryDate":{ + "shape":"NumberLengthEquals4", + "documentation":"

      The expiry date of a payment card.

      " + }, + "PanSequenceNumber":{ + "shape":"HexLengthEquals2", + "documentation":"

      A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

      " + }, + "ServiceCode":{ + "shape":"NumberLengthEquals3", + "documentation":"

      The service code of the payment card. This is different from Card Security Code (CSC).

      " + } + }, + "documentation":"

      Parameters that are required to generate or verify Dynamic Card Verification Value (dCVV).

      " + }, + "EncryptDataInput":{ + "type":"structure", + "required":[ + "EncryptionAttributes", + "KeyIdentifier", + "PlainText" + ], + "members":{ + "EncryptionAttributes":{ + "shape":"EncryptionDecryptionAttributes", + "documentation":"

      The encryption key type and attributes for plaintext encryption.

      " + }, + "KeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The keyARN of the encryption key that Amazon Web Services Payment Cryptography uses for plaintext encryption.

      ", + "location":"uri", + "locationName":"KeyIdentifier" + }, + "PlainText":{ + "shape":"HexEvenLengthBetween16And4064", + "documentation":"

      The plaintext to be encrypted.

      " + } + } + }, + "EncryptDataOutput":{ + "type":"structure", + "required":[ + "CipherText", + "KeyArn", + "KeyCheckValue" + ], + "members":{ + "CipherText":{ + "shape":"HexEvenLengthBetween16And4096", + "documentation":"

      The encrypted ciphertext.

      " + }, + "KeyArn":{ + "shape":"KeyArn", + "documentation":"

      The keyARN of the encryption key that Amazon Web Services Payment Cryptography uses for plaintext encryption.

      " + }, + "KeyCheckValue":{ + "shape":"KeyCheckValue", + "documentation":"

      The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed. Amazon Web Services Payment Cryptography calculates the KCV by using standard algorithms, typically by encrypting 8 or 16 bytes of \"00\" or \"01\" and then truncating the result to the first 3 bytes, or 6 hex digits, of the resulting cryptogram.

      " + } + } + }, + "EncryptionDecryptionAttributes":{ + "type":"structure", + "members":{ + "Asymmetric":{"shape":"AsymmetricEncryptionAttributes"}, + "Dukpt":{"shape":"DukptEncryptionAttributes"}, + "Symmetric":{ + "shape":"SymmetricEncryptionAttributes", + "documentation":"

      Parameters that are required to perform encryption and decryption using symmetric keys.

      " + } + }, + "documentation":"

      Parameters that are required to perform encryption and decryption operations.

      ", + "union":true + }, + "EncryptionMode":{ + "type":"string", + "enum":[ + "ECB", + "CBC", + "CFB", + "CFB1", + "CFB8", + "CFB64", + "CFB128", + "OFB" + ] + }, + "GenerateCardValidationDataInput":{ + "type":"structure", + "required":[ + "GenerationAttributes", + "KeyIdentifier", + "PrimaryAccountNumber" + ], + "members":{ + "GenerationAttributes":{ + "shape":"CardGenerationAttributes", + "documentation":"

      The algorithm for generating CVV or CSC values for the card within Amazon Web Services Payment Cryptography.

      " + }, + "KeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The keyARN of the CVK encryption key that Amazon Web Services Payment Cryptography uses to generate card data.

      " + }, + "PrimaryAccountNumber":{ + "shape":"NumberLengthBetween12And19", + "documentation":"

      The Primary Account Number (PAN), a unique identifier for a payment credit or debit card that associates the card with a specific account holder.

      " + }, + "ValidationDataLength":{ + "shape":"IntegerRangeBetween3And5Type", + "documentation":"

      The length of the CVV or CSC to be generated. The default value is 3.

      " + } + } + }, + "GenerateCardValidationDataOutput":{ + "type":"structure", + "required":[ + "KeyArn", + "KeyCheckValue", + "ValidationData" + ], + "members":{ + "KeyArn":{ + "shape":"KeyArn", + "documentation":"

      The keyARN of the CVK encryption key that Amazon Web Services Payment Cryptography uses to generate CVV or CSC.

      " + }, + "KeyCheckValue":{ + "shape":"KeyCheckValue", + "documentation":"

      The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed. Amazon Web Services Payment Cryptography calculates the KCV by using standard algorithms, typically by encrypting 8 or 16 bytes or \"00\" or \"01\" and then truncating the result to the first 3 bytes, or 6 hex digits, of the resulting cryptogram.

      " + }, + "ValidationData":{ + "shape":"NumberLengthBetween3And5", + "documentation":"

      The CVV or CSC value that Amazon Web Services Payment Cryptography generates for the card.

      " + } + } + }, + "GenerateMacInput":{ + "type":"structure", + "required":[ + "GenerationAttributes", + "KeyIdentifier", + "MessageData" + ], + "members":{ + "GenerationAttributes":{ + "shape":"MacAttributes", + "documentation":"

      The attributes and data values to use for MAC generation within Amazon Web Services Payment Cryptography.

      " + }, + "KeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The keyARN of the MAC generation encryption key.

      " + }, + "MacLength":{ + "shape":"IntegerRangeBetween4And16", + "documentation":"

      The length of a MAC under generation.

      " + }, + "MessageData":{ + "shape":"HexLengthBetween2And4096", + "documentation":"

      The data for which a MAC is under generation.

      " + } + } + }, + "GenerateMacOutput":{ + "type":"structure", + "required":[ + "KeyArn", + "KeyCheckValue", + "Mac" + ], + "members":{ + "KeyArn":{ + "shape":"KeyArn", + "documentation":"

      The keyARN of the encryption key that Amazon Web Services Payment Cryptography uses for MAC generation.

      " + }, + "KeyCheckValue":{ + "shape":"KeyCheckValue", + "documentation":"

      The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed. Amazon Web Services Payment Cryptography calculates the KCV by using standard algorithms, typically by encrypting 8 or 16 bytes or \"00\" or \"01\" and then truncating the result to the first 3 bytes, or 6 hex digits, of the resulting cryptogram.

      " + }, + "Mac":{ + "shape":"HexLengthBetween4And128", + "documentation":"

      The MAC cryptogram generated within Amazon Web Services Payment Cryptography.

      " + } + } + }, + "GeneratePinDataInput":{ + "type":"structure", + "required":[ + "EncryptionKeyIdentifier", + "GenerationAttributes", + "GenerationKeyIdentifier", + "PinBlockFormat", + "PrimaryAccountNumber" + ], + "members":{ + "EncryptionKeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The keyARN of the PEK that Amazon Web Services Payment Cryptography uses to encrypt the PIN Block.

      " + }, + "GenerationAttributes":{ + "shape":"PinGenerationAttributes", + "documentation":"

      The attributes and values to use for PIN, PVV, or PIN Offset generation.

      " + }, + "GenerationKeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The keyARN of the PEK that Amazon Web Services Payment Cryptography uses for pin data generation.

      " + }, + "PinBlockFormat":{ + "shape":"PinBlockFormatForPinData", + "documentation":"

      The PIN encoding format for pin data generation as specified in ISO 9564. Amazon Web Services Payment Cryptography supports ISO_Format_0 and ISO_Format_3.

      The ISO_Format_0 PIN block format is equivalent to the ANSI X9.8, VISA-1, and ECI-1 PIN block formats. It is similar to a VISA-4 PIN block format. It supports a PIN from 4 to 12 digits in length.

      The ISO_Format_3 PIN block format is the same as ISO_Format_0 except that the fill digits are random values from 10 to 15.

      " + }, + "PinDataLength":{ + "shape":"IntegerRangeBetween4And12", + "documentation":"

      The length of PIN under generation.

      ", + "box":true + }, + "PrimaryAccountNumber":{ + "shape":"NumberLengthBetween12And19", + "documentation":"

      The Primary Account Number (PAN), a unique identifier for a payment credit or debit card that associates the card with a specific account holder.

      " + } + } + }, + "GeneratePinDataOutput":{ + "type":"structure", + "required":[ + "EncryptedPinBlock", + "EncryptionKeyArn", + "EncryptionKeyCheckValue", + "GenerationKeyArn", + "GenerationKeyCheckValue", + "PinData" + ], + "members":{ + "EncryptedPinBlock":{ + "shape":"HexLengthBetween16And32", + "documentation":"

      The PIN block encrypted under PEK from Amazon Web Services Payment Cryptography. The encrypted PIN block is a composite of PAN (Primary Account Number) and PIN (Personal Identification Number), generated in accordance with ISO 9564 standard.

      " + }, + "EncryptionKeyArn":{ + "shape":"KeyArn", + "documentation":"

      The keyARN of the PEK that Amazon Web Services Payment Cryptography uses for encrypted pin block generation.

      " + }, + "EncryptionKeyCheckValue":{ + "shape":"KeyCheckValue", + "documentation":"

      The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed. Amazon Web Services Payment Cryptography calculates the KCV by using standard algorithms, typically by encrypting 8 or 16 bytes or \"00\" or \"01\" and then truncating the result to the first 3 bytes, or 6 hex digits, of the resulting cryptogram.

      " + }, + "GenerationKeyArn":{ + "shape":"KeyArn", + "documentation":"

      The keyARN of the pin data generation key that Amazon Web Services Payment Cryptography uses for PIN, PVV or PIN Offset generation.

      " + }, + "GenerationKeyCheckValue":{ + "shape":"KeyCheckValue", + "documentation":"

      The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed. Amazon Web Services Payment Cryptography calculates the KCV by using standard algorithms, typically by encrypting 8 or 16 bytes or \"00\" or \"01\" and then truncating the result to the first 3 bytes, or 6 hex digits, of the resulting cryptogram.

      " + }, + "PinData":{ + "shape":"PinData", + "documentation":"

      The attributes and values Amazon Web Services Payment Cryptography uses for pin data generation.

      " + } + } + }, + "HexEvenLengthBetween16And4064":{ + "type":"string", + "max":4064, + "min":16, + "pattern":"^(?:[0-9a-fA-F][0-9a-fA-F])+$", + "sensitive":true + }, + "HexEvenLengthBetween16And4096":{ + "type":"string", + "max":4096, + "min":16, + "pattern":"^(?:[0-9a-fA-F][0-9a-fA-F])+$", + "sensitive":true + }, + "HexLength16Or32":{ + "type":"string", + "max":32, + "min":16, + "pattern":"^(?:[0-9a-fA-F]{16}|[0-9a-fA-F]{32})$", + "sensitive":true + }, + "HexLengthBetween10And24":{ + "type":"string", + "max":24, + "min":10, + "pattern":"^[0-9a-fA-F]+$" + }, + "HexLengthBetween16And32":{ + "type":"string", + "max":32, + "min":16, + "pattern":"^[0-9a-fA-F]+$" + }, + "HexLengthBetween1And16":{ + "type":"string", + "max":16, + "min":1, + "pattern":"^[0-9a-fA-F]+$" + }, + "HexLengthBetween2And1024":{ + "type":"string", + "max":1024, + "min":2, + "pattern":"^[0-9a-fA-F]+$" + }, + "HexLengthBetween2And160":{ + "type":"string", + "max":160, + "min":2, + "pattern":"^[0-9a-fA-F]+$" + }, + "HexLengthBetween2And4":{ + "type":"string", + "max":4, + "min":2, + "pattern":"^[0-9a-fA-F]+$" + }, + "HexLengthBetween2And4096":{ + "type":"string", + "max":4096, + "min":2, + "pattern":"^[0-9a-fA-F]+$" + }, + "HexLengthBetween2And8":{ + "type":"string", + "max":8, + "min":2, + "pattern":"^[0-9a-fA-F]+$" + }, + "HexLengthBetween4And128":{ + "type":"string", + "max":128, + "min":4, + "pattern":"^[0-9a-fA-F]+$" + }, + "HexLengthEquals1":{ + "type":"string", + "max":1, + "min":1, + "pattern":"^[0-9A-F]+$" + }, + "HexLengthEquals16":{ + "type":"string", + "max":16, + "min":16, + "pattern":"^[0-9a-fA-F]+$" + }, + "HexLengthEquals2":{ + "type":"string", + "max":2, + "min":2, + "pattern":"^[0-9a-fA-F]+$" + }, + "HexLengthEquals4":{ + "type":"string", + "max":4, + "min":4, + "pattern":"^[0-9a-fA-F]+$" + }, + "HexLengthEquals8":{ + "type":"string", + "max":8, + "min":8, + "pattern":"^[0-9a-fA-F]+$" + }, + "Ibm3624NaturalPin":{ + "type":"structure", + "required":[ + "DecimalizationTable", + 
"PinValidationData", + "PinValidationDataPadCharacter" + ], + "members":{ + "DecimalizationTable":{ + "shape":"NumberLengthEquals16", + "documentation":"

      The decimalization table to use for IBM 3624 PIN algorithm. The table is used to convert the algorithm intermediate result from hexadecimal characters to decimal.

      " + }, + "PinValidationData":{ + "shape":"NumberLengthBetween4And16", + "documentation":"

      The unique data for cardholder identification.

      " + }, + "PinValidationDataPadCharacter":{ + "shape":"HexLengthEquals1", + "documentation":"

      The padding character for validation data.

      " + } + }, + "documentation":"

      Parameters that are required to generate or verify Ibm3624 natural PIN.

      " + }, + "Ibm3624PinFromOffset":{ + "type":"structure", + "required":[ + "DecimalizationTable", + "PinOffset", + "PinValidationData", + "PinValidationDataPadCharacter" + ], + "members":{ + "DecimalizationTable":{ + "shape":"NumberLengthEquals16", + "documentation":"

      The decimalization table to use for IBM 3624 PIN algorithm. The table is used to convert the algorithm intermediate result from hexadecimal characters to decimal.

      " + }, + "PinOffset":{ + "shape":"NumberLengthBetween4And12", + "documentation":"

      The PIN offset value.

      " + }, + "PinValidationData":{ + "shape":"NumberLengthBetween4And16", + "documentation":"

      The unique data for cardholder identification.

      " + }, + "PinValidationDataPadCharacter":{ + "shape":"HexLengthEquals1", + "documentation":"

      The padding character for validation data.

      " + } + }, + "documentation":"

      Parameters that are required to generate or verify Ibm3624 PIN from offset PIN.

      " + }, + "Ibm3624PinOffset":{ + "type":"structure", + "required":[ + "DecimalizationTable", + "EncryptedPinBlock", + "PinValidationData", + "PinValidationDataPadCharacter" + ], + "members":{ + "DecimalizationTable":{ + "shape":"NumberLengthEquals16", + "documentation":"

      The decimalization table to use for IBM 3624 PIN algorithm. The table is used to convert the algorithm intermediate result from hexadecimal characters to decimal.

      " + }, + "EncryptedPinBlock":{ + "shape":"HexLengthBetween16And32", + "documentation":"

      The encrypted PIN block data. According to ISO 9564 standard, a PIN Block is an encoded representation of a payment card Personal Account Number (PAN) and the cardholder Personal Identification Number (PIN).

      " + }, + "PinValidationData":{ + "shape":"NumberLengthBetween4And16", + "documentation":"

      The unique data for cardholder identification.

      " + }, + "PinValidationDataPadCharacter":{ + "shape":"HexLengthEquals1", + "documentation":"

      The padding character for validation data.

      " + } + }, + "documentation":"

      Parameters that are required to generate or verify Ibm3624 PIN offset PIN.

      " + }, + "Ibm3624PinVerification":{ + "type":"structure", + "required":[ + "DecimalizationTable", + "PinOffset", + "PinValidationData", + "PinValidationDataPadCharacter" + ], + "members":{ + "DecimalizationTable":{ + "shape":"NumberLengthEquals16", + "documentation":"

      The decimalization table to use for IBM 3624 PIN algorithm. The table is used to convert the algorithm intermediate result from hexadecimal characters to decimal.

      " + }, + "PinOffset":{ + "shape":"NumberLengthBetween4And12", + "documentation":"

      The PIN offset value.

      " + }, + "PinValidationData":{ + "shape":"NumberLengthBetween4And16", + "documentation":"

      The unique data for cardholder identification.

      " + }, + "PinValidationDataPadCharacter":{ + "shape":"HexLengthEquals1", + "documentation":"

      The padding character for validation data.

      " + } + }, + "documentation":"

      Parameters that are required to generate or verify Ibm3624 PIN verification PIN.

      " + }, + "Ibm3624RandomPin":{ + "type":"structure", + "required":[ + "DecimalizationTable", + "PinValidationData", + "PinValidationDataPadCharacter" + ], + "members":{ + "DecimalizationTable":{ + "shape":"NumberLengthEquals16", + "documentation":"

      The decimalization table to use for IBM 3624 PIN algorithm. The table is used to convert the algorithm intermediate result from hexadecimal characters to decimal.

      " + }, + "PinValidationData":{ + "shape":"NumberLengthBetween4And16", + "documentation":"

      The unique data for cardholder identification.

      " + }, + "PinValidationDataPadCharacter":{ + "shape":"HexLengthEquals1", + "documentation":"

      The padding character for validation data.

      " + } + }, + "documentation":"

      Parameters that are required to generate or verify Ibm3624 random PIN.

      " + }, + "IntegerRangeBetween0And9":{ + "type":"integer", + "box":true, + "max":9, + "min":0 + }, + "IntegerRangeBetween3And5Type":{ + "type":"integer", + "box":true, + "max":5, + "min":3 + }, + "IntegerRangeBetween4And12":{ + "type":"integer", + "max":12, + "min":4 + }, + "IntegerRangeBetween4And16":{ + "type":"integer", + "box":true, + "max":16, + "min":4 + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

      The request processing has failed because of an unknown error, exception, or failure.

      ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "KeyArn":{ + "type":"string", + "max":150, + "min":70, + "pattern":"^arn:aws:payment-cryptography:[a-z]{2}-[a-z]{1,16}-[0-9]+:[0-9]{12}:key/[0-9a-zA-Z]{16,64}$" + }, + "KeyArnOrKeyAliasType":{ + "type":"string", + "max":322, + "min":7, + "pattern":"^arn:aws:payment-cryptography:[a-z]{2}-[a-z]{1,16}-[0-9]+:[0-9]{12}:(key/[0-9a-zA-Z]{16,64}|alias/[a-zA-Z0-9/_-]+)$|^alias/[a-zA-Z0-9/_-]+$" + }, + "KeyCheckValue":{ + "type":"string", + "max":16, + "min":4, + "pattern":"^[0-9a-fA-F]+$" + }, + "MacAlgorithm":{ + "type":"string", + "enum":[ + "ISO9797_ALGORITHM1", + "ISO9797_ALGORITHM3", + "CMAC", + "HMAC_SHA224", + "HMAC_SHA256", + "HMAC_SHA384", + "HMAC_SHA512" + ] + }, + "MacAlgorithmDukpt":{ + "type":"structure", + "required":[ + "DukptKeyVariant", + "KeySerialNumber" + ], + "members":{ + "DukptDerivationType":{ + "shape":"DukptDerivationType", + "documentation":"

      The key type derived using DUKPT from a Base Derivation Key (BDK) and Key Serial Number (KSN). This must be less than or equal to the strength of the BDK. For example, you can't use AES_128 as a derivation type for a BDK of AES_128 or TDES_2KEY.

      " + }, + "DukptKeyVariant":{ + "shape":"DukptKeyVariant", + "documentation":"

      The type of use of DUKPT, which can be MAC generation, MAC verification, or both.

      " + }, + "KeySerialNumber":{ + "shape":"HexLengthBetween10And24", + "documentation":"

      The unique identifier known as Key Serial Number (KSN) that comes from an encrypting device using DUKPT encryption method. The KSN is derived from the encrypting device unique identifier and an internal transaction counter.

      " + } + }, + "documentation":"

      Parameters required for DUKPT MAC generation and verification.

      " + }, + "MacAlgorithmEmv":{ + "type":"structure", + "required":[ + "MajorKeyDerivationMode", + "PanSequenceNumber", + "PrimaryAccountNumber", + "SessionKeyDerivationMode", + "SessionKeyDerivationValue" + ], + "members":{ + "MajorKeyDerivationMode":{ + "shape":"MajorKeyDerivationMode", + "documentation":"

      The method to use when deriving the master key for EMV MAC generation or verification.

      " + }, + "PanSequenceNumber":{ + "shape":"HexLengthEquals2", + "documentation":"

      A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

      " + }, + "PrimaryAccountNumber":{ + "shape":"NumberLengthBetween12And19", + "documentation":"

      The Primary Account Number (PAN), a unique identifier for a payment credit or debit card that associates the card with a specific account holder.

      " + }, + "SessionKeyDerivationMode":{ + "shape":"SessionKeyDerivationMode", + "documentation":"

      The method of deriving a session key for EMV MAC generation or verification.

      " + }, + "SessionKeyDerivationValue":{ + "shape":"SessionKeyDerivationValue", + "documentation":"

      Parameters that are required to generate session key for EMV generation and verification.

      " + } + }, + "documentation":"

      Parameters that are required for EMV MAC generation and verification.

      " + }, + "MacAttributes":{ + "type":"structure", + "members":{ + "Algorithm":{ + "shape":"MacAlgorithm", + "documentation":"

      The encryption algorithm for MAC generation or verification.

      " + }, + "DukptCmac":{ + "shape":"MacAlgorithmDukpt", + "documentation":"

      Parameters that are required for MAC generation or verification using DUKPT CMAC algorithm.

      " + }, + "DukptIso9797Algorithm1":{ + "shape":"MacAlgorithmDukpt", + "documentation":"

      Parameters that are required for MAC generation or verification using DUKPT ISO 9797 algorithm1.

      " + }, + "DukptIso9797Algorithm3":{ + "shape":"MacAlgorithmDukpt", + "documentation":"

      Parameters that are required for MAC generation or verification using DUKPT ISO 9797 algorithm3.

      " + }, + "EmvMac":{ + "shape":"MacAlgorithmEmv", + "documentation":"

      Parameters that are required for MAC generation or verification using EMV MAC algorithm.

      " + } + }, + "documentation":"

      Parameters that are required for DUKPT, HMAC, or EMV MAC generation or verification.

      ", + "union":true + }, + "MajorKeyDerivationMode":{ + "type":"string", + "enum":[ + "EMV_OPTION_A", + "EMV_OPTION_B" + ] + }, + "NumberLengthBetween12And19":{ + "type":"string", + "max":19, + "min":12, + "pattern":"^[0-9]+$", + "sensitive":true + }, + "NumberLengthBetween3And5":{ + "type":"string", + "max":5, + "min":3, + "pattern":"^[0-9]+$" + }, + "NumberLengthBetween4And12":{ + "type":"string", + "max":12, + "min":4, + "pattern":"^[0-9]+$" + }, + "NumberLengthBetween4And16":{ + "type":"string", + "max":16, + "min":4, + "pattern":"^[0-9]+$" + }, + "NumberLengthEquals16":{ + "type":"string", + "max":16, + "min":16, + "pattern":"^[0-9]+$" + }, + "NumberLengthEquals3":{ + "type":"string", + "max":3, + "min":3, + "pattern":"^[0-9]+$" + }, + "NumberLengthEquals4":{ + "type":"string", + "max":4, + "min":4, + "pattern":"^[0-9]+$" + }, + "PaddingType":{ + "type":"string", + "enum":[ + "PKCS1", + "OAEP_SHA1", + "OAEP_SHA256", + "OAEP_SHA512" + ] + }, + "PinBlockFormatForPinData":{ + "type":"string", + "enum":[ + "ISO_FORMAT_0", + "ISO_FORMAT_3" + ] + }, + "PinData":{ + "type":"structure", + "members":{ + "PinOffset":{ + "shape":"NumberLengthBetween4And12", + "documentation":"

      The PIN offset value.

      " + }, + "VerificationValue":{ + "shape":"NumberLengthBetween4And12", + "documentation":"

      The unique data to identify a cardholder. In most cases, this is the same as the cardholder's Primary Account Number (PAN). If a value is not provided, it defaults to PAN.

      " + } + }, + "documentation":"

      Parameters that are required to generate, translate, or verify PIN data.

      ", + "union":true + }, + "PinGenerationAttributes":{ + "type":"structure", + "members":{ + "Ibm3624NaturalPin":{ + "shape":"Ibm3624NaturalPin", + "documentation":"

      Parameters that are required to generate or verify Ibm3624 natural PIN.

      " + }, + "Ibm3624PinFromOffset":{ + "shape":"Ibm3624PinFromOffset", + "documentation":"

      Parameters that are required to generate or verify Ibm3624 PIN from offset PIN.

      " + }, + "Ibm3624PinOffset":{ + "shape":"Ibm3624PinOffset", + "documentation":"

      Parameters that are required to generate or verify Ibm3624 PIN offset PIN.

      " + }, + "Ibm3624RandomPin":{ + "shape":"Ibm3624RandomPin", + "documentation":"

      Parameters that are required to generate or verify Ibm3624 random PIN.

      " + }, + "VisaPin":{ + "shape":"VisaPin", + "documentation":"

      Parameters that are required to generate or verify Visa PIN.

      " + }, + "VisaPinVerificationValue":{ + "shape":"VisaPinVerificationValue", + "documentation":"

      Parameters that are required to generate or verify Visa PIN Verification Value (PVV).

      " + } + }, + "documentation":"

      Parameters that are required for PIN data generation.

      ", + "union":true + }, + "PinVerificationAttributes":{ + "type":"structure", + "members":{ + "Ibm3624Pin":{ + "shape":"Ibm3624PinVerification", + "documentation":"

      Parameters that are required to generate or verify Ibm3624 PIN.

      " + }, + "VisaPin":{ + "shape":"VisaPinVerification", + "documentation":"

      Parameters that are required to generate or verify Visa PIN.

      " + } + }, + "documentation":"

      Parameters that are required for PIN data verification.

      ", + "union":true + }, + "ReEncryptDataInput":{ + "type":"structure", + "required":[ + "CipherText", + "IncomingEncryptionAttributes", + "IncomingKeyIdentifier", + "OutgoingEncryptionAttributes", + "OutgoingKeyIdentifier" + ], + "members":{ + "CipherText":{ + "shape":"HexEvenLengthBetween16And4096", + "documentation":"

      Ciphertext to be encrypted. The minimum allowed length is 16 bytes and maximum allowed length is 4096 bytes.

      " + }, + "IncomingEncryptionAttributes":{ + "shape":"ReEncryptionAttributes", + "documentation":"

      The attributes and values for incoming ciphertext.

      " + }, + "IncomingKeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The keyARN of the encryption key of incoming ciphertext data.

      ", + "location":"uri", + "locationName":"IncomingKeyIdentifier" + }, + "OutgoingEncryptionAttributes":{ + "shape":"ReEncryptionAttributes", + "documentation":"

      The attributes and values for outgoing ciphertext data after encryption by Amazon Web Services Payment Cryptography.

      " + }, + "OutgoingKeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The keyARN of the encryption key of outgoing ciphertext data after encryption by Amazon Web Services Payment Cryptography.

      " + } + } + }, + "ReEncryptDataOutput":{ + "type":"structure", + "required":[ + "CipherText", + "KeyArn", + "KeyCheckValue" + ], + "members":{ + "CipherText":{ + "shape":"HexEvenLengthBetween16And4096", + "documentation":"

      The encrypted ciphertext.

      " + }, + "KeyArn":{ + "shape":"KeyArn", + "documentation":"

      The keyARN (Amazon Resource Name) of the encryption key that Amazon Web Services Payment Cryptography uses for plaintext encryption.

      " + }, + "KeyCheckValue":{ + "shape":"KeyCheckValue", + "documentation":"

      The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed. Amazon Web Services Payment Cryptography calculates the KCV by using standard algorithms, typically by encrypting 8 or 16 bytes or \"00\" or \"01\" and then truncating the result to the first 3 bytes, or 6 hex digits, of the resulting cryptogram.

      " + } + } + }, + "ReEncryptionAttributes":{ + "type":"structure", + "members":{ + "Dukpt":{"shape":"DukptEncryptionAttributes"}, + "Symmetric":{ + "shape":"SymmetricEncryptionAttributes", + "documentation":"

      Parameters that are required to encrypt data using symmetric keys.

      " + } + }, + "documentation":"

      Parameters that are required to perform reencryption operation.

      ", + "union":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "ResourceId":{ + "shape":"String", + "documentation":"

      The resource that is missing.

      " + } + }, + "documentation":"

      The request was denied due to an invalid resource error.

      ", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SessionKeyAmex":{ + "type":"structure", + "required":[ + "PanSequenceNumber", + "PrimaryAccountNumber" + ], + "members":{ + "PanSequenceNumber":{ + "shape":"HexLengthEquals2", + "documentation":"

      A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

      " + }, + "PrimaryAccountNumber":{ + "shape":"NumberLengthBetween12And19", + "documentation":"

      The Primary Account Number (PAN) of the cardholder. A PAN is a unique identifier for a payment credit or debit card that associates the card with a specific account holder.

      " + } + }, + "documentation":"

      Parameters to derive session key for an Amex payment card.

      " + }, + "SessionKeyDerivation":{ + "type":"structure", + "members":{ + "Amex":{ + "shape":"SessionKeyAmex", + "documentation":"

      Parameters to derive session key for an Amex payment card for ARQC verification.

      " + }, + "Emv2000":{ + "shape":"SessionKeyEmv2000", + "documentation":"

      Parameters to derive session key for an Emv2000 payment card for ARQC verification.

      " + }, + "EmvCommon":{ + "shape":"SessionKeyEmvCommon", + "documentation":"

      Parameters to derive session key for an Emv common payment card for ARQC verification.

      " + }, + "Mastercard":{ + "shape":"SessionKeyMastercard", + "documentation":"

      Parameters to derive session key for a Mastercard payment card for ARQC verification.

      " + }, + "Visa":{ + "shape":"SessionKeyVisa", + "documentation":"

      Parameters to derive session key for a Visa payment card for ARQC verification.

      " + } + }, + "documentation":"

      Parameters to derive a session key for Authorization Response Cryptogram (ARQC) verification.

      ", + "union":true + }, + "SessionKeyDerivationMode":{ + "type":"string", + "enum":[ + "EMV_COMMON_SESSION_KEY", + "EMV2000", + "AMEX", + "MASTERCARD_SESSION_KEY", + "VISA" + ] + }, + "SessionKeyDerivationValue":{ + "type":"structure", + "members":{ + "ApplicationCryptogram":{ + "shape":"HexLengthEquals16", + "documentation":"

      The cryptogram provided by the terminal during transaction processing.

      " + }, + "ApplicationTransactionCounter":{ + "shape":"HexLengthBetween2And4", + "documentation":"

      The transaction counter that is provided by the terminal during transaction processing.

      " + } + }, + "documentation":"

      Parameters to derive session key value using a MAC EMV algorithm.

      ", + "union":true + }, + "SessionKeyEmv2000":{ + "type":"structure", + "required":[ + "ApplicationTransactionCounter", + "PanSequenceNumber", + "PrimaryAccountNumber" + ], + "members":{ + "ApplicationTransactionCounter":{ + "shape":"HexLengthBetween2And4", + "documentation":"

      The transaction counter that is provided by the terminal during transaction processing.

      " + }, + "PanSequenceNumber":{ + "shape":"HexLengthEquals2", + "documentation":"

      A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

      " + }, + "PrimaryAccountNumber":{ + "shape":"NumberLengthBetween12And19", + "documentation":"

      The Primary Account Number (PAN) of the cardholder. A PAN is a unique identifier for a payment credit or debit card that associates the card with a specific account holder.

      " + } + }, + "documentation":"

      Parameters to derive session key for an Emv2000 payment card for ARQC verification.

      " + }, + "SessionKeyEmvCommon":{ + "type":"structure", + "required":[ + "ApplicationTransactionCounter", + "PanSequenceNumber", + "PrimaryAccountNumber" + ], + "members":{ + "ApplicationTransactionCounter":{ + "shape":"HexLengthBetween2And4", + "documentation":"

      The transaction counter that is provided by the terminal during transaction processing.

      " + }, + "PanSequenceNumber":{ + "shape":"HexLengthEquals2", + "documentation":"

      A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

      " + }, + "PrimaryAccountNumber":{ + "shape":"NumberLengthBetween12And19", + "documentation":"

      The Primary Account Number (PAN) of the cardholder. A PAN is a unique identifier for a payment credit or debit card that associates the card with a specific account holder.

      " + } + }, + "documentation":"

      Parameters to derive session key for an Emv common payment card for ARQC verification.

      " + }, + "SessionKeyMastercard":{ + "type":"structure", + "required":[ + "ApplicationTransactionCounter", + "PanSequenceNumber", + "PrimaryAccountNumber", + "UnpredictableNumber" + ], + "members":{ + "ApplicationTransactionCounter":{ + "shape":"HexLengthBetween2And4", + "documentation":"

      The transaction counter that is provided by the terminal during transaction processing.

      " + }, + "PanSequenceNumber":{ + "shape":"HexLengthEquals2", + "documentation":"

      A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

      " + }, + "PrimaryAccountNumber":{ + "shape":"NumberLengthBetween12And19", + "documentation":"

      The Primary Account Number (PAN) of the cardholder. A PAN is a unique identifier for a payment credit or debit card that associates the card with a specific account holder.

      " + }, + "UnpredictableNumber":{ + "shape":"HexLengthBetween2And8", + "documentation":"

      A random number generated by the issuer.

      " + } + }, + "documentation":"

      Parameters to derive session key for a Mastercard payment card for ARQC verification.

      " + }, + "SessionKeyVisa":{ + "type":"structure", + "required":[ + "PanSequenceNumber", + "PrimaryAccountNumber" + ], + "members":{ + "PanSequenceNumber":{ + "shape":"HexLengthEquals2", + "documentation":"

      A number that identifies and differentiates payment cards with the same Primary Account Number (PAN).

      " + }, + "PrimaryAccountNumber":{ + "shape":"NumberLengthBetween12And19", + "documentation":"

      The Primary Account Number (PAN) of the cardholder. A PAN is a unique identifier for a payment credit or debit card that associates the card with a specific account holder.

      " + } + }, + "documentation":"

      Parameters to derive session key for Visa payment card for ARQC verification.

      " + }, + "String":{"type":"string"}, + "SymmetricEncryptionAttributes":{ + "type":"structure", + "required":["Mode"], + "members":{ + "InitializationVector":{ + "shape":"HexLength16Or32", + "documentation":"

      An input to a cryptographic primitive used to provide the initial state. The InitializationVector is typically required to have a random or pseudo-random value, but sometimes it only needs to be unpredictable or unique. If a value is not provided, Amazon Web Services Payment Cryptography generates a random value.

      " + }, + "Mode":{ + "shape":"EncryptionMode", + "documentation":"

      The block cipher mode of operation. Block ciphers are designed to encrypt a block of data of fixed size (for example, 128 bits). The size of the input block is usually same as the size of the encrypted output block, while the key length can be different. A mode of operation describes how to repeatedly apply a cipher's single-block operation to securely transform amounts of data larger than a block.

      " + }, + "PaddingType":{ + "shape":"PaddingType", + "documentation":"

      The padding to be included with the data.

      " + } + }, + "documentation":"

      Parameters required to encrypt plaintext data using symmetric keys.

      " + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

      The request was denied due to request throttling.

      ", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + "TranslatePinDataInput":{ + "type":"structure", + "required":[ + "EncryptedPinBlock", + "IncomingKeyIdentifier", + "IncomingTranslationAttributes", + "OutgoingKeyIdentifier", + "OutgoingTranslationAttributes" + ], + "members":{ + "EncryptedPinBlock":{ + "shape":"HexLengthBetween16And32", + "documentation":"

      The encrypted PIN block data that Amazon Web Services Payment Cryptography translates.

      " + }, + "IncomingDukptAttributes":{ + "shape":"DukptDerivationAttributes", + "documentation":"

      The attributes and values to use for the incoming DUKPT encryption key for PIN block translation.

      " + }, + "IncomingKeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The keyARN of the encryption key under which incoming PIN block data is encrypted. This key type can be PEK or BDK.

      " + }, + "IncomingTranslationAttributes":{ + "shape":"TranslationIsoFormats", + "documentation":"

      The format of the incoming PIN block data for translation within Amazon Web Services Payment Cryptography.

      " + }, + "OutgoingDukptAttributes":{ + "shape":"DukptDerivationAttributes", + "documentation":"

      The attributes and values to use for outgoing DUKPT encryption key after PIN block translation.

      " + }, + "OutgoingKeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The keyARN of the encryption key for encrypting outgoing PIN block data. This key type can be PEK or BDK.

      " + }, + "OutgoingTranslationAttributes":{ + "shape":"TranslationIsoFormats", + "documentation":"

      The format of the outgoing PIN block data after translation by Amazon Web Services Payment Cryptography.

      " + } + } + }, + "TranslatePinDataOutput":{ + "type":"structure", + "required":[ + "KeyArn", + "KeyCheckValue", + "PinBlock" + ], + "members":{ + "KeyArn":{ + "shape":"KeyArn", + "documentation":"

      The keyARN of the encryption key that Amazon Web Services Payment Cryptography uses to encrypt outgoing PIN block data after translation.

      " + }, + "KeyCheckValue":{ + "shape":"KeyCheckValue", + "documentation":"

      The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed. Amazon Web Services Payment Cryptography calculates the KCV by using standard algorithms, typically by encrypting 8 or 16 bytes or \"00\" or \"01\" and then truncating the result to the first 3 bytes, or 6 hex digits, of the resulting cryptogram.

      " + }, + "PinBlock":{ + "shape":"HexLengthBetween16And32", + "documentation":"

      The outgoing encrypted PIN block data after translation.

      " + } + } + }, + "TranslationIsoFormats":{ + "type":"structure", + "members":{ + "IsoFormat0":{ + "shape":"TranslationPinDataIsoFormat034", + "documentation":"

      Parameters that are required for ISO9564 PIN format 0 translation.

      " + }, + "IsoFormat1":{ + "shape":"TranslationPinDataIsoFormat1", + "documentation":"

      Parameters that are required for ISO9564 PIN format 1 translation.

      " + }, + "IsoFormat3":{ + "shape":"TranslationPinDataIsoFormat034", + "documentation":"

      Parameters that are required for ISO9564 PIN format 3 translation.

      " + }, + "IsoFormat4":{ + "shape":"TranslationPinDataIsoFormat034", + "documentation":"

      Parameters that are required for ISO9564 PIN format 4 translation.

      " + } + }, + "documentation":"

      Parameters that are required for translation between ISO9564 PIN block formats 0,1,3,4.

      ", + "union":true + }, + "TranslationPinDataIsoFormat034":{ + "type":"structure", + "required":["PrimaryAccountNumber"], + "members":{ + "PrimaryAccountNumber":{ + "shape":"NumberLengthBetween12And19", + "documentation":"

      The Primary Account Number (PAN) of the cardholder. A PAN is a unique identifier for a payment credit or debit card and associates the card to a specific account holder.

      " + } + }, + "documentation":"

      Parameters that are required for translation between ISO9564 PIN formats 0, 3, and 4.

      " + }, + "TranslationPinDataIsoFormat1":{ + "type":"structure", + "members":{ + }, + "documentation":"

      Parameters that are required for ISO9564 PIN format 1 translation.

      " + }, + "ValidationException":{ + "type":"structure", + "required":["message"], + "members":{ + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

      The request was denied due to an invalid request error.

      " + }, + "message":{"shape":"String"} + }, + "documentation":"

      The request was denied due to an invalid request error.

      ", + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "message", + "path" + ], + "members":{ + "message":{ + "shape":"String", + "documentation":"

      The request was denied due to an invalid request error.

      " + }, + "path":{ + "shape":"String", + "documentation":"

      The request was denied due to an invalid request error.

      " + } + }, + "documentation":"

      The request was denied due to an invalid request error.

      " + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "VerificationFailedException":{ + "type":"structure", + "required":[ + "Message", + "Reason" + ], + "members":{ + "Message":{"shape":"String"}, + "Reason":{ + "shape":"VerificationFailedReason", + "documentation":"

      The reason for the exception.

      " + } + }, + "documentation":"

      This request failed verification.

      ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "VerificationFailedReason":{ + "type":"string", + "enum":[ + "INVALID_MAC", + "INVALID_PIN", + "INVALID_VALIDATION_DATA", + "INVALID_AUTH_REQUEST_CRYPTOGRAM" + ] + }, + "VerifyAuthRequestCryptogramInput":{ + "type":"structure", + "required":[ + "AuthRequestCryptogram", + "KeyIdentifier", + "MajorKeyDerivationMode", + "SessionKeyDerivationAttributes", + "TransactionData" + ], + "members":{ + "AuthRequestCryptogram":{ + "shape":"HexLengthEquals16", + "documentation":"

      The auth request cryptogram imported into Amazon Web Services Payment Cryptography for ARQC verification using a major encryption key and transaction data.

      " + }, + "AuthResponseAttributes":{ + "shape":"CryptogramAuthResponse", + "documentation":"

      The attributes and values for auth request cryptogram verification. These parameters are required in case using ARPC Method 1 or Method 2 for ARQC verification.

      " + }, + "KeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The keyARN of the major encryption key that Amazon Web Services Payment Cryptography uses for ARQC verification.

      " + }, + "MajorKeyDerivationMode":{ + "shape":"MajorKeyDerivationMode", + "documentation":"

      The method to use when deriving the major encryption key for ARQC verification within Amazon Web Services Payment Cryptography. The same key derivation mode was used for ARQC generation outside of Amazon Web Services Payment Cryptography.

      " + }, + "SessionKeyDerivationAttributes":{ + "shape":"SessionKeyDerivation", + "documentation":"

      The attributes and values to use for deriving a session key for ARQC verification within Amazon Web Services Payment Cryptography. The same attributes were used for ARQC generation outside of Amazon Web Services Payment Cryptography.

      " + }, + "TransactionData":{ + "shape":"HexLengthBetween2And1024", + "documentation":"

      The transaction data that Amazon Web Services Payment Cryptography uses for ARQC verification. The same transaction is used for ARQC generation outside of Amazon Web Services Payment Cryptography.

      " + } + } + }, + "VerifyAuthRequestCryptogramOutput":{ + "type":"structure", + "required":[ + "KeyArn", + "KeyCheckValue" + ], + "members":{ + "AuthResponseValue":{ + "shape":"HexLengthBetween1And16", + "documentation":"

      The result for ARQC verification or ARPC generation within Amazon Web Services Payment Cryptography.

      " + }, + "KeyArn":{ + "shape":"KeyArn", + "documentation":"

      The keyARN of the major encryption key that Amazon Web Services Payment Cryptography uses for ARQC verification.

      " + }, + "KeyCheckValue":{ + "shape":"KeyCheckValue", + "documentation":"

      The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed. Amazon Web Services Payment Cryptography calculates the KCV by using standard algorithms, typically by encrypting 8 or 16 bytes or \"00\" or \"01\" and then truncating the result to the first 3 bytes, or 6 hex digits, of the resulting cryptogram.

      " + } + } + }, + "VerifyCardValidationDataInput":{ + "type":"structure", + "required":[ + "KeyIdentifier", + "PrimaryAccountNumber", + "ValidationData", + "VerificationAttributes" + ], + "members":{ + "KeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The keyARN of the CVK encryption key that Amazon Web Services Payment Cryptography uses to verify card data.

      " + }, + "PrimaryAccountNumber":{ + "shape":"NumberLengthBetween12And19", + "documentation":"

      The Primary Account Number (PAN), a unique identifier for a payment credit or debit card that associates the card with a specific account holder.

      " + }, + "ValidationData":{ + "shape":"NumberLengthBetween3And5", + "documentation":"

      The CVV or CSC value for use for card data verification within Amazon Web Services Payment Cryptography.

      " + }, + "VerificationAttributes":{ + "shape":"CardVerificationAttributes", + "documentation":"

      The algorithm to use for verification of card data within Amazon Web Services Payment Cryptography.

      " + } + } + }, + "VerifyCardValidationDataOutput":{ + "type":"structure", + "required":[ + "KeyArn", + "KeyCheckValue" + ], + "members":{ + "KeyArn":{ + "shape":"KeyArn", + "documentation":"

      The keyARN of the CVK encryption key that Amazon Web Services Payment Cryptography uses to verify CVV or CSC.

      " + }, + "KeyCheckValue":{ + "shape":"KeyCheckValue", + "documentation":"

      The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed. Amazon Web Services Payment Cryptography calculates the KCV by using standard algorithms, typically by encrypting 8 or 16 bytes or \"00\" or \"01\" and then truncating the result to the first 3 bytes, or 6 hex digits, of the resulting cryptogram.

      " + } + } + }, + "VerifyMacInput":{ + "type":"structure", + "required":[ + "KeyIdentifier", + "Mac", + "MessageData", + "VerificationAttributes" + ], + "members":{ + "KeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The keyARN of the encryption key that Amazon Web Services Payment Cryptography uses to verify MAC data.

      " + }, + "Mac":{ + "shape":"HexLengthBetween4And128", + "documentation":"

      The MAC being verified.

      " + }, + "MacLength":{ + "shape":"IntegerRangeBetween4And16", + "documentation":"

      The length of the MAC.

      " + }, + "MessageData":{ + "shape":"HexLengthBetween2And4096", + "documentation":"

      The data for which the MAC is under verification.

      " + }, + "VerificationAttributes":{ + "shape":"MacAttributes", + "documentation":"

      The attributes and data values to use for MAC verification within Amazon Web Services Payment Cryptography.

      " + } + } + }, + "VerifyMacOutput":{ + "type":"structure", + "required":[ + "KeyArn", + "KeyCheckValue" + ], + "members":{ + "KeyArn":{ + "shape":"KeyArn", + "documentation":"

      The keyARN of the encryption key that Amazon Web Services Payment Cryptography uses for MAC verification.

      " + }, + "KeyCheckValue":{ + "shape":"KeyCheckValue", + "documentation":"

      The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed. Amazon Web Services Payment Cryptography calculates the KCV by using standard algorithms, typically by encrypting 8 or 16 bytes or \"00\" or \"01\" and then truncating the result to the first 3 bytes, or 6 hex digits, of the resulting cryptogram.

      " + } + } + }, + "VerifyPinDataInput":{ + "type":"structure", + "required":[ + "EncryptedPinBlock", + "EncryptionKeyIdentifier", + "PinBlockFormat", + "PrimaryAccountNumber", + "VerificationAttributes", + "VerificationKeyIdentifier" + ], + "members":{ + "DukptAttributes":{ + "shape":"DukptAttributes", + "documentation":"

      The attributes and values for the DUKPT encrypted PIN block data.

      " + }, + "EncryptedPinBlock":{ + "shape":"HexLengthBetween16And32", + "documentation":"

      The encrypted PIN block data that Amazon Web Services Payment Cryptography verifies.

      " + }, + "EncryptionKeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The keyARN of the encryption key under which the PIN block data is encrypted. This key type can be PEK or BDK.

      " + }, + "PinBlockFormat":{ + "shape":"PinBlockFormatForPinData", + "documentation":"

      The PIN encoding format for pin data generation as specified in ISO 9564. Amazon Web Services Payment Cryptography supports ISO_Format_0 and ISO_Format_3.

      The ISO_Format_0 PIN block format is equivalent to the ANSI X9.8, VISA-1, and ECI-1 PIN block formats. It is similar to a VISA-4 PIN block format. It supports a PIN from 4 to 12 digits in length.

      The ISO_Format_3 PIN block format is the same as ISO_Format_0 except that the fill digits are random values from 10 to 15.

      " + }, + "PinDataLength":{ + "shape":"IntegerRangeBetween4And12", + "documentation":"

      The length of PIN being verified.

      ", + "box":true + }, + "PrimaryAccountNumber":{ + "shape":"NumberLengthBetween12And19", + "documentation":"

      The Primary Account Number (PAN), a unique identifier for a payment credit or debit card that associates the card with a specific account holder.

      " + }, + "VerificationAttributes":{ + "shape":"PinVerificationAttributes", + "documentation":"

      The attributes and values for PIN data verification.

      " + }, + "VerificationKeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

      The keyARN of the PIN verification key.

      " + } + } + }, + "VerifyPinDataOutput":{ + "type":"structure", + "required":[ + "EncryptionKeyArn", + "EncryptionKeyCheckValue", + "VerificationKeyArn", + "VerificationKeyCheckValue" + ], + "members":{ + "EncryptionKeyArn":{ + "shape":"KeyArn", + "documentation":"

      The keyARN of the PEK that Amazon Web Services Payment Cryptography uses for encrypted pin block generation.

      " + }, + "EncryptionKeyCheckValue":{ + "shape":"KeyCheckValue", + "documentation":"

      The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed. Amazon Web Services Payment Cryptography calculates the KCV by using standard algorithms, typically by encrypting 8 or 16 bytes or \"00\" or \"01\" and then truncating the result to the first 3 bytes, or 6 hex digits, of the resulting cryptogram.

      " + }, + "VerificationKeyArn":{ + "shape":"KeyArn", + "documentation":"

      The keyARN of the PIN encryption key that Amazon Web Services Payment Cryptography uses for PIN or PIN Offset verification.

      " + }, + "VerificationKeyCheckValue":{ + "shape":"KeyCheckValue", + "documentation":"

      The key check value (KCV) of the encryption key. The KCV is used to check if all parties holding a given key have the same key or to detect that a key has changed. Amazon Web Services Payment Cryptography calculates the KCV by using standard algorithms, typically by encrypting 8 or 16 bytes or \"00\" or \"01\" and then truncating the result to the first 3 bytes, or 6 hex digits, of the resulting cryptogram.

      " + } + } + }, + "VisaPin":{ + "type":"structure", + "required":["PinVerificationKeyIndex"], + "members":{ + "PinVerificationKeyIndex":{ + "shape":"IntegerRangeBetween0And9", + "documentation":"

      The value for PIN verification index. It is used in the Visa PIN algorithm to calculate the PVV (PIN Verification Value).

      " + } + }, + "documentation":"

      Parameters that are required to generate or verify Visa PIN.

      " + }, + "VisaPinVerification":{ + "type":"structure", + "required":[ + "PinVerificationKeyIndex", + "VerificationValue" + ], + "members":{ + "PinVerificationKeyIndex":{ + "shape":"IntegerRangeBetween0And9", + "documentation":"

      The value for PIN verification index. It is used in the Visa PIN algorithm to calculate the PVV (PIN Verification Value).

      " + }, + "VerificationValue":{ + "shape":"NumberLengthBetween4And12", + "documentation":"

      Parameters that are required to generate or verify Visa PVV (PIN Verification Value).

      " + } + }, + "documentation":"

      Parameters that are required to generate or verify Visa PIN.

      " + }, + "VisaPinVerificationValue":{ + "type":"structure", + "required":[ + "EncryptedPinBlock", + "PinVerificationKeyIndex" + ], + "members":{ + "EncryptedPinBlock":{ + "shape":"HexLengthBetween16And32", + "documentation":"

      The encrypted PIN block data to verify.

      " + }, + "PinVerificationKeyIndex":{ + "shape":"IntegerRangeBetween0And9", + "documentation":"

      The value for PIN verification index. It is used in the Visa PIN algorithm to calculate the PVV (PIN Verification Value).

      " + } + }, + "documentation":"

      Parameters that are required to generate or verify Visa PVV (PIN Verification Value).

      " + } + }, + "documentation":"

      You use the Amazon Web Services Payment Cryptography Data Plane to manage how encryption keys are used for payment-related transaction processing and associated cryptographic operations. You can encrypt, decrypt, generate, verify, and translate payment-related cryptographic operations in Amazon Web Services Payment Cryptography. For more information, see Data operations in the Amazon Web Services Payment Cryptography User Guide.

      To manage your encryption keys, you use the Amazon Web Services Payment Cryptography Control Plane. You can create, import, export, share, manage, and delete keys. You can also manage Identity and Access Management (IAM) policies for keys.

      " +} diff --git a/services/personalize/pom.xml b/services/personalize/pom.xml index b71475b8d98a..6b1115a77ed4 100644 --- a/services/personalize/pom.xml +++ b/services/personalize/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT personalize AWS Java SDK :: Services :: Personalize diff --git a/services/personalize/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/personalize/src/main/resources/codegen-resources/endpoint-rule-set.json index ed863d73bf5c..0f0b99c1ae42 100644 --- a/services/personalize/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/personalize/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": 
"Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://personalize-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": 
"booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://personalize-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://personalize-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://personalize.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], 
"type": "tree", @@ -231,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://personalize-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://personalize.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -240,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://personalize.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://personalize.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/personalize/src/main/resources/codegen-resources/endpoint-tests.json b/services/personalize/src/main/resources/codegen-resources/endpoint-tests.json index 5f7559a88039..a076e4c7bef2 100644 --- a/services/personalize/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/personalize/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,42 +1,29 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://personalize-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": 
"ap-south-1" - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://personalize-fips.ap-south-1.amazonaws.com" + "url": "https://personalize.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "ap-south-1" + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://personalize.ap-south-1.api.aws" + "url": "https://personalize.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, + "Region": "ap-northeast-2", "UseFIPS": false, - "Region": "ap-south-1" + "UseDualStack": false } }, { @@ -47,48 +34,35 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-south-1", "UseFIPS": false, - "Region": "ap-south-1" + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://personalize-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://personalize-fips.ca-central-1.amazonaws.com" + "url": "https://personalize.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "ca-central-1" + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", + 
"documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://personalize.ca-central-1.api.aws" + "url": "https://personalize.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "ca-central-1" + "UseDualStack": false } }, { @@ -99,48 +73,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ca-central-1", "UseFIPS": false, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://personalize-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://personalize-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://personalize.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { @@ -151,100 +86,9 @@ } }, "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://personalize-fips.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "us-west-2" - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://personalize-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - 
"UseFIPS": true, - "Region": "us-west-2" - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://personalize.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "us-west-2" - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://personalize.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "us-west-2" - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://personalize-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://personalize-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://personalize.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, + "Region": "eu-central-1", "UseFIPS": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { @@ -255,113 +99,100 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-west-1", "UseFIPS": false, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://personalize-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", + 
"documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://personalize-fips.ap-northeast-2.amazonaws.com" + "url": "https://personalize.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "ap-northeast-2" + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://personalize.ap-northeast-2.api.aws" + "url": "https://personalize.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": true, + "Region": "us-east-2", "UseFIPS": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://personalize.ap-northeast-2.amazonaws.com" + "url": "https://personalize.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://personalize-fips.ap-northeast-1.api.aws" + "url": "https://personalize-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": true, - "Region": "ap-northeast-1" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://personalize-fips.ap-northeast-1.amazonaws.com" + "url": "https://personalize-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://personalize.ap-northeast-1.api.aws" + "url": "https://personalize.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "ap-northeast-1" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://personalize.ap-northeast-1.amazonaws.com" + "url": "https://personalize.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { @@ -372,9 +203,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -385,9 +216,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -398,243 +229,183 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://personalize.cn-north-1.amazonaws.com.cn" + "url": "https://personalize-fips.us-gov-east-1.api.aws" } }, "params": { - 
"UseDualStack": false, - "UseFIPS": false, - "Region": "cn-north-1" - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://personalize-fips.ap-southeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "ap-southeast-1" + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://personalize-fips.ap-southeast-1.amazonaws.com" + "url": "https://personalize-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://personalize.ap-southeast-1.api.aws" + "url": "https://personalize.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "ap-southeast-1" + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://personalize.ap-southeast-1.amazonaws.com" + "url": "https://personalize.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", 
"expect": { - "endpoint": { - "url": "https://personalize-fips.ap-southeast-2.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "ap-southeast-2" + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://personalize-fips.ap-southeast-2.amazonaws.com" + "url": "https://personalize-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://personalize.ap-southeast-2.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "ap-southeast-2" + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://personalize.ap-southeast-2.amazonaws.com" + "url": "https://personalize.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": 
"https://personalize-fips.us-east-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://personalize-fips.us-east-1.amazonaws.com" + "url": "https://personalize-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://personalize.us-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://personalize.us-east-1.amazonaws.com" + "url": "https://personalize.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-east-1" - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://personalize-fips.us-east-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "us-east-2" - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled 
and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://personalize-fips.us-east-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-east-2" + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://personalize.us-east-2.api.aws" + "url": "https://example.com" } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-2" - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://personalize.us-east-2.amazonaws.com" - } - }, - "params": { "UseDualStack": false, - "UseFIPS": false, - "Region": "us-east-2" + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -644,9 +415,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -656,11 +427,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": 
"1.0" diff --git a/services/personalize/src/main/resources/codegen-resources/service-2.json b/services/personalize/src/main/resources/codegen-resources/service-2.json index dd5802031237..a2db3f61df38 100644 --- a/services/personalize/src/main/resources/codegen-resources/service-2.json +++ b/services/personalize/src/main/resources/codegen-resources/service-2.json @@ -65,7 +65,7 @@ {"shape":"ResourceInUseException"}, {"shape":"TooManyTagsException"} ], - "documentation":"

      Creates a campaign that deploys a solution version. When a client calls the GetRecommendations and GetPersonalizedRanking APIs, a campaign is specified in the request.

      Minimum Provisioned TPS and Auto-Scaling

      A transaction is a single GetRecommendations or GetPersonalizedRanking call. Transactions per second (TPS) is the throughput and unit of billing for Amazon Personalize. The minimum provisioned TPS (minProvisionedTPS) specifies the baseline throughput provisioned by Amazon Personalize, and thus, the minimum billing charge.

      If your TPS increases beyond minProvisionedTPS, Amazon Personalize auto-scales the provisioned capacity up and down, but never below minProvisionedTPS. There's a short time delay while the capacity is increased that might cause loss of transactions.

      The actual TPS used is calculated as the average requests/second within a 5-minute window. You pay for maximum of either the minimum provisioned TPS or the actual TPS. We recommend starting with a low minProvisionedTPS, track your usage using Amazon CloudWatch metrics, and then increase the minProvisionedTPS as necessary.

      Status

      A campaign can be in one of the following states:

      • CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED

      • DELETE PENDING > DELETE IN_PROGRESS

      To get the campaign status, call DescribeCampaign.

      Wait until the status of the campaign is ACTIVE before asking the campaign for recommendations.

      Related APIs

      ", + "documentation":"

      Creates a campaign that deploys a solution version. When a client calls the GetRecommendations and GetPersonalizedRanking APIs, a campaign is specified in the request.

      Minimum Provisioned TPS and Auto-Scaling

      A high minProvisionedTPS will increase your bill. We recommend starting with 1 for minProvisionedTPS (the default). Track your usage using Amazon CloudWatch metrics, and increase the minProvisionedTPS as necessary.

      A transaction is a single GetRecommendations or GetPersonalizedRanking call. Transactions per second (TPS) is the throughput and unit of billing for Amazon Personalize. The minimum provisioned TPS (minProvisionedTPS) specifies the baseline throughput provisioned by Amazon Personalize, and thus, the minimum billing charge.

      If your TPS increases beyond minProvisionedTPS, Amazon Personalize auto-scales the provisioned capacity up and down, but never below minProvisionedTPS. There's a short time delay while the capacity is increased that might cause loss of transactions.

      The actual TPS used is calculated as the average requests/second within a 5-minute window. You pay for maximum of either the minimum provisioned TPS or the actual TPS. We recommend starting with a low minProvisionedTPS, track your usage using Amazon CloudWatch metrics, and then increase the minProvisionedTPS as necessary.

      Status

      A campaign can be in one of the following states:

      • CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED

      • DELETE PENDING > DELETE IN_PROGRESS

      To get the campaign status, call DescribeCampaign.

      Wait until the status of the campaign is ACTIVE before asking the campaign for recommendations.

      Related APIs

      ", "idempotent":true }, "CreateDataset":{ @@ -209,7 +209,7 @@ {"shape":"ResourceInUseException"}, {"shape":"TooManyTagsException"} ], - "documentation":"

      Creates a recommender with the recipe (a Domain dataset group use case) you specify. You create recommenders for a Domain dataset group and specify the recommender's Amazon Resource Name (ARN) when you make a GetRecommendations request.

      Minimum recommendation requests per second

      When you create a recommender, you can configure the recommender's minimum recommendation requests per second. The minimum recommendation requests per second (minRecommendationRequestsPerSecond) specifies the baseline recommendation request throughput provisioned by Amazon Personalize. The default minRecommendationRequestsPerSecond is 1. A recommendation request is a single GetRecommendations operation. Request throughput is measured in requests per second and Amazon Personalize uses your requests per second to derive your requests per hour and the price of your recommender usage.

      If your requests per second increases beyond minRecommendationRequestsPerSecond, Amazon Personalize auto-scales the provisioned capacity up and down, but never below minRecommendationRequestsPerSecond. There's a short time delay while the capacity is increased that might cause loss of requests.

      Your bill is the greater of either the minimum requests per hour (based on minRecommendationRequestsPerSecond) or the actual number of requests. The actual request throughput used is calculated as the average requests/second within a one-hour window. We recommend starting with the default minRecommendationRequestsPerSecond, track your usage using Amazon CloudWatch metrics, and then increase the minRecommendationRequestsPerSecond as necessary.

      Status

      A recommender can be in one of the following states:

      • CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED

      • STOP PENDING > STOP IN_PROGRESS > INACTIVE > START PENDING > START IN_PROGRESS > ACTIVE

      • DELETE PENDING > DELETE IN_PROGRESS

      To get the recommender status, call DescribeRecommender.

      Wait until the status of the recommender is ACTIVE before asking the recommender for recommendations.

      Related APIs

      ", + "documentation":"

      Creates a recommender with the recipe (a Domain dataset group use case) you specify. You create recommenders for a Domain dataset group and specify the recommender's Amazon Resource Name (ARN) when you make a GetRecommendations request.

      Minimum recommendation requests per second

      A high minRecommendationRequestsPerSecond will increase your bill. We recommend starting with 1 for minRecommendationRequestsPerSecond (the default). Track your usage using Amazon CloudWatch metrics, and increase the minRecommendationRequestsPerSecond as necessary.

      When you create a recommender, you can configure the recommender's minimum recommendation requests per second. The minimum recommendation requests per second (minRecommendationRequestsPerSecond) specifies the baseline recommendation request throughput provisioned by Amazon Personalize. The default minRecommendationRequestsPerSecond is 1. A recommendation request is a single GetRecommendations operation. Request throughput is measured in requests per second and Amazon Personalize uses your requests per second to derive your requests per hour and the price of your recommender usage.

      If your requests per second increases beyond minRecommendationRequestsPerSecond, Amazon Personalize auto-scales the provisioned capacity up and down, but never below minRecommendationRequestsPerSecond. There's a short time delay while the capacity is increased that might cause loss of requests.

      Your bill is the greater of either the minimum requests per hour (based on minRecommendationRequestsPerSecond) or the actual number of requests. The actual request throughput used is calculated as the average requests/second within a one-hour window. We recommend starting with the default minRecommendationRequestsPerSecond, track your usage using Amazon CloudWatch metrics, and then increase the minRecommendationRequestsPerSecond as necessary.

      Status

      A recommender can be in one of the following states:

      • CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED

      • STOP PENDING > STOP IN_PROGRESS > INACTIVE > START PENDING > START IN_PROGRESS > ACTIVE

      • DELETE PENDING > DELETE IN_PROGRESS

      To get the recommender status, call DescribeRecommender.

      Wait until the status of the recommender is ACTIVE before asking the recommender for recommendations.

      Related APIs

      ", "idempotent":true }, "CreateSchema":{ @@ -244,7 +244,7 @@ {"shape":"ResourceInUseException"}, {"shape":"TooManyTagsException"} ], - "documentation":"

      Creates the configuration for training a model. A trained model is known as a solution. After the configuration is created, you train the model (create a solution) by calling the CreateSolutionVersion operation. Every time you call CreateSolutionVersion, a new version of the solution is created.

      After creating a solution version, you check its accuracy by calling GetSolutionMetrics. When you are satisfied with the version, you deploy it using CreateCampaign. The campaign provides recommendations to a client through the GetRecommendations API.

      To train a model, Amazon Personalize requires training data and a recipe. The training data comes from the dataset group that you provide in the request. A recipe specifies the training algorithm and a feature transformation. You can specify one of the predefined recipes provided by Amazon Personalize. Alternatively, you can specify performAutoML and Amazon Personalize will analyze your data and select the optimum USER_PERSONALIZATION recipe for you.

      Amazon Personalize doesn't support configuring the hpoObjective for solution hyperparameter optimization at this time.

      Status

      A solution can be in one of the following states:

      • CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED

      • DELETE PENDING > DELETE IN_PROGRESS

      To get the status of the solution, call DescribeSolution. Wait until the status shows as ACTIVE before calling CreateSolutionVersion.

      Related APIs

      " + "documentation":"

      Creates the configuration for training a model. A trained model is known as a solution version. After the configuration is created, you train the model (create a solution version) by calling the CreateSolutionVersion operation. Every time you call CreateSolutionVersion, a new version of the solution is created.

      After creating a solution version, you check its accuracy by calling GetSolutionMetrics. When you are satisfied with the version, you deploy it using CreateCampaign. The campaign provides recommendations to a client through the GetRecommendations API.

      To train a model, Amazon Personalize requires training data and a recipe. The training data comes from the dataset group that you provide in the request. A recipe specifies the training algorithm and a feature transformation. You can specify one of the predefined recipes provided by Amazon Personalize.

      Amazon Personalize doesn't support configuring the hpoObjective for solution hyperparameter optimization at this time.

      Status

      A solution can be in one of the following states:

      • CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED

      • DELETE PENDING > DELETE IN_PROGRESS

      To get the status of the solution, call DescribeSolution. Wait until the status shows as ACTIVE before calling CreateSolutionVersion.

      Related APIs

      " }, "CreateSolutionVersion":{ "name":"CreateSolutionVersion", @@ -919,7 +919,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ResourceInUseException"} ], - "documentation":"

      Get a list of tags attached to a resource.

      " + "documentation":"

      Get a list of tags attached to a resource.

      " }, "StartRecommender":{ "name":"StartRecommender", @@ -999,7 +999,7 @@ {"shape":"TooManyTagKeysException"}, {"shape":"ResourceInUseException"} ], - "documentation":"

      Remove tags that are attached to a resource.

      " + "documentation":"

      Remove tags that are attached to a resource.

      " }, "UpdateCampaign":{ "name":"UpdateCampaign", @@ -1046,7 +1046,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ResourceInUseException"} ], - "documentation":"

      Updates the recommender to modify the recommender configuration.

      ", + "documentation":"

      Updates the recommender to modify the recommender configuration. If you update the recommender to modify the columns used in training, Amazon Personalize automatically starts a full retraining of the models backing your recommender. While the update completes, you can still get recommendations from the recommender. The recommender uses the previous configuration until the update completes. To track the status of this update, use the latestRecommenderUpdate returned in the DescribeRecommender operation.

      ", "idempotent":true } }, @@ -1308,7 +1308,7 @@ }, "numResults":{ "shape":"NumBatchResults", - "documentation":"

      The number of predicted users generated by the batch segment job for each line of input data.

      " + "documentation":"

      The number of predicted users generated by the batch segment job for each line of input data. The maximum number of users per segment is 5 million.

      " }, "jobInput":{ "shape":"BatchSegmentJobInput", @@ -1410,7 +1410,7 @@ }, "minProvisionedTPS":{ "shape":"TransactionsPerSecond", - "documentation":"

      Specifies the requested minimum provisioned transactions (recommendations) per second.

      " + "documentation":"

      Specifies the requested minimum provisioned transactions (recommendations) per second. A high minProvisionedTPS will increase your bill. We recommend starting with 1 for minProvisionedTPS (the default). Track your usage using Amazon CloudWatch metrics, and increase the minProvisionedTPS as necessary.

      " }, "campaignConfig":{ "shape":"CampaignConfig", @@ -1540,6 +1540,15 @@ "member":{"shape":"CategoricalValue"}, "max":100 }, + "ColumnName":{ + "type":"string", + "max":150 + }, + "ColumnNamesList":{ + "type":"list", + "member":{"shape":"ColumnName"}, + "max":50 + }, "ContinuousHyperParameterRange":{ "type":"structure", "members":{ @@ -1615,7 +1624,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

      A list of tags to apply to the batch inference job.

      " + "documentation":"

      A list of tags to apply to the batch inference job.

      " } } }, @@ -1652,7 +1661,7 @@ }, "numResults":{ "shape":"NumBatchResults", - "documentation":"

      The number of predicted users generated by the batch segment job for each line of input data.

      " + "documentation":"

      The number of predicted users generated by the batch segment job for each line of input data. The maximum number of users per segment is 5 million.

      " }, "jobInput":{ "shape":"BatchSegmentJobInput", @@ -1668,7 +1677,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

      A list of tags to apply to the batch segment job.

      " + "documentation":"

      A list of tags to apply to the batch segment job.

      " } } }, @@ -1698,7 +1707,7 @@ }, "minProvisionedTPS":{ "shape":"TransactionsPerSecond", - "documentation":"

      Specifies the requested minimum provisioned transactions (recommendations) per second that Amazon Personalize will support.

      " + "documentation":"

      Specifies the requested minimum provisioned transactions (recommendations) per second that Amazon Personalize will support. A high minProvisionedTPS will increase your bill. We recommend starting with 1 for minProvisionedTPS (the default). Track your usage using Amazon CloudWatch metrics, and increase the minProvisionedTPS as necessary.

      " }, "campaignConfig":{ "shape":"CampaignConfig", @@ -1706,7 +1715,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

      A list of tags to apply to the campaign.

      " + "documentation":"

      A list of tags to apply to the campaign.

      " } } }, @@ -1750,7 +1759,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

      A list of tags to apply to the dataset export job.

      " + "documentation":"

      A list of tags to apply to the dataset export job.

      " } } }, @@ -1785,7 +1794,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

      A list of tags to apply to the dataset group.

      " + "documentation":"

      A list of tags to apply to the dataset group.

      " } } }, @@ -1829,7 +1838,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

      A list of tags to apply to the dataset import job.

      " + "documentation":"

      A list of tags to apply to the dataset import job.

      " }, "importMode":{ "shape":"ImportMode", @@ -1877,7 +1886,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

      A list of tags to apply to the dataset.

      " + "documentation":"

      A list of tags to apply to the dataset.

      " } } }, @@ -1907,7 +1916,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

      A list of tags to apply to the event tracker.

      " + "documentation":"

      A list of tags to apply to the event tracker.

      " } } }, @@ -1946,7 +1955,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

      A list of tags to apply to the filter.

      " + "documentation":"

      A list of tags to apply to the filter.

      " } } }, @@ -2021,7 +2030,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

      A list of tags to apply to the recommender.

      " + "documentation":"

      A list of tags to apply to the recommender.

      " } } }, @@ -2081,7 +2090,7 @@ }, "performAutoML":{ "shape":"PerformAutoML", - "documentation":"

      Whether to perform automated machine learning (AutoML). The default is false. For this case, you must specify recipeArn.

      When set to true, Amazon Personalize analyzes your training data and selects the optimal USER_PERSONALIZATION recipe and hyperparameters. In this case, you must omit recipeArn. Amazon Personalize determines the optimal recipe by running tests with different values for the hyperparameters. AutoML lengthens the training process as compared to selecting a specific recipe.

      " + "documentation":"

      We don't recommend enabling automated machine learning. Instead, match your use case to the available Amazon Personalize recipes. For more information, see Determining your use case.

      Whether to perform automated machine learning (AutoML). The default is false. For this case, you must specify recipeArn.

      When set to true, Amazon Personalize analyzes your training data and selects the optimal USER_PERSONALIZATION recipe and hyperparameters. In this case, you must omit recipeArn. Amazon Personalize determines the optimal recipe by running tests with different values for the hyperparameters. AutoML lengthens the training process as compared to selecting a specific recipe.

      " }, "recipeArn":{ "shape":"Arn", @@ -2101,7 +2110,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

      A list of tags to apply to the solution.

      " + "documentation":"

      A list of tags to apply to the solution.

      " } } }, @@ -2132,7 +2141,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

      A list of tags to apply to the solution version.

      " + "documentation":"

      A list of tags to apply to the solution version.

      " } } }, @@ -3146,6 +3155,12 @@ "type":"string", "max":256 }, + "ExcludedDatasetColumns":{ + "type":"map", + "key":{"shape":"DatasetType"}, + "value":{"shape":"ColumnNamesList"}, + "max":3 + }, "FailureReason":{"type":"string"}, "FeatureTransformation":{ "type":"structure", @@ -4282,7 +4297,11 @@ }, "minRecommendationRequestsPerSecond":{ "shape":"TransactionsPerSecond", - "documentation":"

      Specifies the requested minimum provisioned recommendation requests per second that Amazon Personalize will support.

      " + "documentation":"

      Specifies the requested minimum provisioned recommendation requests per second that Amazon Personalize will support. A high minRecommendationRequestsPerSecond will increase your bill. We recommend starting with 1 for minRecommendationRequestsPerSecond (the default). Track your usage using Amazon CloudWatch metrics, and increase the minRecommendationRequestsPerSecond as necessary.

      " + }, + "trainingDataConfig":{ + "shape":"TrainingDataConfig", + "documentation":"

      Specifies the training data configuration to use when creating a domain recommender.

      " } }, "documentation":"

      The configuration details of the recommender.

      " @@ -4433,7 +4452,7 @@ }, "performAutoML":{ "shape":"PerformAutoML", - "documentation":"

      When true, Amazon Personalize performs a search for the best USER_PERSONALIZATION recipe from the list specified in the solution configuration (recipeArn must not be specified). When false (the default), Amazon Personalize uses recipeArn for training.

      " + "documentation":"

      We don't recommend enabling automated machine learning. Instead, match your use case to the available Amazon Personalize recipes. For more information, see Determining your use case.

      When true, Amazon Personalize performs a search for the best USER_PERSONALIZATION recipe from the list specified in the solution configuration (recipeArn must not be specified). When false (the default), Amazon Personalize uses recipeArn for training.

      " }, "recipeArn":{ "shape":"Arn", @@ -4500,6 +4519,10 @@ "optimizationObjective":{ "shape":"OptimizationObjective", "documentation":"

      Describes the additional objective for the solution, such as maximizing streaming minutes or increasing revenue. For more information see Optimizing a solution.

      " + }, + "trainingDataConfig":{ + "shape":"TrainingDataConfig", + "documentation":"

      Specifies the training data configuration to use when creating a custom solution version (trained model).

      " } }, "documentation":"

      Describes the configuration properties for the solution.

      " @@ -4708,7 +4731,7 @@ "documentation":"

      The optional part of a key-value pair that makes up a tag. A value acts as a descriptor within a tag category (key).

      " } }, - "documentation":"

      The optional metadata that you apply to resources to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. For more information see Tagging Personalize resources.

      " + "documentation":"

      The optional metadata that you apply to resources to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. For more information see Tagging Amazon Personalize resources.

      " }, "TagKey":{ "type":"string", @@ -4735,7 +4758,7 @@ }, "tags":{ "shape":"Tags", - "documentation":"

      Tags to apply to the resource. For more information see Tagging Personalize resources.

      " + "documentation":"

      Tags to apply to the resource. For more information see Tagging Amazon Personalize resources.

      " } } }, @@ -4776,6 +4799,16 @@ "type":"string", "max":256 }, + "TrainingDataConfig":{ + "type":"structure", + "members":{ + "excludedDatasetColumns":{ + "shape":"ExcludedDatasetColumns", + "documentation":"

      Specifies the columns to exclude from training. Each key is a dataset type, and each value is a list of columns. Exclude columns to control what data Amazon Personalize uses to generate recommendations. For example, you might have a column that you want to use only to filter recommendations. You can exclude this column from training and Amazon Personalize considers it only when filtering.

      " + } + }, + "documentation":"

      The training data configuration to use when creating a domain recommender or custom solution version (trained model).

      " + }, "TrainingHours":{ "type":"double", "min":0 @@ -4842,7 +4875,7 @@ }, "minProvisionedTPS":{ "shape":"TransactionsPerSecond", - "documentation":"

      Specifies the requested minimum provisioned transactions (recommendations) per second that Amazon Personalize will support.

      " + "documentation":"

      Specifies the requested minimum provisioned transactions (recommendations) per second that Amazon Personalize will support. A high minProvisionedTPS will increase your bill. We recommend starting with 1 for minProvisionedTPS (the default). Track your usage using Amazon CloudWatch metrics, and increase the minProvisionedTPS as necessary.

      " }, "campaignConfig":{ "shape":"CampaignConfig", diff --git a/services/personalizeevents/pom.xml b/services/personalizeevents/pom.xml index b29b3a813511..94c1c170fa8a 100644 --- a/services/personalizeevents/pom.xml +++ b/services/personalizeevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT personalizeevents AWS Java SDK :: Services :: Personalize Events diff --git a/services/personalizeruntime/pom.xml b/services/personalizeruntime/pom.xml index 987e6749f0b9..35bed6ed2dd2 100644 --- a/services/personalizeruntime/pom.xml +++ b/services/personalizeruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT personalizeruntime AWS Java SDK :: Services :: Personalize Runtime diff --git a/services/pi/pom.xml b/services/pi/pom.xml index 12e8caa3e56d..ae53cf3684e8 100644 --- a/services/pi/pom.xml +++ b/services/pi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT pi AWS Java SDK :: Services :: PI diff --git a/services/pinpoint/pom.xml b/services/pinpoint/pom.xml index 264ae50d9a90..8d8076ff782d 100644 --- a/services/pinpoint/pom.xml +++ b/services/pinpoint/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT pinpoint AWS Java SDK :: Services :: Amazon Pinpoint diff --git a/services/pinpoint/src/main/resources/codegen-resources/service-2.json b/services/pinpoint/src/main/resources/codegen-resources/service-2.json index 4ac08341b7ff..4bd141375f91 100644 --- a/services/pinpoint/src/main/resources/codegen-resources/service-2.json +++ b/services/pinpoint/src/main/resources/codegen-resources/service-2.json @@ -8666,7 +8666,7 @@ "tags": { "shape": "MapOf__string", "locationName": "tags", - "documentation": "

      A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.

      " + "documentation": "

      As of 22-05-2023 tags has been deprecated for update operations. After this date any value in tags is not processed and an error code is not returned. To manage tags we recommend using either Tags in the API Reference for Amazon Pinpoint, resourcegroupstaggingapi commands in the AWS Command Line Interface Documentation or resourcegroupstaggingapi in the AWS SDK.

      (Deprecated) A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.

      " }, "TemplateDescription": { "shape": "__string", @@ -11810,7 +11810,7 @@ "tags": { "shape": "MapOf__string", "locationName": "tags", - "documentation": "

      A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.

      " + "documentation": "

      As of 22-05-2023 tags has been deprecated for update operations. After this date any value in tags is not processed and an error code is not returned. To manage tags we recommend using either Tags in the API Reference for Amazon Pinpoint, resourcegroupstaggingapi commands in the AWS Command Line Interface Documentation or resourcegroupstaggingapi in the AWS SDK.

      (Deprecated) A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.

      " }, "TemplateDescription": { "shape": "__string", @@ -13125,7 +13125,7 @@ "tags": { "shape": "MapOf__string", "locationName": "tags", - "documentation": "

      A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.

      " + "documentation": "

      As of 22-05-2023 tags has been deprecated for update operations. After this date any value in tags is not processed and an error code is not returned. To manage tags we recommend using either Tags in the API Reference for Amazon Pinpoint, resourcegroupstaggingapi commands in the AWS Command Line Interface Documentation or resourcegroupstaggingapi in the AWS SDK.

      (Deprecated) A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.

      " }, "TemplateDescription": { "shape": "__string", @@ -13650,7 +13650,7 @@ "tags": { "shape": "MapOf__string", "locationName": "tags", - "documentation": "

      A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.

      " + "documentation": "

      As of 22-05-2023 tags has been deprecated for update operations. After this date any value in tags is not processed and an error code is not returned. To manage tags we recommend using either Tags in the API Reference for Amazon Pinpoint, resourcegroupstaggingapi commands in the AWS Command Line Interface Documentation or resourcegroupstaggingapi in the AWS SDK.

      (Deprecated) A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.

      " }, "TemplateDescription": { "shape": "__string", @@ -15757,7 +15757,7 @@ "tags": { "shape": "MapOf__string", "locationName": "tags", - "documentation": "

      A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.

      " + "documentation": "

      As of 22-05-2023 tags has been deprecated for update operations. After this date any value in tags is not processed and an error code is not returned. To manage tags we recommend using either Tags in the API Reference for Amazon Pinpoint, resourcegroupstaggingapi commands in the AWS Command Line Interface Documentation or resourcegroupstaggingapi in the AWS SDK.

      (Deprecated) A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.

      " }, "TemplateDescription": { "shape": "__string", @@ -15938,7 +15938,7 @@ "tags": { "shape": "MapOf__string", "locationName": "tags", - "documentation": "

      A string-to-string map of key-value pairs that defines the tags to associate with the campaign. Each tag consists of a required tag key and an associated tag value.

      " + "documentation": "

      As of 22-05-2023 tags has been deprecated for update operations. After this date any value in tags is not processed and an error code is not returned. To manage tags we recommend using either Tags in the API Reference for Amazon Pinpoint, resourcegroupstaggingapi commands in the AWS Command Line Interface Documentation or resourcegroupstaggingapi in the AWS SDK.

      (Deprecated) A string-to-string map of key-value pairs that defines the tags to associate with the campaign. Each tag consists of a required tag key and an associated tag value.

      " }, "TemplateConfiguration": { "shape": "TemplateConfiguration", @@ -16076,7 +16076,7 @@ "tags": { "shape": "MapOf__string", "locationName": "tags", - "documentation": "

      A string-to-string map of key-value pairs that defines the tags to associate with the segment. Each tag consists of a required tag key and an associated tag value.

      " + "documentation": "

      As of 22-05-2023 tags has been deprecated for update operations. After this date any value in tags is not processed and an error code is not returned. To manage tags we recommend using either Tags in the API Reference for Amazon Pinpoint, resourcegroupstaggingapi commands in the AWS Command Line Interface Documentation or resourcegroupstaggingapi in the AWS SDK.

      (Deprecated) A string-to-string map of key-value pairs that defines the tags to associate with the segment. Each tag consists of a required tag key and an associated tag value.

      " } }, "documentation": "

      Specifies the configuration, dimension, and other settings for a segment. A WriteSegmentRequest object can include a Dimensions object or a SegmentGroups object, but not both.

      " diff --git a/services/pinpointemail/pom.xml b/services/pinpointemail/pom.xml index 5ecfa91b2b2b..7e60077e320d 100644 --- a/services/pinpointemail/pom.xml +++ b/services/pinpointemail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT pinpointemail AWS Java SDK :: Services :: Pinpoint Email diff --git a/services/pinpointsmsvoice/pom.xml b/services/pinpointsmsvoice/pom.xml index 00b73b6c40ff..2ccadb8cdf34 100644 --- a/services/pinpointsmsvoice/pom.xml +++ b/services/pinpointsmsvoice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT pinpointsmsvoice AWS Java SDK :: Services :: Pinpoint SMS Voice diff --git a/services/pinpointsmsvoicev2/pom.xml b/services/pinpointsmsvoicev2/pom.xml index e1eb66a00526..b2b4c3e8b7e7 100644 --- a/services/pinpointsmsvoicev2/pom.xml +++ b/services/pinpointsmsvoicev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT pinpointsmsvoicev2 AWS Java SDK :: Services :: Pinpoint SMS Voice V2 diff --git a/services/pipes/pom.xml b/services/pipes/pom.xml index 48cfe011c614..c44cac05c807 100644 --- a/services/pipes/pom.xml +++ b/services/pipes/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT pipes AWS Java SDK :: Services :: Pipes diff --git a/services/polly/pom.xml b/services/polly/pom.xml index eb9c7e256b98..e4a76b8c83d4 100644 --- a/services/polly/pom.xml +++ b/services/polly/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT polly AWS Java SDK :: Services :: Amazon Polly diff --git a/services/polly/src/main/resources/codegen-resources/endpoint-tests.json b/services/polly/src/main/resources/codegen-resources/endpoint-tests.json index e1561a79081f..748ad8f6a2b8 100644 --- a/services/polly/src/main/resources/codegen-resources/endpoint-tests.json +++ 
b/services/polly/src/main/resources/codegen-resources/endpoint-tests.json @@ -9,8 +9,8 @@ }, "params": { "Region": "af-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -22,8 +22,8 @@ }, "params": { "Region": "ap-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -35,8 +35,8 @@ }, "params": { "Region": "ap-northeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -48,8 +48,8 @@ }, "params": { "Region": "ap-northeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,8 +61,8 @@ }, "params": { "Region": "ap-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -74,8 +74,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -87,8 +87,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -100,8 +100,8 @@ }, "params": { "Region": "ca-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -113,8 +113,8 @@ }, "params": { "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -126,8 +126,8 @@ }, "params": { "Region": "eu-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -139,8 +139,8 @@ }, "params": { "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -152,8 +152,8 @@ }, "params": { "Region": "eu-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -165,8 +165,8 @@ }, "params": { "Region": "eu-west-3", - "UseDualStack": 
false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -178,8 +178,8 @@ }, "params": { "Region": "me-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -191,8 +191,8 @@ }, "params": { "Region": "sa-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -204,8 +204,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -217,8 +217,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -230,8 +230,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -243,8 +243,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -256,8 +256,8 @@ }, "params": { "Region": "us-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -269,8 +269,8 @@ }, "params": { "Region": "us-west-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -282,8 +282,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -295,8 +295,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -308,8 +308,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -321,8 +321,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -334,8 +334,8 @@ }, "params": { "Region": "cn-northwest-1", - "UseDualStack": false, - 
"UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -347,8 +347,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -360,8 +360,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -373,8 +373,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -386,8 +386,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -399,8 +399,8 @@ }, "params": { "Region": "us-gov-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -412,8 +412,8 @@ }, "params": { "Region": "us-gov-west-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -425,8 +425,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -438,8 +438,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -451,8 +451,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -464,8 +464,19 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -477,8 +488,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - 
"UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -490,8 +512,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -503,8 +536,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -516,8 +560,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -529,8 +573,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -542,8 +586,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -554,8 +598,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -566,10 +610,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + 
"UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/polly/src/main/resources/codegen-resources/service-2.json b/services/polly/src/main/resources/codegen-resources/service-2.json index 37736df2d508..0a69ffffcb17 100644 --- a/services/polly/src/main/resources/codegen-resources/service-2.json +++ b/services/polly/src/main/resources/codegen-resources/service-2.json @@ -421,7 +421,8 @@ "de-AT", "yue-CN", "ar-AE", - "fi-FI" + "fi-FI", + "en-IE" ] }, "LanguageCodeList":{ @@ -1101,7 +1102,9 @@ "Ruth", "Stephen", "Kazuha", - "Tomoko" + "Tomoko", + "Niamh", + "Sofie" ] }, "VoiceList":{ diff --git a/services/pom.xml b/services/pom.xml index 18f4f2fff3d3..d77c7b140dd4 100644 --- a/services/pom.xml +++ b/services/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT services AWS Java SDK :: Services @@ -364,6 +364,11 @@ ivsrealtime vpclattice osis + mediapackagev2 + paymentcryptographydata + paymentcryptography + codegurusecurity + verifiedpermissions The AWS Java SDK services https://aws.amazon.com/sdkforjava diff --git a/services/pricing/pom.xml b/services/pricing/pom.xml index 8da951478c38..77acef16435e 100644 --- a/services/pricing/pom.xml +++ b/services/pricing/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 pricing diff --git a/services/pricing/src/main/resources/codegen-resources/endpoint-tests.json b/services/pricing/src/main/resources/codegen-resources/endpoint-tests.json index 6d77c402ab9b..db676384e56c 100644 --- a/services/pricing/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/pricing/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { + "Region": "ap-south-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-south-1" + 
"UseDualStack": false } }, { @@ -21,9 +21,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -47,9 +47,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -73,9 +73,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -86,9 +86,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -99,9 +99,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -112,9 +112,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -125,9 +125,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -138,9 +138,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -151,9 +151,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -164,9 +164,20 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region 
us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -177,9 +188,20 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -190,9 +212,20 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -203,9 +236,20 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -216,9 +260,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -229,9 +273,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -254,9 +298,9 @@ 
"error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -266,11 +310,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/pricing/src/main/resources/codegen-resources/service-2.json b/services/pricing/src/main/resources/codegen-resources/service-2.json index 1b648c420ccb..699d05c1b3df 100644 --- a/services/pricing/src/main/resources/codegen-resources/service-2.json +++ b/services/pricing/src/main/resources/codegen-resources/service-2.json @@ -23,10 +23,10 @@ "input":{"shape":"DescribeServicesRequest"}, "output":{"shape":"DescribeServicesResponse"}, "errors":[ - {"shape":"InternalErrorException"}, {"shape":"InvalidParameterException"}, - {"shape":"NotFoundException"}, {"shape":"InvalidNextTokenException"}, + {"shape":"NotFoundException"}, + {"shape":"InternalErrorException"}, {"shape":"ExpiredNextTokenException"} ], "documentation":"

      Returns the metadata for one service or a list of the metadata for all services. Use this without a service code to get the service codes for all services. Use it with a service code, such as AmazonEC2, to get information specific to that service, such as the attribute names available for that service. For example, some of the attribute names available for EC2 are volumeType, maxIopsVolume, operation, locationType, and instanceCapacity10xlarge.

      " @@ -40,10 +40,10 @@ "input":{"shape":"GetAttributeValuesRequest"}, "output":{"shape":"GetAttributeValuesResponse"}, "errors":[ - {"shape":"InternalErrorException"}, {"shape":"InvalidParameterException"}, - {"shape":"NotFoundException"}, {"shape":"InvalidNextTokenException"}, + {"shape":"NotFoundException"}, + {"shape":"InternalErrorException"}, {"shape":"ExpiredNextTokenException"} ], "documentation":"

      Returns a list of attribute values. Attributes are similar to the details in a Price List API offer file. For a list of available attributes, see Offer File Definitions in the Billing and Cost Management User Guide.

      " @@ -57,10 +57,10 @@ "input":{"shape":"GetPriceListFileUrlRequest"}, "output":{"shape":"GetPriceListFileUrlResponse"}, "errors":[ - {"shape":"InternalErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"NotFoundException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"InternalErrorException"} ], "documentation":"

      This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10).

      This returns the URL that you can retrieve your Price List file from. This URL is based on the PriceListArn and FileFormat that you retrieve from the ListPriceLists response.

      " }, @@ -73,10 +73,10 @@ "input":{"shape":"GetProductsRequest"}, "output":{"shape":"GetProductsResponse"}, "errors":[ - {"shape":"InternalErrorException"}, {"shape":"InvalidParameterException"}, - {"shape":"NotFoundException"}, {"shape":"InvalidNextTokenException"}, + {"shape":"NotFoundException"}, + {"shape":"InternalErrorException"}, {"shape":"ExpiredNextTokenException"} ], "documentation":"

      Returns a list of all products that match the filter criteria.

      " @@ -90,12 +90,12 @@ "input":{"shape":"ListPriceListsRequest"}, "output":{"shape":"ListPriceListsResponse"}, "errors":[ - {"shape":"InternalErrorException"}, {"shape":"InvalidParameterException"}, - {"shape":"NotFoundException"}, {"shape":"InvalidNextTokenException"}, - {"shape":"ExpiredNextTokenException"}, - {"shape":"AccessDeniedException"} + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalErrorException"}, + {"shape":"ExpiredNextTokenException"} ], "documentation":"

      This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10).

      This returns a list of Price List references that the requester is authorized to view, given a ServiceCode, CurrencyCode, and an EffectiveDate. Use without a RegionCode filter to list Price List references from all available Amazon Web Services Regions. Use with a RegionCode filter to get the Price List reference that's specific to a specific Amazon Web Services Region. You can use the PriceListArn from the response to get your preferred Price List files through the GetPriceListFileUrl API.

      " } @@ -129,12 +129,13 @@ }, "BoxedInteger":{ "type":"integer", + "box":true, "max":100, "min":1 }, "CurrencyCode":{ "type":"string", - "pattern":"^[A-Z]{3}$" + "pattern":"[A-Z]{3}" }, "DescribeServicesRequest":{ "type":"structure", @@ -153,8 +154,7 @@ }, "MaxResults":{ "shape":"BoxedInteger", - "documentation":"

      The maximum number of results that you want returned in the response.

      ", - "box":true + "documentation":"

      The maximum number of results that you want returned in the response.

      " } } }, @@ -245,8 +245,7 @@ }, "MaxResults":{ "shape":"BoxedInteger", - "documentation":"

      The maximum number of results to return in response.

      ", - "box":true + "documentation":"

      The maximum number of results to return in response.

      " } } }, @@ -311,8 +310,7 @@ }, "MaxResults":{ "shape":"BoxedInteger", - "documentation":"

      The maximum number of results to return in the response.

      ", - "box":true + "documentation":"

      The maximum number of results to return in the response.

      " } } }, @@ -339,7 +337,8 @@ "Message":{"shape":"errorMessage"} }, "documentation":"

      An error on the server occurred during the processing of your request. Try again later.

      ", - "exception":true + "exception":true, + "fault":true }, "InvalidNextTokenException":{ "type":"structure", @@ -406,6 +405,7 @@ }, "MaxResults":{ "type":"integer", + "box":true, "max":100, "min":1 }, @@ -443,13 +443,12 @@ "type":"string", "max":2048, "min":18, - "pattern":"^arn:.+:pricing::.*:price-list/.{1,255}/.{1,32}/[A-Z]{3}/[0-9]{14}/[^/]*$" + "pattern":"arn:[A-Za-z0-9][-.A-Za-z0-9]{0,62}:pricing:::price-list/[A-Za-z0-9_/.-]{1,1023}" }, - "PriceListJsonItem":{"type":"string"}, "PriceListJsonItems":{ "type":"list", "member":{ - "shape":"PriceListJsonItem", + "shape":"SynthesizedJsonPriceListJsonItem", "jsonvalue":true } }, @@ -487,7 +486,8 @@ "member":{"shape":"Service"} }, "String":{"type":"string"}, + "SynthesizedJsonPriceListJsonItem":{"type":"string"}, "errorMessage":{"type":"string"} }, - "documentation":"

      Amazon Web Services Price List API is a centralized and convenient way to programmatically query Amazon Web Services for services, products, and pricing information. The Amazon Web Services Price List uses standardized product attributes such as Location, Storage Class, and Operating System, and provides prices at the SKU level. You can use the Amazon Web Services Price List to build cost control and scenario planning tools, reconcile billing data, forecast future spend for budgeting purposes, and provide cost benefit analysis that compare your internal workloads with Amazon Web Services.

      Use GetServices without a service code to retrieve the service codes for all AWS services, then GetServices with a service code to retrieve the attribute names for that service. After you have the service code and attribute names, you can use GetAttributeValues to see what values are available for an attribute. With the service code and an attribute name and value, you can use GetProducts to find specific products that you're interested in, such as an AmazonEC2 instance, with a Provisioned IOPS volumeType.

      Service Endpoint

      Amazon Web Services Price List service API provides the following two endpoints:

      • https://api.pricing.us-east-1.amazonaws.com

      • https://api.pricing.ap-south-1.amazonaws.com

      " + "documentation":"

      The Amazon Web Services Price List API is a centralized and convenient way to programmatically query Amazon Web Services for services, products, and pricing information. The Amazon Web Services Price List uses standardized product attributes such as Location, Storage Class, and Operating System, and provides prices at the SKU level. You can use the Amazon Web Services Price List to do the following:

      • Build cost control and scenario planning tools

      • Reconcile billing data

      • Forecast future spend for budgeting purposes

      • Provide cost benefit analysis that compares your internal workloads with Amazon Web Services

      Use GetServices without a service code to retrieve the service codes for all Amazon Web Services, then GetServices with a service code to retrieve the attribute names for that service. After you have the service code and attribute names, you can use GetAttributeValues to see what values are available for an attribute. With the service code and an attribute name and value, you can use GetProducts to find specific products that you're interested in, such as an AmazonEC2 instance, with a Provisioned IOPS volumeType.

      You can use the following endpoints for the Amazon Web Services Price List API:

      • https://api.pricing.us-east-1.amazonaws.com

      • https://api.pricing.ap-south-1.amazonaws.com

      " } diff --git a/services/pricing/src/main/resources/codegen-resources/waiters-2.json b/services/pricing/src/main/resources/codegen-resources/waiters-2.json new file mode 100644 index 000000000000..13f60ee66be6 --- /dev/null +++ b/services/pricing/src/main/resources/codegen-resources/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/services/privatenetworks/pom.xml b/services/privatenetworks/pom.xml index 31e871019dc6..c95e99ed443c 100644 --- a/services/privatenetworks/pom.xml +++ b/services/privatenetworks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT privatenetworks AWS Java SDK :: Services :: Private Networks diff --git a/services/proton/pom.xml b/services/proton/pom.xml index 504ceefe685b..16fd0d850181 100644 --- a/services/proton/pom.xml +++ b/services/proton/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT proton AWS Java SDK :: Services :: Proton diff --git a/services/qldb/pom.xml b/services/qldb/pom.xml index 5cbf6e307184..20dd6f5ca181 100644 --- a/services/qldb/pom.xml +++ b/services/qldb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT qldb AWS Java SDK :: Services :: QLDB diff --git a/services/qldbsession/pom.xml b/services/qldbsession/pom.xml index c5d13b5b7010..76ef203d3fc3 100644 --- a/services/qldbsession/pom.xml +++ b/services/qldbsession/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT qldbsession AWS Java SDK :: Services :: QLDB Session diff --git a/services/quicksight/pom.xml b/services/quicksight/pom.xml index 98d7aa0d3a5f..e75273449436 100644 --- a/services/quicksight/pom.xml +++ b/services/quicksight/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT quicksight AWS Java SDK :: Services :: QuickSight diff --git 
a/services/quicksight/src/main/resources/codegen-resources/paginators-1.json b/services/quicksight/src/main/resources/codegen-resources/paginators-1.json index 488b0792e412..18fac260cadb 100644 --- a/services/quicksight/src/main/resources/codegen-resources/paginators-1.json +++ b/services/quicksight/src/main/resources/codegen-resources/paginators-1.json @@ -6,6 +6,18 @@ "limit_key": "MaxResults", "result_key": "AnalysisSummaryList" }, + "ListAssetBundleExportJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "AssetBundleExportJobSummaryList" + }, + "ListAssetBundleImportJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "AssetBundleImportJobSummaryList" + }, "ListDashboardVersions": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/quicksight/src/main/resources/codegen-resources/service-2.json b/services/quicksight/src/main/resources/codegen-resources/service-2.json index b25101ace826..704e0bad91f7 100644 --- a/services/quicksight/src/main/resources/codegen-resources/service-2.json +++ b/services/quicksight/src/main/resources/codegen-resources/service-2.json @@ -989,6 +989,36 @@ ], "documentation":"

      Provides the read and write permissions for an analysis.

      " }, + "DescribeAssetBundleExportJob":{ + "name":"DescribeAssetBundleExportJob", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/asset-bundle-export-jobs/{AssetBundleExportJobId}" + }, + "input":{"shape":"DescribeAssetBundleExportJobRequest"}, + "output":{"shape":"DescribeAssetBundleExportJobResponse"}, + "errors":[ + {"shape":"UnsupportedUserEditionException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Describes an existing export job.

      Poll job descriptions after a job starts to know the status of the job. When a job succeeds, a URL is provided to download the exported assets' data from. Download URLs are valid for five minutes after they are generated. You can call the DescribeAssetBundleExportJob API for a new download URL as needed.

      Job descriptions are available for 14 days after the job starts.

      " + }, + "DescribeAssetBundleImportJob":{ + "name":"DescribeAssetBundleImportJob", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/asset-bundle-import-jobs/{AssetBundleImportJobId}" + }, + "input":{"shape":"DescribeAssetBundleImportJobRequest"}, + "output":{"shape":"DescribeAssetBundleImportJobResponse"}, + "errors":[ + {"shape":"UnsupportedUserEditionException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Describes an existing import job.

      Poll job descriptions after starting a job to know when it has succeeded or failed. Job descriptions are available for 14 days after the job starts.

      " + }, "DescribeDashboard":{ "name":"DescribeDashboard", "http":{ @@ -1651,6 +1681,40 @@ ], "documentation":"

      Lists Amazon QuickSight analyses that exist in the specified Amazon Web Services account.

      " }, + "ListAssetBundleExportJobs":{ + "name":"ListAssetBundleExportJobs", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/asset-bundle-export-jobs" + }, + "input":{"shape":"ListAssetBundleExportJobsRequest"}, + "output":{"shape":"ListAssetBundleExportJobsResponse"}, + "errors":[ + {"shape":"UnsupportedUserEditionException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Lists all asset bundle export jobs that have taken place in the last 14 days. Jobs created more than 14 days ago are deleted forever and are not returned. If you are using the same job ID for multiple jobs, ListAssetBundleExportJobs only returns the most recent job that uses the repeated job ID.

      " + }, + "ListAssetBundleImportJobs":{ + "name":"ListAssetBundleImportJobs", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/asset-bundle-import-jobs" + }, + "input":{"shape":"ListAssetBundleImportJobsRequest"}, + "output":{"shape":"ListAssetBundleImportJobsResponse"}, + "errors":[ + {"shape":"UnsupportedUserEditionException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Lists all asset bundle import jobs that have taken place in the last 14 days. Jobs created more than 14 days ago are deleted forever and are not returned. If you are using the same job ID for multiple jobs, ListAssetBundleImportJobs only returns the most recent job that uses the repeated job ID.

      " + }, "ListDashboardVersions":{ "name":"ListDashboardVersions", "http":{ @@ -2283,6 +2347,44 @@ ], "documentation":"

      Use the SearchGroups operation to search groups in a specified Amazon QuickSight namespace using the supplied filters.

      " }, + "StartAssetBundleExportJob":{ + "name":"StartAssetBundleExportJob", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/asset-bundle-export-jobs/export" + }, + "input":{"shape":"StartAssetBundleExportJobRequest"}, + "output":{"shape":"StartAssetBundleExportJobResponse"}, + "errors":[ + {"shape":"UnsupportedUserEditionException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"AccessDeniedException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Starts an Asset Bundle export job.

      An Asset Bundle export job exports specified Amazon QuickSight assets. You can also choose to export any asset dependencies in the same job. Export jobs run asynchronously and can be polled with a DescribeAssetBundleExportJob API call. When a job is successfully completed, a download URL that contains the exported assets is returned. The URL is valid for 5 minutes and can be refreshed with a DescribeAssetBundleExportJob API call. Each Amazon QuickSight account can run up to 10 export jobs concurrently.

      The API caller must have the necessary permissions in their IAM role to access each resource before the resources can be exported.

      " + }, + "StartAssetBundleImportJob":{ + "name":"StartAssetBundleImportJob", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/asset-bundle-import-jobs/import" + }, + "input":{"shape":"StartAssetBundleImportJobRequest"}, + "output":{"shape":"StartAssetBundleImportJobResponse"}, + "errors":[ + {"shape":"UnsupportedUserEditionException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"AccessDeniedException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Starts an Asset Bundle import job.

      An Asset Bundle import job imports specified Amazon QuickSight assets into an Amazon QuickSight account. You can also choose to import a naming prefix and specified configuration overrides. The assets that are contained in the bundle file that you provide are used to create or update a new or existing asset in your Amazon QuickSight account. Each Amazon QuickSight account can run up to 10 import jobs concurrently.

      The API caller must have the necessary \"create\", \"describe\", and \"update\" permissions in their IAM role to access each resource type that is contained in the bundle file before the resources can be imported.

      " + }, "TagResource":{ "name":"TagResource", "http":{ @@ -3316,139 +3418,867 @@ "documentation":"

      The name of the parameter that is used for the anchor date configuration.

      " } }, - "documentation":"

      The date configuration of the filter.

      " + "documentation":"

      The date configuration of the filter.

      " + }, + "AnchorOption":{ + "type":"string", + "enum":["NOW"] + }, + "AnonymousUserDashboardEmbeddingConfiguration":{ + "type":"structure", + "required":["InitialDashboardId"], + "members":{ + "InitialDashboardId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

      The dashboard ID for the dashboard that you want the user to see first. This ID is included in the output URL. When the URL in response is accessed, Amazon QuickSight renders this dashboard.

      The Amazon Resource Name (ARN) of this dashboard must be included in the AuthorizedResourceArns parameter. Otherwise, the request will fail with InvalidParameterValueException.

      " + } + }, + "documentation":"

      Information about the dashboard that you want to embed.

      " + }, + "AnonymousUserDashboardVisualEmbeddingConfiguration":{ + "type":"structure", + "required":["InitialDashboardVisualId"], + "members":{ + "InitialDashboardVisualId":{ + "shape":"DashboardVisualId", + "documentation":"

      The visual ID for the visual that you want the user to see. This ID is included in the output URL. When the URL in response is accessed, Amazon QuickSight renders this visual.

      The Amazon Resource Name (ARN) of the dashboard that the visual belongs to must be included in the AuthorizedResourceArns parameter. Otherwise, the request will fail with InvalidParameterValueException.

      " + } + }, + "documentation":"

      The experience that you are embedding. You can use this object to generate a url that embeds a visual into your application.

      " + }, + "AnonymousUserEmbeddingExperienceConfiguration":{ + "type":"structure", + "members":{ + "Dashboard":{ + "shape":"AnonymousUserDashboardEmbeddingConfiguration", + "documentation":"

      The type of embedding experience. In this case, Amazon QuickSight dashboards.

      " + }, + "DashboardVisual":{ + "shape":"AnonymousUserDashboardVisualEmbeddingConfiguration", + "documentation":"

      The type of embedding experience. In this case, Amazon QuickSight visuals.

      " + }, + "QSearchBar":{ + "shape":"AnonymousUserQSearchBarEmbeddingConfiguration", + "documentation":"

      The Q search bar that you want to use for anonymous user embedding.

      " + } + }, + "documentation":"

      The type of experience you want to embed. For anonymous users, you can embed Amazon QuickSight dashboards.

      " + }, + "AnonymousUserQSearchBarEmbeddingConfiguration":{ + "type":"structure", + "required":["InitialTopicId"], + "members":{ + "InitialTopicId":{ + "shape":"RestrictiveResourceId", + "documentation":"

      The QuickSight Q topic ID of the topic that you want the anonymous user to see first. This ID is included in the output URL. When the URL in response is accessed, Amazon QuickSight renders the Q search bar with this topic pre-selected.

      The Amazon Resource Name (ARN) of this Q topic must be included in the AuthorizedResourceArns parameter. Otherwise, the request will fail with InvalidParameterValueException.

      " + } + }, + "documentation":"

      The settings that you want to use with the Q search bar.

      " + }, + "ArcAxisConfiguration":{ + "type":"structure", + "members":{ + "Range":{ + "shape":"ArcAxisDisplayRange", + "documentation":"

      The arc axis range of a GaugeChartVisual.

      " + }, + "ReserveRange":{ + "shape":"Integer", + "documentation":"

      The reserved range of the arc axis.

      " + } + }, + "documentation":"

      The arc axis configuration of a GaugeChartVisual.

      " + }, + "ArcAxisDisplayRange":{ + "type":"structure", + "members":{ + "Min":{ + "shape":"Double", + "documentation":"

      The minimum value of the arc axis range.

      ", + "box":true + }, + "Max":{ + "shape":"Double", + "documentation":"

      The maximum value of the arc axis range.

      ", + "box":true + } + }, + "documentation":"

      The arc axis range of a GaugeChartVisual.

      " + }, + "ArcConfiguration":{ + "type":"structure", + "members":{ + "ArcAngle":{ + "shape":"Double", + "documentation":"

      The option that determines the arc angle of a GaugeChartVisual.

      ", + "box":true + }, + "ArcThickness":{ + "shape":"ArcThicknessOptions", + "documentation":"

      The options that determine the arc thickness of a GaugeChartVisual.

      " + } + }, + "documentation":"

      The arc configuration of a GaugeChartVisual.

      " + }, + "ArcOptions":{ + "type":"structure", + "members":{ + "ArcThickness":{ + "shape":"ArcThickness", + "documentation":"

      The arc thickness of a GaugeChartVisual.

      " + } + }, + "documentation":"

      The options that determine the arc thickness of a GaugeChartVisual.

      " + }, + "ArcThickness":{ + "type":"string", + "enum":[ + "SMALL", + "MEDIUM", + "LARGE", + "WHOLE" + ] + }, + "ArcThicknessOptions":{ + "type":"string", + "enum":[ + "SMALL", + "MEDIUM", + "LARGE" + ] + }, + "Arn":{"type":"string"}, + "ArnList":{ + "type":"list", + "member":{"shape":"Arn"} + }, + "AssetBundleCloudFormationOverridePropertyConfiguration":{ + "type":"structure", + "members":{ + "ResourceIdOverrideConfiguration":{ + "shape":"AssetBundleExportJobResourceIdOverrideConfiguration", + "documentation":"

      An optional list of structures that control how resource IDs are parameterized in the returned CloudFormation template.

      " + }, + "VPCConnections":{ + "shape":"AssetBundleExportJobVPCConnectionOverridePropertiesList", + "documentation":"

      An optional list of structures that control how VPCConnection resources are parameterized in the returned CloudFormation template.

      " + }, + "RefreshSchedules":{ + "shape":"AssetBundleExportJobRefreshScheduleOverridePropertiesList", + "documentation":"

      An optional list of structures that control how RefreshSchedule resources are parameterized in the returned CloudFormation template.

      " + }, + "DataSources":{ + "shape":"AssetBundleExportJobDataSourceOverridePropertiesList", + "documentation":"

      An optional list of structures that control how DataSource resources are parameterized in the returned CloudFormation template.

      " + }, + "DataSets":{ + "shape":"AssetBundleExportJobDataSetOverridePropertiesList", + "documentation":"

      An optional list of structures that control how DataSet resources are parameterized in the returned CloudFormation template.

      " + }, + "Themes":{ + "shape":"AssetBundleExportJobThemeOverridePropertiesList", + "documentation":"

      An optional list of structures that control how Theme resources are parameterized in the returned CloudFormation template.

      " + }, + "Analyses":{ + "shape":"AssetBundleExportJobAnalysisOverridePropertiesList", + "documentation":"

      An optional list of structures that control how Analysis resources are parameterized in the returned CloudFormation template.

      " + }, + "Dashboards":{ + "shape":"AssetBundleExportJobDashboardOverridePropertiesList", + "documentation":"

      An optional list of structures that control how Dashboard resources are parameterized in the returned CloudFormation template.

      " + } + }, + "documentation":"

      An optional collection of CloudFormation property configurations that control how the export job is generated.

      " + }, + "AssetBundleExportFormat":{ + "type":"string", + "enum":[ + "CLOUDFORMATION_JSON", + "QUICKSIGHT_JSON" + ] + }, + "AssetBundleExportJobAnalysisOverrideProperties":{ + "type":"structure", + "required":["Properties"], + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

      The ARN of the specific Analysis resource whose override properties are configured in this structure.

      " + }, + "Properties":{ + "shape":"AssetBundleExportJobAnalysisPropertyToOverrideList", + "documentation":"

      A list of Analysis resource properties to generate variables for in the returned CloudFormation template.

      " + } + }, + "documentation":"

      Controls how a specific Analysis resource is parameterized in the returned CloudFormation template.

      " + }, + "AssetBundleExportJobAnalysisOverridePropertiesList":{ + "type":"list", + "member":{"shape":"AssetBundleExportJobAnalysisOverrideProperties"}, + "max":50, + "min":1 + }, + "AssetBundleExportJobAnalysisPropertyToOverride":{ + "type":"string", + "enum":["Name"] + }, + "AssetBundleExportJobAnalysisPropertyToOverrideList":{ + "type":"list", + "member":{"shape":"AssetBundleExportJobAnalysisPropertyToOverride"}, + "max":10, + "min":1 + }, + "AssetBundleExportJobDashboardOverrideProperties":{ + "type":"structure", + "required":["Properties"], + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

      The ARN of the specific Dashboard resource whose override properties are configured in this structure.

      " + }, + "Properties":{ + "shape":"AssetBundleExportJobDashboardPropertyToOverrideList", + "documentation":"

      A list of Dashboard resource properties to generate variables for in the returned CloudFormation template.

      " + } + }, + "documentation":"

      Controls how a specific Dashboard resource is parameterized in the returned CloudFormation template.

      " + }, + "AssetBundleExportJobDashboardOverridePropertiesList":{ + "type":"list", + "member":{"shape":"AssetBundleExportJobDashboardOverrideProperties"}, + "max":50, + "min":1 + }, + "AssetBundleExportJobDashboardPropertyToOverride":{ + "type":"string", + "enum":["Name"] + }, + "AssetBundleExportJobDashboardPropertyToOverrideList":{ + "type":"list", + "member":{"shape":"AssetBundleExportJobDashboardPropertyToOverride"}, + "max":10, + "min":1 + }, + "AssetBundleExportJobDataSetOverrideProperties":{ + "type":"structure", + "required":["Properties"], + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

      The ARN of the specific DataSet resource whose override properties are configured in this structure.

      " + }, + "Properties":{ + "shape":"AssetBundleExportJobDataSetPropertyToOverrideList", + "documentation":"

      A list of DataSet resource properties to generate variables for in the returned CloudFormation template.

      " + } + }, + "documentation":"

      Controls how a specific DataSet resource is parameterized in the returned CloudFormation template.

      " + }, + "AssetBundleExportJobDataSetOverridePropertiesList":{ + "type":"list", + "member":{"shape":"AssetBundleExportJobDataSetOverrideProperties"}, + "max":50, + "min":1 + }, + "AssetBundleExportJobDataSetPropertyToOverride":{ + "type":"string", + "enum":["Name"] + }, + "AssetBundleExportJobDataSetPropertyToOverrideList":{ + "type":"list", + "member":{"shape":"AssetBundleExportJobDataSetPropertyToOverride"}, + "max":10, + "min":1 + }, + "AssetBundleExportJobDataSourceOverrideProperties":{ + "type":"structure", + "required":["Properties"], + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

      The ARN of the specific DataSource resource whose override properties are configured in this structure.

      " + }, + "Properties":{ + "shape":"AssetBundleExportJobDataSourcePropertyToOverrideList", + "documentation":"

      A list of DataSource resource properties to generate variables for in the returned CloudFormation template.

      " + } + }, + "documentation":"

      Controls how a specific DataSource resource is parameterized in the returned CloudFormation template.

      " + }, + "AssetBundleExportJobDataSourceOverridePropertiesList":{ + "type":"list", + "member":{"shape":"AssetBundleExportJobDataSourceOverrideProperties"}, + "max":50, + "min":1 + }, + "AssetBundleExportJobDataSourcePropertyToOverride":{ + "type":"string", + "enum":[ + "Name", + "DisableSsl", + "SecretArn", + "Username", + "Password", + "Domain", + "WorkGroup", + "Host", + "Port", + "Database", + "DataSetName", + "Catalog", + "InstanceId", + "ClusterId", + "ManifestFileLocation", + "Warehouse", + "RoleArn" + ] + }, + "AssetBundleExportJobDataSourcePropertyToOverrideList":{ + "type":"list", + "member":{"shape":"AssetBundleExportJobDataSourcePropertyToOverride"}, + "max":10, + "min":1 + }, + "AssetBundleExportJobError":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

      The ARN of the resource whose processing caused an error.

      " + }, + "Type":{ + "shape":"NonEmptyString", + "documentation":"

      The specific error type of the error that occurred.

      " + }, + "Message":{ + "shape":"NonEmptyString", + "documentation":"

      A description of the error.

      " + } + }, + "documentation":"

      Describes an error that occurred during an Asset Bundle export job.

      " + }, + "AssetBundleExportJobErrorList":{ + "type":"list", + "member":{"shape":"AssetBundleExportJobError"} + }, + "AssetBundleExportJobRefreshScheduleOverrideProperties":{ + "type":"structure", + "required":["Properties"], + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

      The ARN of the specific RefreshSchedule resource whose override properties are configured in this structure.

      " + }, + "Properties":{ + "shape":"AssetBundleExportJobRefreshSchedulePropertyToOverrideList", + "documentation":"

      A list of RefreshSchedule resource properties to generate variables for in the returned CloudFormation template.

      " + } + }, + "documentation":"

      Controls how a specific RefreshSchedule resource is parameterized in the returned CloudFormation template.

      " + }, + "AssetBundleExportJobRefreshScheduleOverridePropertiesList":{ + "type":"list", + "member":{"shape":"AssetBundleExportJobRefreshScheduleOverrideProperties"}, + "max":50, + "min":1 + }, + "AssetBundleExportJobRefreshSchedulePropertyToOverride":{ + "type":"string", + "enum":["StartAfterDateTime"] + }, + "AssetBundleExportJobRefreshSchedulePropertyToOverrideList":{ + "type":"list", + "member":{"shape":"AssetBundleExportJobRefreshSchedulePropertyToOverride"}, + "max":10, + "min":1 + }, + "AssetBundleExportJobResourceIdOverrideConfiguration":{ + "type":"structure", + "members":{ + "PrefixForAllResources":{ + "shape":"Boolean", + "documentation":"

      An option to request a CloudFormation variable for a prefix to be prepended to each resource's ID before import. The prefix is only added to the asset IDs and does not change the name of the asset.

      " + } + }, + "documentation":"

      An optional structure that configures resource ID overrides for the export job.

      " + }, + "AssetBundleExportJobStatus":{ + "type":"string", + "enum":[ + "QUEUED_FOR_IMMEDIATE_EXECUTION", + "IN_PROGRESS", + "SUCCESSFUL", + "FAILED" + ] + }, + "AssetBundleExportJobSummary":{ + "type":"structure", + "members":{ + "JobStatus":{ + "shape":"AssetBundleExportJobStatus", + "documentation":"

      The current status of the export job.

      " + }, + "Arn":{ + "shape":"Arn", + "documentation":"

      The ARN of the export job.

      " + }, + "CreatedTime":{ + "shape":"Timestamp", + "documentation":"

      The time that the export job was created.

      " + }, + "AssetBundleExportJobId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

      The ID of the export job.

      " + }, + "IncludeAllDependencies":{ + "shape":"Boolean", + "documentation":"

      The flag that determines the inclusion of resource dependencies in the returned asset bundle.

      " + }, + "ExportFormat":{ + "shape":"AssetBundleExportFormat", + "documentation":"

      The format for the export job.

      " + } + }, + "documentation":"

      A summary of the export job that includes details of the job's configuration and its current status.

      " + }, + "AssetBundleExportJobSummaryList":{ + "type":"list", + "member":{"shape":"AssetBundleExportJobSummary"} + }, + "AssetBundleExportJobThemeOverrideProperties":{ + "type":"structure", + "required":["Properties"], + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

      The ARN of the specific Theme resource whose override properties are configured in this structure.

      " + }, + "Properties":{ + "shape":"AssetBundleExportJobThemePropertyToOverrideList", + "documentation":"

      A list of Theme resource properties to generate variables for in the returned CloudFormation template.

      " + } + }, + "documentation":"

      Controls how a specific Theme resource is parameterized in the returned CloudFormation template.

      " + }, + "AssetBundleExportJobThemeOverridePropertiesList":{ + "type":"list", + "member":{"shape":"AssetBundleExportJobThemeOverrideProperties"}, + "max":50, + "min":1 + }, + "AssetBundleExportJobThemePropertyToOverride":{ + "type":"string", + "enum":["Name"] + }, + "AssetBundleExportJobThemePropertyToOverrideList":{ + "type":"list", + "member":{"shape":"AssetBundleExportJobThemePropertyToOverride"}, + "max":10, + "min":1 + }, + "AssetBundleExportJobVPCConnectionOverrideProperties":{ + "type":"structure", + "required":["Properties"], + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

      The ARN of the specific VPCConnection resource whose override properties are configured in this structure.

      " + }, + "Properties":{ + "shape":"AssetBundleExportJobVPCConnectionPropertyToOverrideList", + "documentation":"

      A list of VPCConnection resource properties to generate variables for in the returned CloudFormation template.

      " + } + }, + "documentation":"

      Controls how a specific VPCConnection resource is parameterized in the outputted CloudFormation template.

      " + }, + "AssetBundleExportJobVPCConnectionOverridePropertiesList":{ + "type":"list", + "member":{"shape":"AssetBundleExportJobVPCConnectionOverrideProperties"}, + "max":50, + "min":1 + }, + "AssetBundleExportJobVPCConnectionPropertyToOverride":{ + "type":"string", + "enum":[ + "Name", + "DnsResolvers", + "RoleArn" + ] + }, + "AssetBundleExportJobVPCConnectionPropertyToOverrideList":{ + "type":"list", + "member":{"shape":"AssetBundleExportJobVPCConnectionPropertyToOverride"}, + "max":10, + "min":1 + }, + "AssetBundleImportBodyBlob":{ + "type":"blob", + "max":20971520, + "min":0, + "sensitive":true + }, + "AssetBundleImportFailureAction":{ + "type":"string", + "enum":[ + "DO_NOTHING", + "ROLLBACK" + ] + }, + "AssetBundleImportJobAnalysisOverrideParameters":{ + "type":"structure", + "required":["AnalysisId"], + "members":{ + "AnalysisId":{ + "shape":"ResourceId", + "documentation":"

      The ID of the analysis that you want to apply overrides to.

      " + }, + "Name":{ + "shape":"ResourceName", + "documentation":"

      A new name for the analysis.

      " + } + }, + "documentation":"

      The override parameters for a single analysis that is being imported.

      " + }, + "AssetBundleImportJobAnalysisOverrideParametersList":{ + "type":"list", + "member":{"shape":"AssetBundleImportJobAnalysisOverrideParameters"}, + "max":50, + "min":1 + }, + "AssetBundleImportJobDashboardOverrideParameters":{ + "type":"structure", + "required":["DashboardId"], + "members":{ + "DashboardId":{ + "shape":"ResourceId", + "documentation":"

      The ID of the dashboard that you want to apply overrides to.

      " + }, + "Name":{ + "shape":"ResourceName", + "documentation":"

      A new name for the dashboard.

      " + } + }, + "documentation":"

      The override parameters for a single dashboard that is being imported.

      " + }, + "AssetBundleImportJobDashboardOverrideParametersList":{ + "type":"list", + "member":{"shape":"AssetBundleImportJobDashboardOverrideParameters"}, + "max":50, + "min":1 + }, + "AssetBundleImportJobDataSetOverrideParameters":{ + "type":"structure", + "required":["DataSetId"], + "members":{ + "DataSetId":{ + "shape":"ResourceId", + "documentation":"

      The ID of the dataset to apply overrides to.

      " + }, + "Name":{ + "shape":"ResourceName", + "documentation":"

      A new name for the dataset.

      " + } + }, + "documentation":"

      The override parameters for a single dataset that is being imported.

      " + }, + "AssetBundleImportJobDataSetOverrideParametersList":{ + "type":"list", + "member":{"shape":"AssetBundleImportJobDataSetOverrideParameters"}, + "max":50, + "min":1 + }, + "AssetBundleImportJobDataSourceCredentialPair":{ + "type":"structure", + "required":[ + "Username", + "Password" + ], + "members":{ + "Username":{ + "shape":"DbUsername", + "documentation":"

      The username for the data source connection.

      " + }, + "Password":{ + "shape":"Password", + "documentation":"

      The password for the data source connection.

      " + } + }, + "documentation":"

      A username and password credential pair to use to import a data source resource.

      ", + "sensitive":true + }, + "AssetBundleImportJobDataSourceCredentials":{ + "type":"structure", + "members":{ + "CredentialPair":{ + "shape":"AssetBundleImportJobDataSourceCredentialPair", + "documentation":"

      A username and password credential pair to be used to create the imported data source. Keep this field blank if you are using a Secrets Manager secret to provide credentials.

      " + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

      The ARN of the Secrets Manager secret that's used to create the imported data source. Keep this field blank, unless you are using a secret in place of a credential pair.

      " + } + }, + "documentation":"

      The login credentials to use to import a data source resource.

      " + }, + "AssetBundleImportJobDataSourceOverrideParameters":{ + "type":"structure", + "required":["DataSourceId"], + "members":{ + "DataSourceId":{ + "shape":"ResourceId", + "documentation":"

      The ID of the data source to apply overrides to.

      " + }, + "Name":{ + "shape":"ResourceName", + "documentation":"

      A new name for the data source.

      " + }, + "DataSourceParameters":{"shape":"DataSourceParameters"}, + "VpcConnectionProperties":{"shape":"VpcConnectionProperties"}, + "SslProperties":{"shape":"SslProperties"}, + "Credentials":{ + "shape":"AssetBundleImportJobDataSourceCredentials", + "documentation":"

      An optional structure that provides the credentials to be used to create the imported data source.

      " + } + }, + "documentation":"

      The override parameters for a single data source that is being imported.

      " + }, + "AssetBundleImportJobDataSourceOverrideParametersList":{ + "type":"list", + "member":{"shape":"AssetBundleImportJobDataSourceOverrideParameters"}, + "max":50, + "min":1 + }, + "AssetBundleImportJobError":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

      The ARN of the resource whose processing caused an error.

      " + }, + "Type":{ + "shape":"NonEmptyString", + "documentation":"

      The specific error type of the error that occurred.

      " + }, + "Message":{ + "shape":"NonEmptyString", + "documentation":"

      A description of the error.

      " + } + }, + "documentation":"

      Describes an error that occurred within an Asset Bundle import execution.

      " }, - "AnchorOption":{ - "type":"string", - "enum":["NOW"] + "AssetBundleImportJobErrorList":{ + "type":"list", + "member":{"shape":"AssetBundleImportJobError"} }, - "AnonymousUserDashboardEmbeddingConfiguration":{ + "AssetBundleImportJobOverrideParameters":{ "type":"structure", - "required":["InitialDashboardId"], "members":{ - "InitialDashboardId":{ - "shape":"ShortRestrictiveResourceId", - "documentation":"

      The dashboard ID for the dashboard that you want the user to see first. This ID is included in the output URL. When the URL in response is accessed, Amazon QuickSight renders this dashboard.

      The Amazon Resource Name (ARN) of this dashboard must be included in the AuthorizedResourceArns parameter. Otherwise, the request will fail with InvalidParameterValueException.

      " + "ResourceIdOverrideConfiguration":{ + "shape":"AssetBundleImportJobResourceIdOverrideConfiguration", + "documentation":"

      An optional structure that configures resource ID overrides to be applied within the import job.

      " + }, + "VPCConnections":{ + "shape":"AssetBundleImportJobVPCConnectionOverrideParametersList", + "documentation":"

      A list of overrides for any VPCConnection resources that are present in the asset bundle that is imported.

      " + }, + "RefreshSchedules":{ + "shape":"AssetBundleImportJobRefreshScheduleOverrideParametersList", + "documentation":"

      A list of overrides for any RefreshSchedule resources that are present in the asset bundle that is imported.

      " + }, + "DataSources":{ + "shape":"AssetBundleImportJobDataSourceOverrideParametersList", + "documentation":"

      A list of overrides for any DataSource resources that are present in the asset bundle that is imported.

      " + }, + "DataSets":{ + "shape":"AssetBundleImportJobDataSetOverrideParametersList", + "documentation":"

      A list of overrides for any DataSet resources that are present in the asset bundle that is imported.

      " + }, + "Themes":{ + "shape":"AssetBundleImportJobThemeOverrideParametersList", + "documentation":"

      A list of overrides for any Theme resources that are present in the asset bundle that is imported.

      " + }, + "Analyses":{ + "shape":"AssetBundleImportJobAnalysisOverrideParametersList", + "documentation":"

      A list of overrides for any Analysis resources that are present in the asset bundle that is imported.

      " + }, + "Dashboards":{ + "shape":"AssetBundleImportJobDashboardOverrideParametersList", + "documentation":"

      A list of overrides for any Dashboard resources that are present in the asset bundle that is imported.

      " } }, - "documentation":"

      Information about the dashboard that you want to embed.

      " + "documentation":"

      A list of overrides that modify the asset bundle resource configuration before the resource is imported.

      " }, - "AnonymousUserDashboardVisualEmbeddingConfiguration":{ + "AssetBundleImportJobRefreshScheduleOverrideParameters":{ "type":"structure", - "required":["InitialDashboardVisualId"], + "required":[ + "DataSetId", + "ScheduleId" + ], "members":{ - "InitialDashboardVisualId":{ - "shape":"DashboardVisualId", - "documentation":"

      The visual ID for the visual that you want the user to see. This ID is included in the output URL. When the URL in response is accessed, Amazon QuickSight renders this visual.

      The Amazon Resource Name (ARN) of the dashboard that the visual belongs to must be included in the AuthorizedResourceArns parameter. Otherwise, the request will fail with InvalidParameterValueException.

      " + "DataSetId":{ + "shape":"ResourceId", + "documentation":"

      A partial identifier for the specific RefreshSchedule resource that is being overridden. This structure is used together with the ScheduleID structure.

      " + }, + "ScheduleId":{ + "shape":"String", + "documentation":"

      A partial identifier for the specific RefreshSchedule resource being overridden. This structure is used together with the DataSetId structure.

      " + }, + "StartAfterDateTime":{ + "shape":"Timestamp", + "documentation":"

      An override for the StartAfterDateTime of a RefreshSchedule. Make sure that the StartAfterDateTime is set to a time that takes place in the future.

      " } }, - "documentation":"

      The experience that you are embedding. You can use this object to generate a url that embeds a visual into your application.

      " + "documentation":"

      A list of overrides for a specific RefreshSchedule resource that is present in the asset bundle that is imported.

      " }, - "AnonymousUserEmbeddingExperienceConfiguration":{ + "AssetBundleImportJobRefreshScheduleOverrideParametersList":{ + "type":"list", + "member":{"shape":"AssetBundleImportJobRefreshScheduleOverrideParameters"}, + "max":50, + "min":1 + }, + "AssetBundleImportJobResourceIdOverrideConfiguration":{ "type":"structure", "members":{ - "Dashboard":{ - "shape":"AnonymousUserDashboardEmbeddingConfiguration", - "documentation":"

      The type of embedding experience. In this case, Amazon QuickSight dashboards.

      " - }, - "DashboardVisual":{ - "shape":"AnonymousUserDashboardVisualEmbeddingConfiguration", - "documentation":"

      The type of embedding experience. In this case, Amazon QuickSight visuals.

      " - }, - "QSearchBar":{ - "shape":"AnonymousUserQSearchBarEmbeddingConfiguration", - "documentation":"

      The Q search bar that you want to use for anonymous user embedding.

      " + "PrefixForAllResources":{ + "shape":"String", + "documentation":"

      An option to request a CloudFormation variable for a prefix to be prepended to each resource's ID before import. The prefix is only added to the asset IDs and does not change the name of the asset.

      " } }, - "documentation":"

      The type of experience you want to embed. For anonymous users, you can embed Amazon QuickSight dashboards.

      " + "documentation":"

      An optional structure that configures resource ID overrides for the import job.

      " }, - "AnonymousUserQSearchBarEmbeddingConfiguration":{ + "AssetBundleImportJobStatus":{ + "type":"string", + "enum":[ + "QUEUED_FOR_IMMEDIATE_EXECUTION", + "IN_PROGRESS", + "SUCCESSFUL", + "FAILED", + "FAILED_ROLLBACK_IN_PROGRESS", + "FAILED_ROLLBACK_COMPLETED", + "FAILED_ROLLBACK_ERROR" + ] + }, + "AssetBundleImportJobSummary":{ "type":"structure", - "required":["InitialTopicId"], "members":{ - "InitialTopicId":{ - "shape":"RestrictiveResourceId", - "documentation":"

      The QuickSight Q topic ID of the topic that you want the anonymous user to see first. This ID is included in the output URL. When the URL in response is accessed, Amazon QuickSight renders the Q search bar with this topic pre-selected.

      The Amazon Resource Name (ARN) of this Q topic must be included in the AuthorizedResourceArns parameter. Otherwise, the request will fail with InvalidParameterValueException.

      " + "JobStatus":{ + "shape":"AssetBundleImportJobStatus", + "documentation":"

      The current status of the import job.

      " + }, + "Arn":{ + "shape":"Arn", + "documentation":"

      The ARN of the import job.

      " + }, + "CreatedTime":{ + "shape":"Timestamp", + "documentation":"

      The time that the import job was created.

      " + }, + "AssetBundleImportJobId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

      The ID of the job. This ID is unique while the job is running. After the job is completed, you can reuse this ID for another job.

      " + }, + "FailureAction":{ + "shape":"AssetBundleImportFailureAction", + "documentation":"

      The failure action for the import job.

      " } }, - "documentation":"

      The settings that you want to use with the Q search bar.

      " + "documentation":"

      A summary of the import job that includes details of the requested job's configuration and its current status.

      " }, - "ArcAxisConfiguration":{ + "AssetBundleImportJobSummaryList":{ + "type":"list", + "member":{"shape":"AssetBundleImportJobSummary"} + }, + "AssetBundleImportJobThemeOverrideParameters":{ "type":"structure", + "required":["ThemeId"], "members":{ - "Range":{ - "shape":"ArcAxisDisplayRange", - "documentation":"

      The arc axis range of a GaugeChartVisual.

      " + "ThemeId":{ + "shape":"ResourceId", + "documentation":"

      The ID of the theme to apply overrides to.

      " }, - "ReserveRange":{ - "shape":"Integer", - "documentation":"

      The reserved range of the arc axis.

      " + "Name":{ + "shape":"ResourceName", + "documentation":"

      A new name for the theme.

      " } }, - "documentation":"

      The arc axis configuration of a GaugeChartVisual.

      " + "documentation":"

      The override parameters for a single theme that is imported.

      " }, - "ArcAxisDisplayRange":{ + "AssetBundleImportJobThemeOverrideParametersList":{ + "type":"list", + "member":{"shape":"AssetBundleImportJobThemeOverrideParameters"}, + "max":50, + "min":1 + }, + "AssetBundleImportJobVPCConnectionOverrideParameters":{ "type":"structure", + "required":["VPCConnectionId"], "members":{ - "Min":{ - "shape":"Double", - "documentation":"

      The minimum value of the arc axis range.

      ", - "box":true + "VPCConnectionId":{ + "shape":"VPCConnectionResourceIdUnrestricted", + "documentation":"

      The ID of the VPC Connection to apply overrides to.

      " }, - "Max":{ - "shape":"Double", - "documentation":"

      The maximum value of the arc axis range.

      ", - "box":true + "Name":{ + "shape":"ResourceName", + "documentation":"

      A new name for the VPC connection.

      " + }, + "SubnetIds":{ + "shape":"SubnetIdList", + "documentation":"

      A list of new subnet IDs for the VPC connection you are importing. This field is required if you are importing the VPC connection from another Amazon Web Services account or Region.

      " + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIdList", + "documentation":"

      A list of new security group IDs for the VPC connection you are importing. This field is required if you are importing the VPC connection from another Amazon Web Services account or Region.

      " + }, + "DnsResolvers":{ + "shape":"DnsResolverList", + "documentation":"

      An optional override of DNS resolvers to be used by the VPC connection.

      " + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

      An optional override of the role ARN to be used by the VPC connection.

      " } }, - "documentation":"

      The arc axis range of a GaugeChartVisual.

      " + "documentation":"

      The override parameters for a single VPC connection that is imported.

      " }, - "ArcConfiguration":{ + "AssetBundleImportJobVPCConnectionOverrideParametersList":{ + "type":"list", + "member":{"shape":"AssetBundleImportJobVPCConnectionOverrideParameters"}, + "max":50, + "min":1 + }, + "AssetBundleImportSource":{ "type":"structure", "members":{ - "ArcAngle":{ - "shape":"Double", - "documentation":"

      The option that determines the arc angle of a GaugeChartVisual.

      ", - "box":true + "Body":{ + "shape":"AssetBundleImportBodyBlob", + "documentation":"

      The bytes of the base64 encoded asset bundle import zip file. This file can't exceed 20 MB.

      If you are calling the API operations from the Amazon Web Services SDK for Java, JavaScript, Python, or PHP, the SDK encodes base64 automatically to allow the direct setting of the zip file's bytes. If you are using an SDK for a different language or receiving related errors, try to base64 encode your data.

      " }, - "ArcThickness":{ - "shape":"ArcThicknessOptions", - "documentation":"

      The options that determine the arc thickness of a GaugeChartVisual.

      " + "S3Uri":{ + "shape":"S3Uri", + "documentation":"

      The Amazon S3 URI for an asset bundle import file that exists in an Amazon S3 bucket that the caller has read access to. The file must be a zip format file and can't exceed 20 MB.

      " } }, - "documentation":"

      The arc configuration of a GaugeChartVisual.

      " + "documentation":"

      The source of the asset bundle zip file that contains the data that you want to import.

      " }, - "ArcOptions":{ + "AssetBundleImportSourceDescription":{ "type":"structure", "members":{ - "ArcThickness":{ - "shape":"ArcThickness", - "documentation":"

      The arc thickness of a GaugeChartVisual.

      " + "Body":{ + "shape":"String", + "documentation":"

      An HTTPS download URL for the asset bundle that you optionally provided at the start of the import job. This URL is valid for five minutes after issuance. Call DescribeAssetBundleImportJob again for a fresh URL if needed. The downloaded asset bundle is a .qs zip file.

      " + }, + "S3Uri":{ + "shape":"S3Uri", + "documentation":"

      The Amazon S3 URI that you provided at the start of the import job.

      " } }, - "documentation":"

      The options that determine the arc thickness of a GaugeChartVisual.

      " - }, - "ArcThickness":{ - "type":"string", - "enum":[ - "SMALL", - "MEDIUM", - "LARGE", - "WHOLE" - ] - }, - "ArcThicknessOptions":{ - "type":"string", - "enum":[ - "SMALL", - "MEDIUM", - "LARGE" - ] + "documentation":"

      A description of the import source that you provide at the start of an import job. This value is set to either Body or S3Uri, depending on how the StartAssetBundleImportJobRequest is configured.

      " }, - "Arn":{"type":"string"}, - "ArnList":{ + "AssetBundleResourceArns":{ "type":"list", - "member":{"shape":"Arn"} + "member":{"shape":"Arn"}, + "max":100, + "min":1 }, "AssignmentStatus":{ "type":"string", @@ -4605,7 +5435,7 @@ "members":{ "ClusterMarker":{ "shape":"ClusterMarker", - "documentation":"

      The cluster marker that is a part of the cluster marker configuration

      " + "documentation":"

      The cluster marker that is a part of the cluster marker configuration.

      " } }, "documentation":"

      The cluster marker configuration of the geospatial map selected point style.

      " @@ -6853,7 +7683,7 @@ ], "members":{ "Username":{ - "shape":"Username", + "shape":"DbUsername", "documentation":"

      User name.

      " }, "Password":{ @@ -8888,6 +9718,11 @@ "SATURDAY" ] }, + "DbUsername":{ + "type":"string", + "max":64, + "min":1 + }, "DecimalDatasetParameter":{ "type":"structure", "required":[ @@ -10388,53 +11223,207 @@ "shape":"ResourcePermissionList", "documentation":"

      A structure that describes the principals and the resource-level permissions on an analysis.

      " }, - "Status":{ - "shape":"StatusCode", - "documentation":"

      The HTTP status of the request.

      ", - "location":"statusCode" + "Status":{ + "shape":"StatusCode", + "documentation":"

      The HTTP status of the request.

      ", + "location":"statusCode" + }, + "RequestId":{ + "shape":"String", + "documentation":"

      The Amazon Web Services request ID for this operation.

      " + } + } + }, + "DescribeAnalysisRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "AnalysisId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

      The ID of the Amazon Web Services account that contains the analysis. You must be using the Amazon Web Services account that the analysis is in.

      ", + "location":"uri", + "locationName":"AwsAccountId" + }, + "AnalysisId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

      The ID of the analysis that you're describing. The ID is part of the URL of the analysis.

      ", + "location":"uri", + "locationName":"AnalysisId" + } + } + }, + "DescribeAnalysisResponse":{ + "type":"structure", + "members":{ + "Analysis":{ + "shape":"Analysis", + "documentation":"

      A metadata structure that contains summary information for the analysis that you're describing.

      " + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

      The HTTP status of the request.

      ", + "location":"statusCode" + }, + "RequestId":{ + "shape":"String", + "documentation":"

      The Amazon Web Services request ID for this operation.

      " + } + } + }, + "DescribeAssetBundleExportJobRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "AssetBundleExportJobId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

      The ID of the Amazon Web Services account the export job is executed in.

      ", + "location":"uri", + "locationName":"AwsAccountId" + }, + "AssetBundleExportJobId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

      The ID of the job that you want described. The job ID is set when you start a new job with a StartAssetBundleExportJob API call.

      ", + "location":"uri", + "locationName":"AssetBundleExportJobId" + } + } + }, + "DescribeAssetBundleExportJobResponse":{ + "type":"structure", + "members":{ + "JobStatus":{ + "shape":"AssetBundleExportJobStatus", + "documentation":"

      Indicates the status of a job through its queuing and execution.

      Poll this DescribeAssetBundleExportApi until JobStatus is either SUCCESSFUL or FAILED.

      " + }, + "DownloadUrl":{ + "shape":"String", + "documentation":"

      The URL to download the exported asset bundle data from.

      This URL is available only after the job has succeeded. This URL is valid for 5 minutes after issuance. Call DescribeAssetBundleExportJob again for a fresh URL if needed.

      The downloaded asset bundle is a zip file named assetbundle-{jobId}.qs. The file has a .qs extension.

      This URL can't be used in a StartAssetBundleImportJob API call and should only be used for download purposes.

      " + }, + "Errors":{ + "shape":"AssetBundleExportJobErrorList", + "documentation":"

      An array of error records that describes any failures that occurred during the export job processing.

      Error records accumulate while the job runs. The complete set of error records is available after the job has completed and failed.

      " + }, + "Arn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) for the export job.

      " + }, + "CreatedTime":{ + "shape":"Timestamp", + "documentation":"

      The time that the export job was created.

      " + }, + "AssetBundleExportJobId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

      The ID of the job. The job ID is set when you start a new job with a StartAssetBundleExportJob API call.

      " + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

      The ID of the Amazon Web Services account that the export job was executed in.

      " + }, + "ResourceArns":{ + "shape":"AssetBundleResourceArns", + "documentation":"

      A list of resource ARNs that were exported with the job.

      " + }, + "IncludeAllDependencies":{ + "shape":"Boolean", + "documentation":"

      The include dependencies flag.

      " + }, + "ExportFormat":{ + "shape":"AssetBundleExportFormat", + "documentation":"

      The format of the export.

      " + }, + "CloudFormationOverridePropertyConfiguration":{ + "shape":"AssetBundleCloudFormationOverridePropertyConfiguration", + "documentation":"

      The CloudFormation override property configuration for the export job.

      " }, "RequestId":{ - "shape":"String", + "shape":"NonEmptyString", "documentation":"

      The Amazon Web Services request ID for this operation.

      " + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

      The HTTP status of the response.

      ", + "location":"statusCode" } } }, - "DescribeAnalysisRequest":{ + "DescribeAssetBundleImportJobRequest":{ "type":"structure", "required":[ "AwsAccountId", - "AnalysisId" + "AssetBundleImportJobId" ], "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

      The ID of the Amazon Web Services account that contains the analysis. You must be using the Amazon Web Services account that the analysis is in.

      ", + "documentation":"

      The ID of the Amazon Web Services account the import job was executed in.

      ", "location":"uri", "locationName":"AwsAccountId" }, - "AnalysisId":{ + "AssetBundleImportJobId":{ "shape":"ShortRestrictiveResourceId", - "documentation":"

      The ID of the analysis that you're describing. The ID is part of the URL of the analysis.

      ", + "documentation":"

      The ID of the job. The job ID is set when you start a new job with a StartAssetBundleImportJob API call.

      ", "location":"uri", - "locationName":"AnalysisId" + "locationName":"AssetBundleImportJobId" } } }, - "DescribeAnalysisResponse":{ + "DescribeAssetBundleImportJobResponse":{ "type":"structure", "members":{ - "Analysis":{ - "shape":"Analysis", - "documentation":"

      A metadata structure that contains summary information for the analysis that you're describing.

      " + "JobStatus":{ + "shape":"AssetBundleImportJobStatus", + "documentation":"

      Indicates the status of a job through its queuing and execution.

      Poll the DescribeAssetBundleImportJob API until JobStatus returns one of the following values:

      • SUCCESSFUL

      • FAILED

      • FAILED_ROLLBACK_COMPLETED

      • FAILED_ROLLBACK_ERROR

      " }, - "Status":{ - "shape":"StatusCode", - "documentation":"

      The HTTP status of the request.

      ", - "location":"statusCode" + "Errors":{ + "shape":"AssetBundleImportJobErrorList", + "documentation":"

      An array of error records that describes any failures that occurred during the import job processing.

      Error records accumulate while the job is still running. The complete set of error records is available after the job has completed and failed.

      " + }, + "RollbackErrors":{ + "shape":"AssetBundleImportJobErrorList", + "documentation":"

      An array of error records that describes any failures that occurred while an import job was attempting a rollback.

      Error records accumulate while the job is still running. The complete set of error records is available after the job has completed and failed.

      " + }, + "Arn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) for the import job.

      " + }, + "CreatedTime":{ + "shape":"Timestamp", + "documentation":"

      The time that the import job was created.

      " + }, + "AssetBundleImportJobId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

      The ID of the job. The job ID is set when you start a new job with a StartAssetBundleImportJob API call.

      " + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

      The ID of the Amazon Web Services account the import job was executed in.

      " + }, + "AssetBundleImportSource":{ + "shape":"AssetBundleImportSourceDescription", + "documentation":"

      The source of the asset bundle zip file that contains the data that is imported by the job.

      " + }, + "OverrideParameters":{ + "shape":"AssetBundleImportJobOverrideParameters", + "documentation":"

      Optional overrides to be applied to the resource configuration before import.

      " + }, + "FailureAction":{ + "shape":"AssetBundleImportFailureAction", + "documentation":"

      The failure action for the import job.

      " }, "RequestId":{ - "shape":"String", + "shape":"NonEmptyString", "documentation":"

      The Amazon Web Services request ID for this operation.

      " + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

      The HTTP status of the response.

      ", + "location":"statusCode" } } }, @@ -14089,6 +15078,43 @@ }, "documentation":"

      The bound options (north, south, west, east) of the geospatial window options.

      " }, + "GeospatialHeatmapColorScale":{ + "type":"structure", + "members":{ + "Colors":{ + "shape":"GeospatialHeatmapDataColorList", + "documentation":"

      The list of colors to be used in heatmap point style.

      " + } + }, + "documentation":"

      The color scale specification for the heatmap point style.

      " + }, + "GeospatialHeatmapConfiguration":{ + "type":"structure", + "members":{ + "HeatmapColor":{ + "shape":"GeospatialHeatmapColorScale", + "documentation":"

      The color scale specification for the heatmap point style.

      " + } + }, + "documentation":"

      The heatmap configuration of the geospatial point style.

      " + }, + "GeospatialHeatmapDataColor":{ + "type":"structure", + "required":["Color"], + "members":{ + "Color":{ + "shape":"HexColor", + "documentation":"

      The hex color to be used in the heatmap point style.

      " + } + }, + "documentation":"

      The color to be used in the heatmap point style.

      " + }, + "GeospatialHeatmapDataColorList":{ + "type":"list", + "member":{"shape":"GeospatialHeatmapDataColor"}, + "max":2, + "min":2 + }, "GeospatialMapAggregatedFieldWells":{ "type":"structure", "members":{ @@ -14199,6 +15225,10 @@ "ClusterMarkerConfiguration":{ "shape":"ClusterMarkerConfiguration", "documentation":"

      The cluster marker configuration of the geospatial point style.

      " + }, + "HeatmapConfiguration":{ + "shape":"GeospatialHeatmapConfiguration", + "documentation":"

      The heatmap configuration of the geospatial point style.

      " } }, "documentation":"

      The point style of the geospatial map.

      " @@ -14207,7 +15237,8 @@ "type":"string", "enum":[ "POINT", - "CLUSTER" + "CLUSTER", + "HEATMAP" ] }, "GeospatialWindowOptions":{ @@ -16262,6 +17293,100 @@ } } }, + "ListAssetBundleExportJobsRequest":{ + "type":"structure", + "required":["AwsAccountId"], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

      The ID of the Amazon Web Services account that the export jobs were executed in.

      ", + "location":"uri", + "locationName":"AwsAccountId" + }, + "NextToken":{ + "shape":"String", + "documentation":"

      The token for the next set of results, or null if there are no more results.

      ", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

      The maximum number of results to be returned per request.

      ", + "box":true, + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListAssetBundleExportJobsResponse":{ + "type":"structure", + "members":{ + "AssetBundleExportJobSummaryList":{ + "shape":"AssetBundleExportJobSummaryList", + "documentation":"

      A list of export job summaries.

      " + }, + "NextToken":{ + "shape":"String", + "documentation":"

      The token for the next set of results, or null if there are no more results.

      " + }, + "RequestId":{ + "shape":"NonEmptyString", + "documentation":"

      The Amazon Web Services request ID for this operation.

      " + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

      The HTTP status of the response.

      ", + "location":"statusCode" + } + } + }, + "ListAssetBundleImportJobsRequest":{ + "type":"structure", + "required":["AwsAccountId"], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

      The ID of the Amazon Web Services account that the import jobs were executed in.

      ", + "location":"uri", + "locationName":"AwsAccountId" + }, + "NextToken":{ + "shape":"String", + "documentation":"

      The token for the next set of results, or null if there are no more results.

      ", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

      The maximum number of results to be returned per request.

      ", + "box":true, + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListAssetBundleImportJobsResponse":{ + "type":"structure", + "members":{ + "AssetBundleImportJobSummaryList":{ + "shape":"AssetBundleImportJobSummaryList", + "documentation":"

      A list of import job summaries.

      " + }, + "NextToken":{ + "shape":"String", + "documentation":"

      The token for the next set of results, or null if there are no more results.

      " + }, + "RequestId":{ + "shape":"NonEmptyString", + "documentation":"

      The Amazon Web Services request ID for this operation.

      " + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

      The HTTP status of the response.

      ", + "location":"statusCode" + } + } + }, "ListControlDisplayOptions":{ "type":"structure", "members":{ @@ -19498,6 +20623,10 @@ "Scope":{ "shape":"PivotTableConditionalFormattingScope", "documentation":"

      The scope of the cell for conditional formatting.

      " + }, + "Scopes":{ + "shape":"PivotTableConditionalFormattingScopeList", + "documentation":"

      A list of cell scopes for conditional formatting.

      " } }, "documentation":"

      The cell conditional formatting option for a pivot table.

      " @@ -19537,6 +20666,11 @@ }, "documentation":"

      The scope of the cell for conditional formatting.

      " }, + "PivotTableConditionalFormattingScopeList":{ + "type":"list", + "member":{"shape":"PivotTableConditionalFormattingScope"}, + "max":3 + }, "PivotTableConditionalFormattingScopeRole":{ "type":"string", "enum":[ @@ -19600,6 +20734,46 @@ "member":{"shape":"DimensionField"}, "max":40 }, + "PivotTableFieldCollapseState":{ + "type":"string", + "enum":[ + "COLLAPSED", + "EXPANDED" + ] + }, + "PivotTableFieldCollapseStateOption":{ + "type":"structure", + "required":["Target"], + "members":{ + "Target":{ + "shape":"PivotTableFieldCollapseStateTarget", + "documentation":"

      A tagged-union object that sets the collapse state.

      " + }, + "State":{ + "shape":"PivotTableFieldCollapseState", + "documentation":"

      The state of the field target of a pivot table. Choose one of the following options:

      • COLLAPSED

      • EXPANDED

      " + } + }, + "documentation":"

      The collapse state options for the pivot table field options.

      " + }, + "PivotTableFieldCollapseStateOptionList":{ + "type":"list", + "member":{"shape":"PivotTableFieldCollapseStateOption"} + }, + "PivotTableFieldCollapseStateTarget":{ + "type":"structure", + "members":{ + "FieldId":{ + "shape":"String", + "documentation":"

      The field ID of the pivot table that the collapse state needs to be set to.

      " + }, + "FieldDataPathValues":{ + "shape":"DataPathValueList", + "documentation":"

      The data path of the pivot table's header. Used to set the collapse state.

      " + } + }, + "documentation":"

      The target of a pivot table field collapse state.

      " + }, "PivotTableFieldOption":{ "type":"structure", "required":["FieldId"], @@ -19634,6 +20808,10 @@ "DataPathOptions":{ "shape":"PivotTableDataPathOptionList", "documentation":"

      The data path options for the pivot table field options.

      " + }, + "CollapseStateOptions":{ + "shape":"PivotTableFieldCollapseStateOptionList", + "documentation":"

      The collapse state options for the pivot table field options.

      " } }, "documentation":"

      The field options for a pivot table visual.

      " @@ -20133,6 +21311,14 @@ }, "documentation":"

      The configured style settings of a radar chart.

      " }, + "RadarChartAxesRangeScale":{ + "type":"string", + "enum":[ + "AUTO", + "INDEPENDENT", + "SHARED" + ] + }, "RadarChartCategoryFieldList":{ "type":"list", "member":{"shape":"DimensionField"}, @@ -20201,6 +21387,10 @@ "Legend":{ "shape":"LegendOptions", "documentation":"

      The legend display setup of the visual.

      " + }, + "AxesRangeScale":{ + "shape":"RadarChartAxesRangeScale", + "documentation":"

      The axis behavior options of a radar chart.

      " } }, "documentation":"

      The configuration of a RadarChartVisual.

      " @@ -21343,6 +22533,10 @@ }, "documentation":"

      A physical table type for an S3 data source.

      " }, + "S3Uri":{ + "type":"string", + "pattern":"^(https|s3)://([^/]+)/?(.*)$" + }, "SameSheetTargetVisualConfiguration":{ "type":"structure", "members":{ @@ -22807,6 +24001,119 @@ }, "documentation":"

      Secure Socket Layer (SSL) properties that apply when Amazon QuickSight connects to your underlying data source.

      " }, + "StartAssetBundleExportJobRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "AssetBundleExportJobId", + "ResourceArns", + "ExportFormat" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

      The ID of the Amazon Web Services account to export assets from.

      ", + "location":"uri", + "locationName":"AwsAccountId" + }, + "AssetBundleExportJobId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

      The ID of the job. This ID is unique while the job is running. After the job is completed, you can reuse this ID for another job.

      " + }, + "ResourceArns":{ + "shape":"AssetBundleResourceArns", + "documentation":"

      An array of resource ARNs to export. The following resources are supported.

      • Analysis

      • Dashboard

      • DataSet

      • DataSource

      • RefreshSchedule

      • Theme

      • VPCConnection

      The API caller must have the necessary permissions in their IAM role to access each resource before the resources can be exported.

      " + }, + "IncludeAllDependencies":{ + "shape":"Boolean", + "documentation":"

      A Boolean that determines whether all dependencies of each resource ARN are recursively exported with the job. For example, say you provided a Dashboard ARN to the ResourceArns parameter. If you set IncludeAllDependencies to TRUE, any theme, dataset, and data source resource that is a dependency of the dashboard is also exported.

      " + }, + "ExportFormat":{ + "shape":"AssetBundleExportFormat", + "documentation":"

      The export data format.

      " + }, + "CloudFormationOverridePropertyConfiguration":{ + "shape":"AssetBundleCloudFormationOverridePropertyConfiguration", + "documentation":"

      An optional collection of structures that generate CloudFormation parameters to override the existing resource property values when the resource is exported to a new CloudFormation template.

      Use this field if the ExportFormat field of a StartAssetBundleExportJobRequest API call is set to CLOUDFORMATION_JSON.

      " + } + } + }, + "StartAssetBundleExportJobResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) for the export job.

      " + }, + "AssetBundleExportJobId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

      The ID of the job. This ID is unique while the job is running. After the job is completed, you can reuse this ID for another job.

      " + }, + "RequestId":{ + "shape":"NonEmptyString", + "documentation":"

      The Amazon Web Services response ID for this operation.

      " + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

      The HTTP status of the response.

      ", + "location":"statusCode" + } + } + }, + "StartAssetBundleImportJobRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "AssetBundleImportJobId", + "AssetBundleImportSource" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

      The ID of the Amazon Web Services account to import assets into.

      ", + "location":"uri", + "locationName":"AwsAccountId" + }, + "AssetBundleImportJobId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

      The ID of the job. This ID is unique while the job is running. After the job is completed, you can reuse this ID for another job.

      " + }, + "AssetBundleImportSource":{ + "shape":"AssetBundleImportSource", + "documentation":"

      The source of the asset bundle zip file that contains the data that you want to import.

      " + }, + "OverrideParameters":{ + "shape":"AssetBundleImportJobOverrideParameters", + "documentation":"

      Optional overrides to be applied to the resource configuration before import.

      " + }, + "FailureAction":{ + "shape":"AssetBundleImportFailureAction", + "documentation":"

      The failure action for the import job.

      If you choose ROLLBACK, failed import jobs will attempt to undo any asset changes caused by the failed job.

      If you choose DO_NOTHING, failed import jobs will not attempt to roll back any asset changes caused by the failed job, possibly keeping the Amazon QuickSight account in an inconsistent state.

      " + } + } + }, + "StartAssetBundleImportJobResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) for the import job.

      " + }, + "AssetBundleImportJobId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

      The ID of the job. This ID is unique while the job is running. After the job is completed, you can reuse this ID for another job.

      " + }, + "RequestId":{ + "shape":"NonEmptyString", + "documentation":"

      The Amazon Web Services response ID for this operation.

      " + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

      The HTTP status of the response.

      ", + "location":"statusCode" + } + } + }, "StatePersistenceConfigurations":{ "type":"structure", "required":["Enabled"], @@ -27453,11 +28760,6 @@ "RESTRICTED_READER" ] }, - "Username":{ - "type":"string", - "max":64, - "min":1 - }, "VPCConnection":{ "type":"structure", "members":{ diff --git a/services/ram/pom.xml b/services/ram/pom.xml index dff111d7a33f..4731e763232b 100644 --- a/services/ram/pom.xml +++ b/services/ram/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ram AWS Java SDK :: Services :: RAM diff --git a/services/rbin/pom.xml b/services/rbin/pom.xml index 2a3f67baa9b2..f489becd20e8 100644 --- a/services/rbin/pom.xml +++ b/services/rbin/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT rbin AWS Java SDK :: Services :: Rbin diff --git a/services/rds/pom.xml b/services/rds/pom.xml index 46bb1f0551a3..4cd434515474 100644 --- a/services/rds/pom.xml +++ b/services/rds/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT rds AWS Java SDK :: Services :: Amazon RDS diff --git a/services/rds/src/main/resources/codegen-resources/service-2.json b/services/rds/src/main/resources/codegen-resources/service-2.json index db65b7c96e61..8bbce23c12ac 100644 --- a/services/rds/src/main/resources/codegen-resources/service-2.json +++ b/services/rds/src/main/resources/codegen-resources/service-2.json @@ -1076,7 +1076,7 @@ "errors":[ {"shape":"DBClusterNotFoundFault"} ], - "documentation":"

      Returns information about Amazon Aurora DB clusters and Multi-AZ DB clusters. This API supports pagination.

      For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

      For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide.

      This operation can also return information for Amazon Neptune DB instances and Amazon DocumentDB instances.

      " + "documentation":"

      Describes existing Amazon Aurora DB clusters and Multi-AZ DB clusters. This API supports pagination.

      For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

      For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide.

      This operation can also return information for Amazon Neptune DB instances and Amazon DocumentDB instances.

      " }, "DescribeDBEngineVersions":{ "name":"DescribeDBEngineVersions", @@ -1121,7 +1121,7 @@ "errors":[ {"shape":"DBInstanceNotFoundFault"} ], - "documentation":"

      Returns information about provisioned RDS instances. This API supports pagination.

      This operation can also return information for Amazon Neptune DB instances and Amazon DocumentDB instances.

      " + "documentation":"

      Describes provisioned RDS instances. This API supports pagination.

      This operation can also return information for Amazon Neptune DB instances and Amazon DocumentDB instances.

      " }, "DescribeDBLogFiles":{ "name":"DescribeDBLogFiles", @@ -1696,7 +1696,7 @@ {"shape":"DomainNotFoundFault"}, {"shape":"StorageTypeNotAvailableFault"} ], - "documentation":"

      Modify the settings for an Amazon Aurora DB cluster or a Multi-AZ DB cluster. You can change one or more settings by specifying these parameters and the new values in the request.

      For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

      For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide.

      " + "documentation":"

      Modifies the settings of an Amazon Aurora DB cluster or a Multi-AZ DB cluster. You can change one or more settings by specifying these parameters and the new values in the request.

      For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

      For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide.

      " }, "ModifyDBClusterEndpoint":{ "name":"ModifyDBClusterEndpoint", @@ -3672,184 +3672,184 @@ "members":{ "AvailabilityZones":{ "shape":"AvailabilityZones", - "documentation":"

      A list of Availability Zones (AZs) where DB instances in the DB cluster can be created.

      For information on Amazon Web Services Regions and Availability Zones, see Choosing the Regions and Availability Zones in the Amazon Aurora User Guide.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      A list of Availability Zones (AZs) where DB instances in the DB cluster can be created.

      For information on Amazon Web Services Regions and Availability Zones, see Choosing the Regions and Availability Zones in the Amazon Aurora User Guide.

      Valid for Cluster Type: Aurora DB clusters only

      " }, "BackupRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

      The number of days for which automated backups are retained.

      Default: 1

      Constraints:

      • Must be a value from 1 to 35

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The number of days for which automated backups are retained.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      Default: 1

      Constraints:

      • Must be a value from 1 to 35.

      " }, "CharacterSetName":{ "shape":"String", - "documentation":"

      A value that indicates that the DB cluster should be associated with the specified CharacterSet.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      The name of the character set (CharacterSet) to associate the DB cluster with.

      Valid for Cluster Type: Aurora DB clusters only

      " }, "DatabaseName":{ "shape":"String", - "documentation":"

      The name for your database of up to 64 alphanumeric characters. If you do not provide a name, Amazon RDS doesn't create a database in the DB cluster you are creating.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The name for your database of up to 64 alphanumeric characters. If you don't provide a name, Amazon RDS doesn't create a database in the DB cluster you are creating.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      " }, "DBClusterIdentifier":{ "shape":"String", - "documentation":"

      The DB cluster identifier. This parameter is stored as a lowercase string.

      Constraints:

      • Must contain from 1 to 63 letters, numbers, or hyphens.

      • First character must be a letter.

      • Can't end with a hyphen or contain two consecutive hyphens.

      Example: my-cluster1

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The identifier for this DB cluster. This parameter is stored as a lowercase string.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      Constraints:

      • Must contain from 1 to 63 letters, numbers, or hyphens.

      • First character must be a letter.

      • Can't end with a hyphen or contain two consecutive hyphens.

      Example: my-cluster1

      " }, "DBClusterParameterGroupName":{ "shape":"String", - "documentation":"

      The name of the DB cluster parameter group to associate with this DB cluster. If you do not specify a value, then the default DB cluster parameter group for the specified DB engine and version is used.

      Constraints:

      • If supplied, must match the name of an existing DB cluster parameter group.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The name of the DB cluster parameter group to associate with this DB cluster. If you don't specify a value, then the default DB cluster parameter group for the specified DB engine and version is used.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      Constraints:

      • If supplied, must match the name of an existing DB cluster parameter group.

      " }, "VpcSecurityGroupIds":{ "shape":"VpcSecurityGroupIdList", - "documentation":"

      A list of EC2 VPC security groups to associate with this DB cluster.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      A list of EC2 VPC security groups to associate with this DB cluster.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      " }, "DBSubnetGroupName":{ "shape":"String", - "documentation":"

      A DB subnet group to associate with this DB cluster.

      This setting is required to create a Multi-AZ DB cluster.

      Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.

      Example: mydbsubnetgroup

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      A DB subnet group to associate with this DB cluster.

      This setting is required to create a Multi-AZ DB cluster.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      Constraints:

      • Must match the name of an existing DB subnet group.

      • Must not be default.

      Example: mydbsubnetgroup

      " }, "Engine":{ "shape":"String", - "documentation":"

      The name of the database engine to be used for this DB cluster.

      Valid Values:

      • aurora-mysql

      • aurora-postgresql

      • mysql

      • postgres

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The database engine to use for this DB cluster.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      Valid Values: aurora-mysql | aurora-postgresql | mysql | postgres

      " }, "EngineVersion":{ "shape":"String", - "documentation":"

      The version number of the database engine to use.

      To list all of the available engine versions for Aurora MySQL version 2 (5.7-compatible) and version 3 (MySQL 8.0-compatible), use the following command:

      aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"

      You can supply either 5.7 or 8.0 to use the default engine version for Aurora MySQL version 2 or version 3, respectively.

      To list all of the available engine versions for Aurora PostgreSQL, use the following command:

      aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"

      To list all of the available engine versions for RDS for MySQL, use the following command:

      aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\"

      To list all of the available engine versions for RDS for PostgreSQL, use the following command:

      aws rds describe-db-engine-versions --engine postgres --query \"DBEngineVersions[].EngineVersion\"

      Aurora MySQL

      For information, see Database engine updates for Amazon Aurora MySQL in the Amazon Aurora User Guide.

      Aurora PostgreSQL

      For information, see Amazon Aurora PostgreSQL releases and engine versions in the Amazon Aurora User Guide.

      MySQL

      For information, see Amazon RDS for MySQL in the Amazon RDS User Guide.

      PostgreSQL

      For information, see Amazon RDS for PostgreSQL in the Amazon RDS User Guide.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The version number of the database engine to use.

      To list all of the available engine versions for Aurora MySQL version 2 (5.7-compatible) and version 3 (MySQL 8.0-compatible), use the following command:

      aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"

      You can supply either 5.7 or 8.0 to use the default engine version for Aurora MySQL version 2 or version 3, respectively.

      To list all of the available engine versions for Aurora PostgreSQL, use the following command:

      aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"

      To list all of the available engine versions for RDS for MySQL, use the following command:

      aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\"

      To list all of the available engine versions for RDS for PostgreSQL, use the following command:

      aws rds describe-db-engine-versions --engine postgres --query \"DBEngineVersions[].EngineVersion\"

      For information about a specific engine, see the following topics:

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      " }, "Port":{ "shape":"IntegerOptional", - "documentation":"

      The port number on which the instances in the DB cluster accept connections.

      RDS for MySQL and Aurora MySQL

      Default: 3306

      Valid values: 1150-65535

      RDS for PostgreSQL and Aurora PostgreSQL

      Default: 5432

      Valid values: 1150-65535

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The port number on which the instances in the DB cluster accept connections.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      Valid Values: 1150-65535

      Default:

      • RDS for MySQL and Aurora MySQL - 3306

      • RDS for PostgreSQL and Aurora PostgreSQL - 5432

      " }, "MasterUsername":{ "shape":"String", - "documentation":"

      The name of the master user for the DB cluster.

      Constraints:

      • Must be 1 to 16 letters or numbers.

      • First character must be a letter.

      • Can't be a reserved word for the chosen database engine.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The name of the master user for the DB cluster.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      Constraints:

      • Must be 1 to 16 letters or numbers.

      • First character must be a letter.

      • Can't be a reserved word for the chosen database engine.

      " }, "MasterUserPassword":{ "shape":"String", - "documentation":"

      The password for the master database user. This password can contain any printable ASCII character except \"/\", \"\"\", or \"@\".

      Constraints:

      • Must contain from 8 to 41 characters.

      • Can't be specified if ManageMasterUserPassword is turned on.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The password for the master database user.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      Constraints:

      • Must contain from 8 to 41 characters.

      • Can contain any printable ASCII character except \"/\", \"\"\", or \"@\".

      • Can't be specified if ManageMasterUserPassword is turned on.

      " }, "OptionGroupName":{ "shape":"String", - "documentation":"

      A value that indicates that the DB cluster should be associated with the specified option group.

      DB clusters are associated with a default option group that can't be modified.

      " + "documentation":"

      The option group to associate the DB cluster with.

      DB clusters are associated with a default option group that can't be modified.

      " }, "PreferredBackupWindow":{ "shape":"String", - "documentation":"

      The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.

      The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. To view the time blocks available, see Backup window in the Amazon Aurora User Guide.

      Constraints:

      • Must be in the format hh24:mi-hh24:mi.

      • Must be in Universal Coordinated Time (UTC).

      • Must not conflict with the preferred maintenance window.

      • Must be at least 30 minutes.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. To view the time blocks available, see Backup window in the Amazon Aurora User Guide.

      Constraints:

      • Must be in the format hh24:mi-hh24:mi.

      • Must be in Universal Coordinated Time (UTC).

      • Must not conflict with the preferred maintenance window.

      • Must be at least 30 minutes.

      " }, "PreferredMaintenanceWindow":{ "shape":"String", - "documentation":"

      The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

      Format: ddd:hh24:mi-ddd:hh24:mi

      The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance Window in the Amazon Aurora User Guide.

      Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.

      Constraints: Minimum 30-minute window.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The weekly time range during which system maintenance can occur.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance Window in the Amazon Aurora User Guide.

      Constraints:

      • Must be in the format ddd:hh24:mi-ddd:hh24:mi.

      • Days must be one of Mon | Tue | Wed | Thu | Fri | Sat | Sun.

      • Must be in Universal Coordinated Time (UTC).

      • Must be at least 30 minutes.

      " }, "ReplicationSourceIdentifier":{ "shape":"String", - "documentation":"

      The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a read replica.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a read replica.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      " }, "Tags":{ "shape":"TagList", - "documentation":"

      Tags to assign to the DB cluster.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      Tags to assign to the DB cluster.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      " }, "StorageEncrypted":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether the DB cluster is encrypted.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      Specifies whether the DB cluster is encrypted.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      " }, "KmsKeyId":{ "shape":"String", - "documentation":"

      The Amazon Web Services KMS key identifier for an encrypted DB cluster.

      The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

      When a KMS key isn't specified in KmsKeyId:

      • If ReplicationSourceIdentifier identifies an encrypted source, then Amazon RDS will use the KMS key used to encrypt the source. Otherwise, Amazon RDS will use your default KMS key.

      • If the StorageEncrypted parameter is enabled and ReplicationSourceIdentifier isn't specified, then Amazon RDS will use your default KMS key.

      There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

      If you create a read replica of an encrypted DB cluster in another Amazon Web Services Region, you must set KmsKeyId to a KMS key identifier that is valid in the destination Amazon Web Services Region. This KMS key is used to encrypt the read replica in that Amazon Web Services Region.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The Amazon Web Services KMS key identifier for an encrypted DB cluster.

      The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

      When a KMS key isn't specified in KmsKeyId:

      • If ReplicationSourceIdentifier identifies an encrypted source, then Amazon RDS uses the KMS key used to encrypt the source. Otherwise, Amazon RDS uses your default KMS key.

      • If the StorageEncrypted parameter is enabled and ReplicationSourceIdentifier isn't specified, then Amazon RDS uses your default KMS key.

      There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

      If you create a read replica of an encrypted DB cluster in another Amazon Web Services Region, make sure to set KmsKeyId to a KMS key identifier that is valid in the destination Amazon Web Services Region. This KMS key is used to encrypt the read replica in that Amazon Web Services Region.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      " }, "PreSignedUrl":{ "shape":"String", - "documentation":"

      When you are replicating a DB cluster from one Amazon Web Services GovCloud (US) Region to another, a URL that contains a Signature Version 4 signed request for the CreateDBCluster operation to be called in the source Amazon Web Services Region where the DB cluster is replicated from. Specify PreSignedUrl only when you are performing cross-Region replication from an encrypted DB cluster.

      The presigned URL must be a valid request for the CreateDBCluster API operation that can run in the source Amazon Web Services Region that contains the encrypted DB cluster to copy.

      The presigned URL request must contain the following parameter values:

      • KmsKeyId - The KMS key identifier for the KMS key to use to encrypt the copy of the DB cluster in the destination Amazon Web Services Region. This should refer to the same KMS key for both the CreateDBCluster operation that is called in the destination Amazon Web Services Region, and the operation contained in the presigned URL.

      • DestinationRegion - The name of the Amazon Web Services Region that Aurora read replica will be created in.

      • ReplicationSourceIdentifier - The DB cluster identifier for the encrypted DB cluster to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are copying an encrypted DB cluster from the us-west-2 Amazon Web Services Region, then your ReplicationSourceIdentifier would look like Example: arn:aws:rds:us-west-2:123456789012:cluster:aurora-cluster1.

      To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.

      If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      When you are replicating a DB cluster from one Amazon Web Services GovCloud (US) Region to another, a URL that contains a Signature Version 4 signed request for the CreateDBCluster operation to be called in the source Amazon Web Services Region where the DB cluster is replicated from. Specify PreSignedUrl only when you are performing cross-Region replication from an encrypted DB cluster.

      The presigned URL must be a valid request for the CreateDBCluster API operation that can run in the source Amazon Web Services Region that contains the encrypted DB cluster to copy.

      The presigned URL request must contain the following parameter values:

      • KmsKeyId - The KMS key identifier for the KMS key to use to encrypt the copy of the DB cluster in the destination Amazon Web Services Region. This should refer to the same KMS key for both the CreateDBCluster operation that is called in the destination Amazon Web Services Region, and the operation contained in the presigned URL.

      • DestinationRegion - The name of the Amazon Web Services Region that Aurora read replica will be created in.

      • ReplicationSourceIdentifier - The DB cluster identifier for the encrypted DB cluster to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are copying an encrypted DB cluster from the us-west-2 Amazon Web Services Region, then your ReplicationSourceIdentifier would look like Example: arn:aws:rds:us-west-2:123456789012:cluster:aurora-cluster1.

      To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.

      If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region.

      Valid for Cluster Type: Aurora DB clusters only

      " }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.

      For more information, see IAM Database Authentication in the Amazon Aurora User Guide.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.

      For more information, see IAM Database Authentication in the Amazon Aurora User Guide.

      Valid for Cluster Type: Aurora DB clusters only

      " }, "BacktrackWindow":{ "shape":"LongOptional", - "documentation":"

      The target backtrack window, in seconds. To disable backtracking, set this value to 0.

      Default: 0

      Constraints:

      • If specified, this value must be set to a number from 0 to 259,200 (72 hours).

      Valid for: Aurora MySQL DB clusters only

      " + "documentation":"

      The target backtrack window, in seconds. To disable backtracking, set this value to 0.

      Valid for Cluster Type: Aurora MySQL DB clusters only

      Default: 0

      Constraints:

      • If specified, this value must be set to a number from 0 to 259,200 (72 hours).

      " }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

      The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used.

      RDS for MySQL

      Possible values are error, general, and slowquery.

      RDS for PostgreSQL

      Possible values are postgresql and upgrade.

      Aurora MySQL

      Possible values are audit, error, general, and slowquery.

      Aurora PostgreSQL

      Possible value is postgresql.

      For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

      For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The list of log types that need to be enabled for exporting to CloudWatch Logs.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      The following values are valid for each DB engine:

      • Aurora MySQL - audit | error | general | slowquery

      • Aurora PostgreSQL - postgresql

      • RDS for MySQL - error | general | slowquery

      • RDS for PostgreSQL - postgresql | upgrade

      For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

      For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

      " }, "EngineMode":{ "shape":"String", - "documentation":"

      The DB engine mode of the DB cluster, either provisioned or serverless.

      The serverless engine mode only applies for Aurora Serverless v1 DB clusters.

      For information about limitations and requirements for Serverless DB clusters, see the following sections in the Amazon Aurora User Guide:

      Valid for: Aurora DB clusters only

      " + "documentation":"

      The DB engine mode of the DB cluster, either provisioned or serverless.

      The serverless engine mode only applies for Aurora Serverless v1 DB clusters.

      For information about limitations and requirements for Serverless DB clusters, see the following sections in the Amazon Aurora User Guide:

      Valid for Cluster Type: Aurora DB clusters only

      " }, "ScalingConfiguration":{ "shape":"ScalingConfiguration", - "documentation":"

      For DB clusters in serverless DB engine mode, the scaling properties of the DB cluster.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      For DB clusters in serverless DB engine mode, the scaling properties of the DB cluster.

      Valid for Cluster Type: Aurora DB clusters only

      " }, "DeletionProtection":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      Specifies whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      " }, "GlobalClusterIdentifier":{ "shape":"String", - "documentation":"

      The global cluster ID of an Aurora cluster that becomes the primary cluster in the new global database cluster.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      The global cluster ID of an Aurora cluster that becomes the primary cluster in the new global database cluster.

      Valid for Cluster Type: Aurora DB clusters only

      " }, "EnableHttpEndpoint":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to enable the HTTP endpoint for an Aurora Serverless v1 DB cluster. By default, the HTTP endpoint is disabled.

      When enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless v1 DB cluster. You can also query your database from inside the RDS console with the query editor.

      For more information, see Using the Data API for Aurora Serverless v1 in the Amazon Aurora User Guide.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      Specifies whether to enable the HTTP endpoint for an Aurora Serverless v1 DB cluster. By default, the HTTP endpoint is disabled.

      When enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless v1 DB cluster. You can also query your database from inside the RDS console with the query editor.

      For more information, see Using the Data API for Aurora Serverless v1 in the Amazon Aurora User Guide.

      Valid for Cluster Type: Aurora DB clusters only

      " }, "CopyTagsToSnapshot":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      Specifies whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      " }, "Domain":{ "shape":"String", - "documentation":"

      The Active Directory directory ID to create the DB cluster in.

      For Amazon Aurora DB clusters, Amazon RDS can use Kerberos authentication to authenticate users that connect to the DB cluster.

      For more information, see Kerberos authentication in the Amazon Aurora User Guide.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      The Active Directory directory ID to create the DB cluster in.

      For Amazon Aurora DB clusters, Amazon RDS can use Kerberos authentication to authenticate users that connect to the DB cluster.

      For more information, see Kerberos authentication in the Amazon Aurora User Guide.

      Valid for Cluster Type: Aurora DB clusters only

      " }, "DomainIAMRoleName":{ "shape":"String", - "documentation":"

      Specify the name of the IAM role to be used when making API calls to the Directory Service.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      The name of the IAM role to use when making API calls to the Directory Service.

      Valid for Cluster Type: Aurora DB clusters only

      " }, "EnableGlobalWriteForwarding":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to enable this DB cluster to forward write operations to the primary cluster of an Aurora global database (GlobalCluster). By default, write operations are not allowed on Aurora DB clusters that are secondary clusters in an Aurora global database.

      You can set this value only on Aurora DB clusters that are members of an Aurora global database. With this parameter enabled, a secondary cluster can forward writes to the current primary cluster and the resulting changes are replicated back to this cluster. For the primary DB cluster of an Aurora global database, this value is used immediately if the primary is demoted by the FailoverGlobalCluster API operation, but it does nothing until then.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      Specifies whether to enable this DB cluster to forward write operations to the primary cluster of a global cluster (Aurora global database). By default, write operations are not allowed on Aurora DB clusters that are secondary clusters in an Aurora global database.

      You can set this value only on Aurora DB clusters that are members of an Aurora global database. With this parameter enabled, a secondary cluster can forward writes to the current primary cluster, and the resulting changes are replicated back to this cluster. For the primary DB cluster of an Aurora global database, this value is used immediately if the primary is demoted by a global cluster API operation, but it does nothing until then.

      Valid for Cluster Type: Aurora DB clusters only

      " }, "DBClusterInstanceClass":{ "shape":"String", - "documentation":"

      The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6gd.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.

      For the full list of DB instance classes and availability for your engine, see DB instance class in the Amazon RDS User Guide.

      This setting is required to create a Multi-AZ DB cluster.

      Valid for: Multi-AZ DB clusters only

      " + "documentation":"

      The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6gd.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.

      For the full list of DB instance classes and availability for your engine, see DB instance class in the Amazon RDS User Guide.

      This setting is required to create a Multi-AZ DB cluster.

      Valid for Cluster Type: Multi-AZ DB clusters only

      " }, "AllocatedStorage":{ "shape":"IntegerOptional", - "documentation":"

      The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster.

      This setting is required to create a Multi-AZ DB cluster.

      Valid for: Multi-AZ DB clusters only

      " + "documentation":"

      The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster.

      Valid for Cluster Type: Multi-AZ DB clusters only

      This setting is required to create a Multi-AZ DB cluster.

      " }, "StorageType":{ "shape":"String", - "documentation":"

      Specifies the storage type to be associated with the DB cluster.

      This setting is required to create a Multi-AZ DB cluster.

      When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required.

      Valid values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters)

      Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters)

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The storage type to associate with the DB cluster.

      For information on storage types for Aurora DB clusters, see Storage configurations for Amazon Aurora DB clusters. For information on storage types for Multi-AZ DB clusters, see Settings for creating Multi-AZ DB clusters.

      This setting is required to create a Multi-AZ DB cluster.

      When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      Valid Values:

      • Aurora DB clusters - aurora | aurora-iopt1

      • Multi-AZ DB clusters - io1

      Default:

      • Aurora DB clusters - aurora

      • Multi-AZ DB clusters - io1

      " }, "Iops":{ "shape":"IntegerOptional", - "documentation":"

      The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster.

      For information about valid IOPS values, see Provisioned IOPS storage in the Amazon RDS User Guide.

      This setting is required to create a Multi-AZ DB cluster.

      Constraints: Must be a multiple between .5 and 50 of the storage amount for the DB cluster.

      Valid for: Multi-AZ DB clusters only

      " + "documentation":"

      The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster.

      For information about valid IOPS values, see Provisioned IOPS storage in the Amazon RDS User Guide.

      This setting is required to create a Multi-AZ DB cluster.

      Valid for Cluster Type: Multi-AZ DB clusters only

      Constraints:

      • Must be a multiple between .5 and 50 of the storage amount for the DB cluster.

      " }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether the DB cluster is publicly accessible.

      When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

      When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.

      Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

      If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

      • If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private.

      • If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.

      If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

      • If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB cluster is private.

      • If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.

      Valid for: Multi-AZ DB clusters only

      " + "documentation":"

      Specifies whether the DB cluster is publicly accessible.

      When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

      When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.

      Valid for Cluster Type: Multi-AZ DB clusters only

      Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

      If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

      • If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private.

      • If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.

      If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

      • If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB cluster is private.

      • If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.

      " }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically.

      Valid for: Multi-AZ DB clusters only

      " + "documentation":"

      Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically.

      Valid for Cluster Type: Multi-AZ DB clusters only

      " }, "MonitoringInterval":{ "shape":"IntegerOptional", - "documentation":"

      The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0. The default is 0.

      If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0.

      Valid Values: 0, 1, 5, 10, 15, 30, 60

      Valid for: Multi-AZ DB clusters only

      " + "documentation":"

      The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0.

      If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0.

      Valid for Cluster Type: Multi-AZ DB clusters only

      Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60

      Default: 0

      " }, "MonitoringRoleArn":{ "shape":"String", - "documentation":"

      The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting up and enabling Enhanced Monitoring in the Amazon RDS User Guide.

      If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value.

      Valid for: Multi-AZ DB clusters only

      " + "documentation":"

      The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting up and enabling Enhanced Monitoring in the Amazon RDS User Guide.

      If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value.

      Valid for Cluster Type: Multi-AZ DB clusters only

      " }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to turn on Performance Insights for the DB cluster.

      For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.

      Valid for: Multi-AZ DB clusters only

      " + "documentation":"

      Specifies whether to turn on Performance Insights for the DB cluster.

      For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.

      Valid for Cluster Type: Multi-AZ DB clusters only

      " }, "PerformanceInsightsKMSKeyId":{ "shape":"String", - "documentation":"

      The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

      The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

      If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

      Valid for: Multi-AZ DB clusters only

      " + "documentation":"

      The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

      The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

      If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

      Valid for Cluster Type: Multi-AZ DB clusters only

      " }, "PerformanceInsightsRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

      The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

      • 7

      • month * 31, where month is a number of months from 1-23

      • 731

      For example, the following values are valid:

      • 93 (3 months * 31)

      • 341 (11 months * 31)

      • 589 (19 months * 31)

      • 731

      If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

      Valid for: Multi-AZ DB clusters only

      " + "documentation":"

      The number of days to retain Performance Insights data.

      Valid for Cluster Type: Multi-AZ DB clusters only

      Valid Values:

      • 7

      • month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31)

      • 731

      Default: 7 days

      If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error.

      " }, "ServerlessV2ScalingConfiguration":{"shape":"ServerlessV2ScalingConfiguration"}, "NetworkType":{ "shape":"String", - "documentation":"

      The network type of the DB cluster.

      Valid values:

      • IPV4

      • DUAL

      The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

      For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      The network type of the DB cluster.

      The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

      For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide.

      Valid for Cluster Type: Aurora DB clusters only

      Valid Values: IPV4 | DUAL

      " }, "DBSystemId":{ "shape":"String", @@ -3857,11 +3857,11 @@ }, "ManageMasterUserPassword":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to manage the master user password with Amazon Web Services Secrets Manager.

      For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User Guide.

      Constraints:

      • Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      Specifies whether to manage the master user password with Amazon Web Services Secrets Manager.

      For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User Guide.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      Constraints:

      • Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified.

      " }, "MasterUserSecretKmsKeyId":{ "shape":"String", - "documentation":"

      The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager.

      This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB cluster.

      The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

      If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key.

      There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager.

      This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB cluster.

      The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

      If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key.

      There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      " } }, "documentation":"

      " @@ -3943,31 +3943,31 @@ "members":{ "DBName":{ "shape":"String", - "documentation":"

      The meaning of this parameter differs according to the database engine you use.

      MySQL

      The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.

      Constraints:

      • Must contain 1 to 64 letters or numbers.

      • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

      • Can't be a word reserved by the specified database engine

      MariaDB

      The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.

      Constraints:

      • Must contain 1 to 64 letters or numbers.

      • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

      • Can't be a word reserved by the specified database engine

      PostgreSQL

      The name of the database to create when the DB instance is created. If this parameter isn't specified, a database named postgres is created in the DB instance.

      Constraints:

      • Must contain 1 to 63 letters, numbers, or underscores.

      • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

      • Can't be a word reserved by the specified database engine

      Oracle

      The Oracle System ID (SID) of the created DB instance. If you specify null, the default value ORCL is used. You can't specify the string NULL, or any other reserved word, for DBName.

      Default: ORCL

      Constraints:

      • Can't be longer than 8 characters

      Amazon RDS Custom for Oracle

      The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default value is ORCL.

      Default: ORCL

      Constraints:

      • It must contain 1 to 8 alphanumeric characters.

      • It must contain a letter.

      • It can't be a word reserved by the database engine.

      Amazon RDS Custom for SQL Server

      Not applicable. Must be null.

      SQL Server

      Not applicable. Must be null.

      Amazon Aurora MySQL

      The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in the DB cluster.

      Constraints:

      • It must contain 1 to 64 alphanumeric characters.

      • It can't be a word reserved by the database engine.

      Amazon Aurora PostgreSQL

      The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created. If this parameter isn't specified for an Aurora PostgreSQL DB cluster, a database named postgres is created in the DB cluster.

      Constraints:

      • It must contain 1 to 63 alphanumeric characters.

      • It must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9).

      • It can't be a word reserved by the database engine.

      " + "documentation":"

      The meaning of this parameter differs depending on the database engine.

      Amazon Aurora MySQL
      Amazon Aurora PostgreSQL
      Amazon RDS Custom for Oracle
      Amazon RDS Custom for SQL Server
      RDS for MariaDB
      RDS for MySQL
      RDS for Oracle
      RDS for PostgreSQL
      RDS for SQL Server

      The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB cluster.

      Constraints:

      • Must contain 1 to 64 alphanumeric characters.

      • Can't be a word reserved by the database engine.

      The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created.

      Default: postgres

      Constraints:

      • Must contain 1 to 63 alphanumeric characters.

      • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9).

      • Can't be a word reserved by the database engine.

      The Oracle System ID (SID) of the created RDS Custom DB instance.

      Default: ORCL

      Constraints:

      • Must contain 1 to 8 alphanumeric characters.

      • Must contain a letter.

      • Can't be a word reserved by the database engine.

      Not applicable. Must be null.

      The name of the database to create when the DB instance is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB instance.

      Constraints:

      • Must contain 1 to 64 letters or numbers.

      • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

      • Can't be a word reserved by the database engine.

      The name of the database to create when the DB instance is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB instance.

      Constraints:

      • Must contain 1 to 64 letters or numbers.

      • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

      • Can't be a word reserved by the database engine.

      The Oracle System ID (SID) of the created DB instance.

      Default: ORCL

      Constraints:

      • Can't be longer than 8 characters.

      • Can't be a word reserved by the database engine, such as the string NULL.

      The name of the database to create when the DB instance is created.

      Default: postgres

      Constraints:

      • Must contain 1 to 63 letters, numbers, or underscores.

      • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

      • Can't be a word reserved by the database engine.

      Not applicable. Must be null.

      " }, "DBInstanceIdentifier":{ "shape":"String", - "documentation":"

      The DB instance identifier. This parameter is stored as a lowercase string.

      Constraints:

      • Must contain from 1 to 63 letters, numbers, or hyphens.

      • First character must be a letter.

      • Can't end with a hyphen or contain two consecutive hyphens.

      Example: mydbinstance

      " + "documentation":"

      The identifier for this DB instance. This parameter is stored as a lowercase string.

      Constraints:

      • Must contain from 1 to 63 letters, numbers, or hyphens.

      • First character must be a letter.

      • Can't end with a hyphen or contain two consecutive hyphens.

      Example: mydbinstance

      " }, "AllocatedStorage":{ "shape":"IntegerOptional", - "documentation":"

      The amount of storage in gibibytes (GiB) to allocate for the DB instance.

      Type: Integer

      Amazon Aurora

      Not applicable. Aurora cluster volumes automatically grow as the amount of data in your database increases, though you are only charged for the space that you use in an Aurora cluster volume.

      Amazon RDS Custom

      Constraints to the amount of storage for each storage type are the following:

      • General Purpose (SSD) storage (gp2, gp3): Must be an integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server.

      • Provisioned IOPS storage (io1): Must be an integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server.

      MySQL

      Constraints to the amount of storage for each storage type are the following:

      • General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536.

      • Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.

      • Magnetic storage (standard): Must be an integer from 5 to 3072.

      MariaDB

      Constraints to the amount of storage for each storage type are the following:

      • General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536.

      • Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.

      • Magnetic storage (standard): Must be an integer from 5 to 3072.

      PostgreSQL

      Constraints to the amount of storage for each storage type are the following:

      • General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536.

      • Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.

      • Magnetic storage (standard): Must be an integer from 5 to 3072.

      Oracle

      Constraints to the amount of storage for each storage type are the following:

      • General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536.

      • Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.

      • Magnetic storage (standard): Must be an integer from 10 to 3072.

      SQL Server

      Constraints to the amount of storage for each storage type are the following:

      • General Purpose (SSD) storage (gp2, gp3):

        • Enterprise and Standard editions: Must be an integer from 20 to 16384.

        • Web and Express editions: Must be an integer from 20 to 16384.

      • Provisioned IOPS storage (io1):

        • Enterprise and Standard editions: Must be an integer from 100 to 16384.

        • Web and Express editions: Must be an integer from 100 to 16384.

      • Magnetic storage (standard):

        • Enterprise and Standard editions: Must be an integer from 20 to 1024.

        • Web and Express editions: Must be an integer from 20 to 1024.

      " + "documentation":"

      The amount of storage in gibibytes (GiB) to allocate for the DB instance.

      This setting doesn't apply to Amazon Aurora DB instances. Aurora cluster volumes automatically grow as the amount of data in your database increases, though you are only charged for the space that you use in an Aurora cluster volume.

      Amazon RDS Custom
      RDS for MariaDB
      RDS for MySQL
      RDS for Oracle
      RDS for PostgreSQL
      RDS for SQL Server

      Constraints to the amount of storage for each storage type are the following:

      • General Purpose (SSD) storage (gp2, gp3): Must be an integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server.

      • Provisioned IOPS storage (io1): Must be an integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server.

      Constraints to the amount of storage for each storage type are the following:

      • General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536.

      • Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.

      • Magnetic storage (standard): Must be an integer from 5 to 3072.

      Constraints to the amount of storage for each storage type are the following:

      • General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536.

      • Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.

      • Magnetic storage (standard): Must be an integer from 5 to 3072.

      Constraints to the amount of storage for each storage type are the following:

      • General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536.

      • Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.

      • Magnetic storage (standard): Must be an integer from 10 to 3072.

      Constraints to the amount of storage for each storage type are the following:

      • General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536.

      • Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.

      • Magnetic storage (standard): Must be an integer from 5 to 3072.

      Constraints to the amount of storage for each storage type are the following:

      • General Purpose (SSD) storage (gp2, gp3):

        • Enterprise and Standard editions: Must be an integer from 20 to 16384.

        • Web and Express editions: Must be an integer from 20 to 16384.

      • Provisioned IOPS storage (io1):

        • Enterprise and Standard editions: Must be an integer from 100 to 16384.

        • Web and Express editions: Must be an integer from 100 to 16384.

      • Magnetic storage (standard):

        • Enterprise and Standard editions: Must be an integer from 20 to 1024.

        • Web and Express editions: Must be an integer from 20 to 1024.

      " }, "DBInstanceClass":{ "shape":"String", - "documentation":"

      The compute and memory capacity of the DB instance, for example db.m5.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB instance classes in the Amazon RDS User Guide or Aurora DB instance classes in the Amazon Aurora User Guide.

      " + "documentation":"

      The compute and memory capacity of the DB instance, for example db.m5.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB instance classes in the Amazon RDS User Guide or Aurora DB instance classes in the Amazon Aurora User Guide.

      " }, "Engine":{ "shape":"String", - "documentation":"

      The name of the database engine to be used for this instance.

      Not every database engine is available for every Amazon Web Services Region.

      Valid Values:

      • aurora-mysql (for Aurora MySQL DB instances)

      • aurora-postgresql (for Aurora PostgreSQL DB instances)

      • custom-oracle-ee (for RDS Custom for Oracle DB instances)

      • custom-oracle-ee-cdb (for RDS Custom for Oracle DB instances)

      • custom-sqlserver-ee (for RDS Custom for SQL Server DB instances)

      • custom-sqlserver-se (for RDS Custom for SQL Server DB instances)

      • custom-sqlserver-web (for RDS Custom for SQL Server DB instances)

      • mariadb

      • mysql

      • oracle-ee

      • oracle-ee-cdb

      • oracle-se2

      • oracle-se2-cdb

      • postgres

      • sqlserver-ee

      • sqlserver-se

      • sqlserver-ex

      • sqlserver-web

      " + "documentation":"

      The database engine to use for this DB instance.

      Not every database engine is available in every Amazon Web Services Region.

      Valid Values:

      • aurora-mysql (for Aurora MySQL DB instances)

      • aurora-postgresql (for Aurora PostgreSQL DB instances)

      • custom-oracle-ee (for RDS Custom for Oracle DB instances)

      • custom-oracle-ee-cdb (for RDS Custom for Oracle DB instances)

      • custom-sqlserver-ee (for RDS Custom for SQL Server DB instances)

      • custom-sqlserver-se (for RDS Custom for SQL Server DB instances)

      • custom-sqlserver-web (for RDS Custom for SQL Server DB instances)

      • mariadb

      • mysql

      • oracle-ee

      • oracle-ee-cdb

      • oracle-se2

      • oracle-se2-cdb

      • postgres

      • sqlserver-ee

      • sqlserver-se

      • sqlserver-ex

      • sqlserver-web

      " }, "MasterUsername":{ "shape":"String", - "documentation":"

      The name for the master user.

      Amazon Aurora

      Not applicable. The name for the master user is managed by the DB cluster.

      Amazon RDS

      Constraints:

      • Required.

      • Must be 1 to 16 letters, numbers, or underscores.

      • First character must be a letter.

      • Can't be a reserved word for the chosen database engine.

      " + "documentation":"

      The name for the master user.

      This setting doesn't apply to Amazon Aurora DB instances. The name for the master user is managed by the DB cluster.

      This setting is required for RDS DB instances.

      Constraints:

      • Must be 1 to 16 letters, numbers, or underscores.

      • First character must be a letter.

      • Can't be a reserved word for the chosen database engine.

      " }, "MasterUserPassword":{ "shape":"String", - "documentation":"

      The password for the master user. The password can include any printable ASCII character except \"/\", \"\"\", or \"@\".

      Amazon Aurora

      Not applicable. The password for the master user is managed by the DB cluster.

      Constraints: Can't be specified if ManageMasterUserPassword is turned on.

      MariaDB

      Constraints: Must contain from 8 to 41 characters.

      Microsoft SQL Server

      Constraints: Must contain from 8 to 128 characters.

      MySQL

      Constraints: Must contain from 8 to 41 characters.

      Oracle

      Constraints: Must contain from 8 to 30 characters.

      PostgreSQL

      Constraints: Must contain from 8 to 128 characters.

      " + "documentation":"

      The password for the master user.

      This setting doesn't apply to Amazon Aurora DB instances. The password for the master user is managed by the DB cluster.

      Constraints:

      • Can't be specified if ManageMasterUserPassword is turned on.

      • Can include any printable ASCII character except \"/\", \"\"\", or \"@\".

      Length Constraints:

      • RDS for MariaDB - Must contain from 8 to 41 characters.

      • RDS for Microsoft SQL Server - Must contain from 8 to 128 characters.

      • RDS for MySQL - Must contain from 8 to 41 characters.

      • RDS for Oracle - Must contain from 8 to 30 characters.

      • RDS for PostgreSQL - Must contain from 8 to 128 characters.

      " }, "DBSecurityGroups":{ "shape":"DBSecurityGroupNameList", @@ -3975,71 +3975,71 @@ }, "VpcSecurityGroupIds":{ "shape":"VpcSecurityGroupIdList", - "documentation":"

      A list of Amazon EC2 VPC security groups to associate with this DB instance.

      Amazon Aurora

      Not applicable. The associated list of EC2 VPC security groups is managed by the DB cluster.

      Default: The default EC2 VPC security group for the DB subnet group's VPC.

      " + "documentation":"

      A list of Amazon EC2 VPC security groups to associate with this DB instance.

      This setting doesn't apply to Amazon Aurora DB instances. The associated list of EC2 VPC security groups is managed by the DB cluster.

      Default: The default EC2 VPC security group for the DB subnet group's VPC.

      " }, "AvailabilityZone":{ "shape":"String", - "documentation":"

      The Availability Zone (AZ) where the database will be created. For information on Amazon Web Services Regions and Availability Zones, see Regions and Availability Zones.

      Amazon Aurora

      Each Aurora DB cluster hosts copies of its storage in three separate Availability Zones. Specify one of these Availability Zones. Aurora automatically chooses an appropriate Availability Zone if you don't specify one.

      Default: A random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region.

      Example: us-east-1d

      Constraint: The AvailabilityZone parameter can't be specified if the DB instance is a Multi-AZ deployment. The specified Availability Zone must be in the same Amazon Web Services Region as the current endpoint.

      " + "documentation":"

      The Availability Zone (AZ) where the database will be created. For information on Amazon Web Services Regions and Availability Zones, see Regions and Availability Zones.

      For Amazon Aurora, each Aurora DB cluster hosts copies of its storage in three separate Availability Zones. Specify one of these Availability Zones. Aurora automatically chooses an appropriate Availability Zone if you don't specify one.

      Default: A random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region.

      Constraints:

      • The AvailabilityZone parameter can't be specified if the DB instance is a Multi-AZ deployment.

      • The specified Availability Zone must be in the same Amazon Web Services Region as the current endpoint.

      Example: us-east-1d

      " }, "DBSubnetGroupName":{ "shape":"String", - "documentation":"

      A DB subnet group to associate with this DB instance.

      Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.

      Example: mydbsubnetgroup

      " + "documentation":"

      A DB subnet group to associate with this DB instance.

      Constraints:

      • Must match the name of an existing DB subnet group.

      • Must not be default.

      Example: mydbsubnetgroup

      " }, "PreferredMaintenanceWindow":{ "shape":"String", - "documentation":"

      The time range each week during which system maintenance can occur, in Universal Coordinated Time (UTC). For more information, see Amazon RDS Maintenance Window.

      Format: ddd:hh24:mi-ddd:hh24:mi

      The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week.

      Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.

      Constraints: Minimum 30-minute window.

      " + "documentation":"

      The time range each week during which system maintenance can occur. For more information, see Amazon RDS Maintenance Window in the Amazon RDS User Guide.

      The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week.

      Constraints:

      • Must be in the format ddd:hh24:mi-ddd:hh24:mi.

      • The day values must be mon | tue | wed | thu | fri | sat | sun.

      • Must be in Universal Coordinated Time (UTC).

      • Must not conflict with the preferred backup window.

      • Must be at least 30 minutes.

      " }, "DBParameterGroupName":{ "shape":"String", - "documentation":"

      The name of the DB parameter group to associate with this DB instance. If you do not specify a value, then the default DB parameter group for the specified DB engine and version is used.

      This setting doesn't apply to RDS Custom.

      Constraints:

      • It must be 1 to 255 letters, numbers, or hyphens.

      • The first character must be a letter.

      • It can't end with a hyphen or contain two consecutive hyphens.

      " + "documentation":"

      The name of the DB parameter group to associate with this DB instance. If you don't specify a value, then Amazon RDS uses the default DB parameter group for the specified DB engine and version.

      This setting doesn't apply to RDS Custom DB instances.

      Constraints:

      • Must be 1 to 255 letters, numbers, or hyphens.

      • The first character must be a letter.

      • Can't end with a hyphen or contain two consecutive hyphens.

      " }, "BackupRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

      The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

      Amazon Aurora

      Not applicable. The retention period for automated backups is managed by the DB cluster.

      Default: 1

      Constraints:

      • Must be a value from 0 to 35

      • Can't be set to 0 if the DB instance is a source to read replicas

      • Can't be set to 0 for an RDS Custom for Oracle DB instance

      " + "documentation":"

      The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

      This setting doesn't apply to Amazon Aurora DB instances. The retention period for automated backups is managed by the DB cluster.

      Default: 1

      Constraints:

      • Must be a value from 0 to 35.

      • Can't be set to 0 if the DB instance is a source to read replicas.

      • Can't be set to 0 for an RDS Custom for Oracle DB instance.

      " }, "PreferredBackupWindow":{ "shape":"String", - "documentation":"

      The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. For more information, see Backup window in the Amazon RDS User Guide.

      Amazon Aurora

      Not applicable. The daily time range for creating automated backups is managed by the DB cluster.

      Constraints:

      • Must be in the format hh24:mi-hh24:mi.

      • Must be in Universal Coordinated Time (UTC).

      • Must not conflict with the preferred maintenance window.

      • Must be at least 30 minutes.

      " + "documentation":"

      The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. For more information, see Backup window in the Amazon RDS User Guide.

      This setting doesn't apply to Amazon Aurora DB instances. The daily time range for creating automated backups is managed by the DB cluster.

      Constraints:

      • Must be in the format hh24:mi-hh24:mi.

      • Must be in Universal Coordinated Time (UTC).

      • Must not conflict with the preferred maintenance window.

      • Must be at least 30 minutes.

      " }, "Port":{ "shape":"IntegerOptional", - "documentation":"

      The port number on which the database accepts connections.

      MySQL

      Default: 3306

      Valid values: 1150-65535

      Type: Integer

      MariaDB

      Default: 3306

      Valid values: 1150-65535

      Type: Integer

      PostgreSQL

      Default: 5432

      Valid values: 1150-65535

      Type: Integer

      Oracle

      Default: 1521

      Valid values: 1150-65535

      SQL Server

      Default: 1433

      Valid values: 1150-65535 except 1234, 1434, 3260, 3343, 3389, 47001, and 49152-49156.

      Amazon Aurora

      Default: 3306

      Valid values: 1150-65535

      Type: Integer

      " + "documentation":"

      The port number on which the database accepts connections.

      This setting doesn't apply to Aurora DB instances. The port number is managed by the cluster.

      Valid Values: 1150-65535

      Default:

      • RDS for MariaDB - 3306

      • RDS for Microsoft SQL Server - 1433

      • RDS for MySQL - 3306

      • RDS for Oracle - 1521

      • RDS for PostgreSQL - 5432

      Constraints:

      • For RDS for Microsoft SQL Server, the value can't be 1234, 1434, 3260, 3343, 3389, 47001, or 49152-49156.

      " }, "MultiAZ":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether the DB instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment.

      This setting doesn't apply to RDS Custom.

      Amazon Aurora

      Not applicable. DB instance Availability Zones (AZs) are managed by the DB cluster.

      " + "documentation":"

      Specifies whether the DB instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment.

      This setting doesn't apply to the following DB instances:

      • Amazon Aurora (DB instance Availability Zones (AZs) are managed by the DB cluster.)

      • RDS Custom

      " }, "EngineVersion":{ "shape":"String", - "documentation":"

      The version number of the database engine to use.

      For a list of valid engine versions, use the DescribeDBEngineVersions operation.

      The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every Amazon Web Services Region.

      Amazon Aurora

      Not applicable. The version number of the database engine to be used by the DB instance is managed by the DB cluster.

      Amazon RDS Custom for Oracle

      A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV name has the following format: 19.customized_string. A valid CEV name is 19.my_cev1. For more information, see Creating an RDS Custom for Oracle DB instance in the Amazon RDS User Guide.

      Amazon RDS Custom for SQL Server

      See RDS Custom for SQL Server general requirements in the Amazon RDS User Guide.

      MariaDB

      For information, see MariaDB on Amazon RDS Versions in the Amazon RDS User Guide.

      Microsoft SQL Server

      For information, see Microsoft SQL Server Versions on Amazon RDS in the Amazon RDS User Guide.

      MySQL

      For information, see MySQL on Amazon RDS Versions in the Amazon RDS User Guide.

      Oracle

      For information, see Oracle Database Engine Release Notes in the Amazon RDS User Guide.

      PostgreSQL

      For information, see Amazon RDS for PostgreSQL versions and extensions in the Amazon RDS User Guide.

      " + "documentation":"

      The version number of the database engine to use.

      This setting doesn't apply to Amazon Aurora DB instances. The version number of the database engine the DB instance uses is managed by the DB cluster.

      For a list of valid engine versions, use the DescribeDBEngineVersions operation.

      The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every Amazon Web Services Region.

      Amazon RDS Custom for Oracle
      Amazon RDS Custom for SQL Server
      RDS for MariaDB
      RDS for Microsoft SQL Server
      RDS for MySQL
      RDS for Oracle
      RDS for PostgreSQL

      A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV name has the following format: 19.customized_string. A valid CEV name is 19.my_cev1. For more information, see Creating an RDS Custom for Oracle DB instance in the Amazon RDS User Guide.

      See RDS Custom for SQL Server general requirements in the Amazon RDS User Guide.

      For information, see MariaDB on Amazon RDS versions in the Amazon RDS User Guide.

      For information, see Microsoft SQL Server versions on Amazon RDS in the Amazon RDS User Guide.

      For information, see MySQL on Amazon RDS versions in the Amazon RDS User Guide.

      For information, see Oracle Database Engine release notes in the Amazon RDS User Guide.

      For information, see Amazon RDS for PostgreSQL versions and extensions in the Amazon RDS User Guide.

      " }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether minor engine upgrades are applied automatically to the DB instance during the maintenance window. By default, minor engine upgrades are applied automatically.

      If you create an RDS Custom DB instance, you must set AutoMinorVersionUpgrade to false.

      " + "documentation":"

      Specifies whether minor engine upgrades are applied automatically to the DB instance during the maintenance window. By default, minor engine upgrades are applied automatically.

      If you create an RDS Custom DB instance, you must set AutoMinorVersionUpgrade to false.

      " }, "LicenseModel":{ "shape":"String", - "documentation":"

      License model information for this DB instance.

      Valid values: license-included | bring-your-own-license | general-public-license

      This setting doesn't apply to RDS Custom.

      Amazon Aurora

      Not applicable.

      " + "documentation":"

      The license model information for this DB instance.

      This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

      Valid Values:

      • RDS for MariaDB - general-public-license

      • RDS for Microsoft SQL Server - license-included

      • RDS for MySQL - general-public-license

      • RDS for Oracle - bring-your-own-license | license-included

      • RDS for PostgreSQL - postgresql-license

      " }, "Iops":{ "shape":"IntegerOptional", - "documentation":"

      The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. For information about valid IOPS values, see Amazon RDS DB instance storage in the Amazon RDS User Guide.

      Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL DB instances, must be a multiple between .5 and 50 of the storage amount for the DB instance. For SQL Server DB instances, must be a multiple between 1 and 50 of the storage amount for the DB instance.

      Amazon Aurora

      Not applicable. Storage is managed by the DB cluster.

      " + "documentation":"

      The amount of Provisioned IOPS (input/output operations per second) to initially allocate for the DB instance. For information about valid IOPS values, see Amazon RDS DB instance storage in the Amazon RDS User Guide.

      This setting doesn't apply to Amazon Aurora DB instances. Storage is managed by the DB cluster.

      Constraints:

      • For RDS for MariaDB, MySQL, Oracle, and PostgreSQL - Must be a multiple between .5 and 50 of the storage amount for the DB instance.

      • For RDS for SQL Server - Must be a multiple between 1 and 50 of the storage amount for the DB instance.

      " }, "OptionGroupName":{ "shape":"String", - "documentation":"

      A value that indicates that the DB instance should be associated with the specified option group.

      Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group. Also, that option group can't be removed from a DB instance after it is associated with a DB instance.

      This setting doesn't apply to RDS Custom.

      Amazon Aurora

      Not applicable.

      " + "documentation":"

      The option group to associate the DB instance with.

      Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group. Also, that option group can't be removed from a DB instance after it is associated with a DB instance.

      This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

      " }, "CharacterSetName":{ "shape":"String", - "documentation":"

      For supported engines, this value indicates that the DB instance should be associated with the specified CharacterSet.

      This setting doesn't apply to RDS Custom. However, if you need to change the character set, you can change it on the database itself.

      Amazon Aurora

      Not applicable. The character set is managed by the DB cluster. For more information, see CreateDBCluster.

      " + "documentation":"

      For supported engines, the character set (CharacterSet) to associate the DB instance with.

      This setting doesn't apply to the following DB instances:

      • Amazon Aurora - The character set is managed by the DB cluster. For more information, see CreateDBCluster.

      • RDS Custom - However, if you need to change the character set, you can change it on the database itself.

      " }, "NcharCharacterSetName":{ "shape":"String", - "documentation":"

      The name of the NCHAR character set for the Oracle DB instance.

      This parameter doesn't apply to RDS Custom.

      " + "documentation":"

      The name of the NCHAR character set for the Oracle DB instance.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether the DB instance is publicly accessible.

      When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB instance's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB instance doesn't permit it.

      When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

      Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

      If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

      • If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB instance is private.

      • If the default VPC in the target Region has an internet gateway attached to it, the DB instance is public.

      If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

      • If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB instance is private.

      • If the subnets are part of a VPC that has an internet gateway attached to it, the DB instance is public.

      " + "documentation":"

      Specifies whether the DB instance is publicly accessible.

      When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB instance's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB instance doesn't permit it.

      When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

      Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

      If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

      • If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB instance is private.

      • If the default VPC in the target Region has an internet gateway attached to it, the DB instance is public.

      If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

      • If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB instance is private.

      • If the subnets are part of a VPC that has an internet gateway attached to it, the DB instance is public.

      " }, "Tags":{ "shape":"TagList", @@ -4047,51 +4047,51 @@ }, "DBClusterIdentifier":{ "shape":"String", - "documentation":"

      The identifier of the DB cluster that the instance will belong to.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      The identifier of the DB cluster that this DB instance will belong to.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "StorageType":{ "shape":"String", - "documentation":"

      Specifies the storage type to be associated with the DB instance.

      Valid values: gp2 | gp3 | io1 | standard

      If you specify io1 or gp3, you must also include a value for the Iops parameter.

      Default: io1 if the Iops parameter is specified, otherwise gp2

      Amazon Aurora

      Not applicable. Storage is managed by the DB cluster.

      " + "documentation":"

      The storage type to associate with the DB instance.

      If you specify io1 or gp3, you must also include a value for the Iops parameter.

      This setting doesn't apply to Amazon Aurora DB instances. Storage is managed by the DB cluster.

      Valid Values: gp2 | gp3 | io1 | standard

      Default: io1, if the Iops parameter is specified. Otherwise, gp2.

      " }, "TdeCredentialArn":{ "shape":"String", - "documentation":"

      The ARN from the key store with which to associate the instance for TDE encryption.

      This setting doesn't apply to RDS Custom.

      Amazon Aurora

      Not applicable.

      " + "documentation":"

      The ARN from the key store with which to associate the instance for TDE encryption.

      This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

      " }, "TdeCredentialPassword":{ "shape":"String", - "documentation":"

      The password for the given ARN from the key store in order to access the device.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      The password for the given ARN from the key store in order to access the device.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "StorageEncrypted":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether the DB instance is encrypted. By default, it isn't encrypted.

      For RDS Custom instances, either set this parameter to true or leave it unset. If you set this parameter to false, RDS reports an error.

      Amazon Aurora

      Not applicable. The encryption for DB instances is managed by the DB cluster.

      " + "documentation":"

      Specifies whether the DB instance is encrypted. By default, it isn't encrypted.

      For RDS Custom DB instances, either enable this setting or leave it unset. Otherwise, Amazon RDS reports an error.

      This setting doesn't apply to Amazon Aurora DB instances. The encryption for DB instances is managed by the DB cluster.

      " }, "KmsKeyId":{ "shape":"String", - "documentation":"

      The Amazon Web Services KMS key identifier for an encrypted DB instance.

      The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

      Amazon Aurora

      Not applicable. The Amazon Web Services KMS key identifier is managed by the DB cluster. For more information, see CreateDBCluster.

      If StorageEncrypted is enabled, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

      Amazon RDS Custom

      A KMS key is required for RDS Custom instances. For most RDS engines, if you leave this parameter empty while enabling StorageEncrypted, the engine uses the default KMS key. However, RDS Custom doesn't use the default key when this parameter is empty. You must explicitly specify a key.

      " + "documentation":"

      The Amazon Web Services KMS key identifier for an encrypted DB instance.

      The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

      This setting doesn't apply to Amazon Aurora DB instances. The Amazon Web Services KMS key identifier is managed by the DB cluster. For more information, see CreateDBCluster.

      If StorageEncrypted is enabled, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

      For Amazon RDS Custom, a KMS key is required for DB instances. For most RDS engines, if you leave this parameter empty while enabling StorageEncrypted, the engine uses the default KMS key. However, RDS Custom doesn't use the default key when this parameter is empty. You must explicitly specify a key.

      " }, "Domain":{ "shape":"String", - "documentation":"

      The Active Directory directory ID to create the DB instance in. Currently, only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.

      For more information, see Kerberos Authentication in the Amazon RDS User Guide.

      This setting doesn't apply to RDS Custom.

      Amazon Aurora

      Not applicable. The domain is managed by the DB cluster.

      " + "documentation":"

      The Active Directory directory ID to create the DB instance in. Currently, only Microsoft SQL Server, MySQL, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.

      For more information, see Kerberos Authentication in the Amazon RDS User Guide.

      This setting doesn't apply to the following DB instances:

      • Amazon Aurora (The domain is managed by the DB cluster.)

      • RDS Custom

      " }, "CopyTagsToSnapshot":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.

      Amazon Aurora

      Not applicable. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting.

      " + "documentation":"

      Specifies whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.

      This setting doesn't apply to Amazon Aurora DB instances. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting.

      " }, "MonitoringInterval":{ "shape":"IntegerOptional", - "documentation":"

      The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify 0. The default is 0.

      If MonitoringRoleArn is specified, then you must set MonitoringInterval to a value other than 0.

      This setting doesn't apply to RDS Custom.

      Valid Values: 0, 1, 5, 10, 15, 30, 60

      " + "documentation":"

      The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify 0.

      If MonitoringRoleArn is specified, then you must set MonitoringInterval to a value other than 0.

      This setting doesn't apply to RDS Custom DB instances.

      Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60

      Default: 0

      " }, "MonitoringRoleArn":{ "shape":"String", - "documentation":"

      The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting Up and Enabling Enhanced Monitoring in the Amazon RDS User Guide.

      If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting Up and Enabling Enhanced Monitoring in the Amazon RDS User Guide.

      If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "DomainIAMRoleName":{ "shape":"String", - "documentation":"

      Specify the name of the IAM role to be used when making API calls to the Directory Service.

      This setting doesn't apply to RDS Custom.

      Amazon Aurora

      Not applicable. The domain is managed by the DB cluster.

      " + "documentation":"

      The name of the IAM role to use when making API calls to the Directory Service.

      This setting doesn't apply to the following DB instances:

      • Amazon Aurora (The domain is managed by the DB cluster.)

      • RDS Custom

      " }, "PromotionTier":{ "shape":"IntegerOptional", - "documentation":"

      A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide.

      This setting doesn't apply to RDS Custom.

      Default: 1

      Valid Values: 0 - 15

      " + "documentation":"

      The order of priority in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide.

      This setting doesn't apply to RDS Custom DB instances.

      Default: 1

      Valid Values: 0 - 15

      " }, "Timezone":{ "shape":"String", @@ -4099,59 +4099,59 @@ }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.

      For more information, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

      This setting doesn't apply to RDS Custom.

      Amazon Aurora

      Not applicable. Mapping Amazon Web Services IAM accounts to database accounts is managed by the DB cluster.

      " + "documentation":"

      Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.

      For more information, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

      This setting doesn't apply to the following DB instances:

      • Amazon Aurora (Mapping Amazon Web Services IAM accounts to database accounts is managed by the DB cluster.)

      • RDS Custom

      " }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to enable Performance Insights for the DB instance. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      Specifies whether to enable Performance Insights for the DB instance. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "PerformanceInsightsKMSKeyId":{ "shape":"String", - "documentation":"

      The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

      The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

      If you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

      The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

      If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "PerformanceInsightsRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

      The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

      • 7

      • month * 31, where month is a number of months from 1-23

      • 731

      For example, the following values are valid:

      • 93 (3 months * 31)

      • 341 (11 months * 31)

      • 589 (19 months * 31)

      • 731

      If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      The number of days to retain Performance Insights data.

      This setting doesn't apply to RDS Custom DB instances.

      Valid Values:

      • 7

      • month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31)

      • 731

      Default: 7 days

      If you specify a retention period that isn't valid, such as 94, Amazon RDS returns an error.

      " }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

      The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

      Amazon Aurora

      Not applicable. CloudWatch Logs exports are managed by the DB cluster.

      RDS Custom

      Not applicable.

      MariaDB

      Possible values are audit, error, general, and slowquery.

      Microsoft SQL Server

      Possible values are agent and error.

      MySQL

      Possible values are audit, error, general, and slowquery.

      Oracle

      Possible values are alert, audit, listener, trace, and oemagent.

      PostgreSQL

      Possible values are postgresql and upgrade.

      " + "documentation":"

      The list of log types that need to be enabled for exporting to CloudWatch Logs. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

      This setting doesn't apply to the following DB instances:

      • Amazon Aurora (CloudWatch Logs exports are managed by the DB cluster.)

      • RDS Custom

      The following values are valid for each DB engine:

      • RDS for MariaDB - audit | error | general | slowquery

      • RDS for Microsoft SQL Server - agent | error

      • RDS for MySQL - audit | error | general | slowquery

      • RDS for Oracle - alert | audit | listener | trace | oemagent

      • RDS for PostgreSQL - postgresql | upgrade

      " }, "ProcessorFeatures":{ "shape":"ProcessorFeatureList", - "documentation":"

      The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.

      This setting doesn't apply to RDS Custom.

      Amazon Aurora

      Not applicable.

      " + "documentation":"

      The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.

      This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

      " }, "DeletionProtection":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance.

      Amazon Aurora

      Not applicable. You can enable or disable deletion protection for the DB cluster. For more information, see CreateDBCluster. DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.

      " + "documentation":"

      Specifies whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance.

      This setting doesn't apply to Amazon Aurora DB instances. You can enable or disable deletion protection for the DB cluster. For more information, see CreateDBCluster. DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.

      " }, "MaxAllocatedStorage":{ "shape":"IntegerOptional", - "documentation":"

      The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.

      For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.

      This setting doesn't apply to RDS Custom.

      Amazon Aurora

      Not applicable. Storage is managed by the DB cluster.

      " + "documentation":"

      The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.

      For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.

      This setting doesn't apply to the following DB instances:

      • Amazon Aurora (Storage is managed by the DB cluster.)

      • RDS Custom

      " }, "EnableCustomerOwnedIp":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance.

      A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the DB instance from outside of its virtual private cloud (VPC) on your local network.

      For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.

      For more information about CoIPs, see Customer-owned IP addresses in the Amazon Web Services Outposts User Guide.

      " + "documentation":"

      Specifies whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance.

      A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the DB instance from outside of its virtual private cloud (VPC) on your local network.

      For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.

      For more information about CoIPs, see Customer-owned IP addresses in the Amazon Web Services Outposts User Guide.

      " }, "CustomIamInstanceProfile":{ "shape":"String", - "documentation":"

      The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. The instance profile must meet the following requirements:

      • The profile must exist in your account.

      • The profile must have an IAM role that Amazon EC2 has permissions to assume.

      • The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom.

      For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide.

      This setting is required for RDS Custom.

      " + "documentation":"

      The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance.

      This setting is required for RDS Custom.

      Constraints:

      • The profile must exist in your account.

      • The profile must have an IAM role that Amazon EC2 has permissions to assume.

      • The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom.

      For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide.

      " }, "BackupTarget":{ "shape":"String", - "documentation":"

      Specifies where automated backups and manual snapshots are stored.

      Possible values are outposts (Amazon Web Services Outposts) and region (Amazon Web Services Region). The default is region.

      For more information, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.

      " + "documentation":"

      The location for storing automated backups and manual snapshots.

      Valid Values:

      • outposts (Amazon Web Services Outposts)

      • region (Amazon Web Services Region)

      Default: region

      For more information, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.

      " }, "NetworkType":{ "shape":"String", - "documentation":"

      The network type of the DB instance.

      Valid values:

      • IPV4

      • DUAL

      The network type is determined by the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

      For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide.

      " + "documentation":"

      The network type of the DB instance.

      The network type is determined by the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

      For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide.

      Valid Values: IPV4 | DUAL

      " }, "StorageThroughput":{ "shape":"IntegerOptional", - "documentation":"

      Specifies the storage throughput value for the DB instance.

      This setting applies only to the gp3 storage type.

      This setting doesn't apply to RDS Custom or Amazon Aurora.

      " + "documentation":"

      The storage throughput value for the DB instance.

      This setting applies only to the gp3 storage type.

      This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

      " }, "ManageMasterUserPassword":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to manage the master user password with Amazon Web Services Secrets Manager.

      For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide.

      Constraints:

      • Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified.

      " + "documentation":"

      Specifies whether to manage the master user password with Amazon Web Services Secrets Manager.

      For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide.

      Constraints:

      • Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified.

      " }, "MasterUserSecretKmsKeyId":{ "shape":"String", @@ -4159,7 +4159,7 @@ }, "CACertificateIdentifier":{ "shape":"String", - "documentation":"

      Specifies the CA certificate identifier to use for the DB instance’s server certificate.

      This setting doesn't apply to RDS Custom.

      For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.

      " + "documentation":"

      The CA certificate identifier to use for the DB instance's server certificate.

      This setting doesn't apply to RDS Custom DB instances.

      For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.

      " } }, "documentation":"

      " @@ -4766,11 +4766,11 @@ }, "AvailabilityZones":{ "shape":"AvailabilityZones", - "documentation":"

      Provides the list of Availability Zones (AZs) where instances in the DB cluster can be created.

      " + "documentation":"

      The list of Availability Zones (AZs) where instances in the DB cluster can be created.

      " }, "BackupRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

      Specifies the number of days for which automatic DB snapshots are retained.

      " + "documentation":"

      The number of days for which automatic DB snapshots are retained.

      " }, "CharacterSetName":{ "shape":"String", @@ -4778,23 +4778,23 @@ }, "DatabaseName":{ "shape":"String", - "documentation":"

      Contains the name of the initial database of this DB cluster that was provided at create time, if one was specified when the DB cluster was created. This same name is returned for the life of the DB cluster.

      " + "documentation":"

      The name of the initial database that was specified for the DB cluster when it was created, if one was provided. This same name is returned for the life of the DB cluster.

      " }, "DBClusterIdentifier":{ "shape":"String", - "documentation":"

      Contains a user-supplied DB cluster identifier. This identifier is the unique key that identifies a DB cluster.

      " + "documentation":"

      The user-supplied identifier for the DB cluster. This identifier is the unique key that identifies a DB cluster.

      " }, "DBClusterParameterGroup":{ "shape":"String", - "documentation":"

      Specifies the name of the DB cluster parameter group for the DB cluster.

      " + "documentation":"

      The name of the DB cluster parameter group for the DB cluster.

      " }, "DBSubnetGroup":{ "shape":"String", - "documentation":"

      Specifies information on the subnet group associated with the DB cluster, including the name, description, and subnets in the subnet group.

      " + "documentation":"

      Information about the subnet group associated with the DB cluster, including the name, description, and subnets in the subnet group.

      " }, "Status":{ "shape":"String", - "documentation":"

      Specifies the current state of this DB cluster.

      " + "documentation":"

      The current state of this DB cluster.

      " }, "AutomaticRestartTime":{ "shape":"TStamp", @@ -4802,7 +4802,7 @@ }, "PercentProgress":{ "shape":"String", - "documentation":"

      Specifies the progress of the operation as a percentage.

      " + "documentation":"

      The progress of the operation as a percentage.

      " }, "EarliestRestorableTime":{ "shape":"TStamp", @@ -4810,7 +4810,7 @@ }, "Endpoint":{ "shape":"String", - "documentation":"

      Specifies the connection endpoint for the primary instance of the DB cluster.

      " + "documentation":"

      The connection endpoint for the primary instance of the DB cluster.

      " }, "ReaderEndpoint":{ "shape":"String", @@ -4818,47 +4818,47 @@ }, "CustomEndpoints":{ "shape":"StringList", - "documentation":"

      Identifies all custom endpoints associated with the cluster.

      " + "documentation":"

      The custom endpoints associated with the DB cluster.

      " }, "MultiAZ":{ "shape":"BooleanOptional", - "documentation":"

      Specifies whether the DB cluster has instances in multiple Availability Zones.

      " + "documentation":"

      Indicates whether the DB cluster has instances in multiple Availability Zones.

      " }, "Engine":{ "shape":"String", - "documentation":"

      The name of the database engine to be used for this DB cluster.

      " + "documentation":"

      The database engine used for this DB cluster.

      " }, "EngineVersion":{ "shape":"String", - "documentation":"

      Indicates the database engine version.

      " + "documentation":"

      The version of the database engine.

      " }, "LatestRestorableTime":{ "shape":"TStamp", - "documentation":"

      Specifies the latest time to which a database can be restored with point-in-time restore.

      " + "documentation":"

      The latest time to which a database can be restored with point-in-time restore.

      " }, "Port":{ "shape":"IntegerOptional", - "documentation":"

      Specifies the port that the database engine is listening on.

      " + "documentation":"

      The port that the database engine is listening on.

      " }, "MasterUsername":{ "shape":"String", - "documentation":"

      Contains the master username for the DB cluster.

      " + "documentation":"

      The master username for the DB cluster.

      " }, "DBClusterOptionGroupMemberships":{ "shape":"DBClusterOptionGroupMemberships", - "documentation":"

      Provides the list of option group memberships for this DB cluster.

      " + "documentation":"

      The list of option group memberships for this DB cluster.

      " }, "PreferredBackupWindow":{ "shape":"String", - "documentation":"

      Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod.

      " + "documentation":"

      The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod.

      " }, "PreferredMaintenanceWindow":{ "shape":"String", - "documentation":"

      Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

      " + "documentation":"

      The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

      " }, "ReplicationSourceIdentifier":{ "shape":"String", - "documentation":"

      Contains the identifier of the source DB cluster if this DB cluster is a read replica.

      " + "documentation":"

      The identifier of the source DB cluster if this DB cluster is a read replica.

      " }, "ReadReplicaIdentifiers":{ "shape":"ReadReplicaIdentifierList", @@ -4866,19 +4866,19 @@ }, "DBClusterMembers":{ "shape":"DBClusterMemberList", - "documentation":"

      Provides the list of instances that make up the DB cluster.

      " + "documentation":"

      The list of DB instances that make up the DB cluster.

      " }, "VpcSecurityGroups":{ "shape":"VpcSecurityGroupMembershipList", - "documentation":"

      Provides a list of VPC security groups that the DB cluster belongs to.

      " + "documentation":"

      The list of VPC security groups that the DB cluster belongs to.

      " }, "HostedZoneId":{ "shape":"String", - "documentation":"

      Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.

      " + "documentation":"

      The ID that Amazon Route 53 assigns when you create a hosted zone.

      " }, "StorageEncrypted":{ "shape":"Boolean", - "documentation":"

      Specifies whether the DB cluster is encrypted.

      " + "documentation":"

      Indicates whether the DB cluster is encrypted.

      " }, "KmsKeyId":{ "shape":"String", @@ -4894,19 +4894,19 @@ }, "AssociatedRoles":{ "shape":"DBClusterRoles", - "documentation":"

      Provides a list of the Amazon Web Services Identity and Access Management (IAM) roles that are associated with the DB cluster. IAM roles that are associated with a DB cluster grant permission for the DB cluster to access other Amazon Web Services on your behalf.

      " + "documentation":"

      A list of the Amazon Web Services Identity and Access Management (IAM) roles that are associated with the DB cluster. IAM roles that are associated with a DB cluster grant permission for the DB cluster to access other Amazon Web Services on your behalf.

      " }, "IAMDatabaseAuthenticationEnabled":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether the mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled.

      " + "documentation":"

      Indicates whether the mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled.

      " }, "CloneGroupId":{ "shape":"String", - "documentation":"

      Identifies the clone group to which the DB cluster is associated.

      " + "documentation":"

      The ID of the clone group with which the DB cluster is associated.

      " }, "ClusterCreateTime":{ "shape":"TStamp", - "documentation":"

      Specifies the time when the DB cluster was created, in Universal Coordinated Time (UTC).

      " + "documentation":"

      The time when the DB cluster was created, in Universal Coordinated Time (UTC).

      " }, "EarliestBacktrackTime":{ "shape":"TStamp", @@ -4914,7 +4914,7 @@ }, "BacktrackWindow":{ "shape":"LongOptional", - "documentation":"

      The target backtrack window, in seconds. If this value is set to 0, backtracking is disabled for the DB cluster. Otherwise, backtracking is enabled.

      " + "documentation":"

      The target backtrack window, in seconds. If this value is set to 0, backtracking is disabled for the DB cluster. Otherwise, backtracking is enabled.

      " }, "BacktrackConsumedChangeRecords":{ "shape":"LongOptional", @@ -4926,7 +4926,7 @@ }, "Capacity":{ "shape":"IntegerOptional", - "documentation":"

      The current capacity of an Aurora Serverless v1 DB cluster. The capacity is 0 (zero) when the cluster is paused.

      For more information about Aurora Serverless v1, see Using Amazon Aurora Serverless v1 in the Amazon Aurora User Guide.

      " + "documentation":"

      The current capacity of an Aurora Serverless v1 DB cluster. The capacity is 0 (zero) when the cluster is paused.

      For more information about Aurora Serverless v1, see Using Amazon Aurora Serverless v1 in the Amazon Aurora User Guide.

      " }, "EngineMode":{ "shape":"String", @@ -4935,11 +4935,11 @@ "ScalingConfigurationInfo":{"shape":"ScalingConfigurationInfo"}, "DeletionProtection":{ "shape":"BooleanOptional", - "documentation":"

      Indicates if the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled.

      " + "documentation":"

      Indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled.

      " }, "HttpEndpointEnabled":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether the HTTP endpoint for an Aurora Serverless v1 DB cluster is enabled.

      When enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless v1 DB cluster. You can also query your database from inside the RDS console with the query editor.

      For more information, see Using the Data API for Aurora Serverless v1 in the Amazon Aurora User Guide.

      " + "documentation":"

      Indicates whether the HTTP endpoint for an Aurora Serverless v1 DB cluster is enabled.

      When enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless v1 DB cluster. You can also query your database from inside the RDS console with the query editor.

      For more information, see Using the Data API for Aurora Serverless v1 in the Amazon Aurora User Guide.

      " }, "ActivityStreamMode":{ "shape":"ActivityStreamMode", @@ -4959,11 +4959,11 @@ }, "CopyTagsToSnapshot":{ "shape":"BooleanOptional", - "documentation":"

      Specifies whether tags are copied from the DB cluster to snapshots of the DB cluster.

      " + "documentation":"

      Indicates whether tags are copied from the DB cluster to snapshots of the DB cluster.

      " }, "CrossAccountClone":{ "shape":"BooleanOptional", - "documentation":"

      Specifies whether the DB cluster is a clone of a DB cluster owned by a different Amazon Web Services account.

      " + "documentation":"

      Indicates whether the DB cluster is a clone of a DB cluster owned by a different Amazon Web Services account.

      " }, "DomainMemberships":{ "shape":"DomainMembershipList", @@ -4972,15 +4972,15 @@ "TagList":{"shape":"TagList"}, "GlobalWriteForwardingStatus":{ "shape":"WriteForwardingStatus", - "documentation":"

      Specifies whether a secondary cluster in an Aurora global database has write forwarding enabled, not enabled, or is in the process of enabling it.

      " + "documentation":"

      The status of write forwarding for a secondary cluster in an Aurora global database.

      " }, "GlobalWriteForwardingRequested":{ "shape":"BooleanOptional", - "documentation":"

      Specifies whether you have requested to enable write forwarding for a secondary cluster in an Aurora global database. Because write forwarding takes time to enable, check the value of GlobalWriteForwardingStatus to confirm that the request has completed before using the write forwarding feature for this cluster.

      " + "documentation":"

      Specifies whether write forwarding is enabled for a secondary cluster in an Aurora global database. Because write forwarding takes time to enable, check the value of GlobalWriteForwardingStatus to confirm that the request has completed before using the write forwarding feature for this cluster.

      " }, "PendingModifiedValues":{ "shape":"ClusterPendingModifiedValues", - "documentation":"

      A value that specifies that changes to the DB cluster are pending. This element is only included when changes are pending. Specific changes are identified by subelements.

      " + "documentation":"

      Information about pending changes to the DB cluster. This information is returned only when there are pending changes. Specific changes are identified by subelements.

      " }, "DBClusterInstanceClass":{ "shape":"String", @@ -4996,11 +4996,11 @@ }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"

      Specifies the accessibility options for the DB instance.

      When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB instance's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB instance doesn't permit it.

      When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

      For more information, see CreateDBInstance.

      This setting is only for non-Aurora Multi-AZ DB clusters.

      " + "documentation":"

      Indicates whether the DB cluster is publicly accessible.

      When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

      When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.

      For more information, see CreateDBCluster.

      This setting is only for non-Aurora Multi-AZ DB clusters.

      " }, "AutoMinorVersionUpgrade":{ "shape":"Boolean", - "documentation":"

      A value that indicates that minor version patches are applied automatically.

      This setting is only for non-Aurora Multi-AZ DB clusters.

      " + "documentation":"

      Indicates whether minor version patches are applied automatically.

      This setting is only for non-Aurora Multi-AZ DB clusters.

      " }, "MonitoringInterval":{ "shape":"IntegerOptional", @@ -5012,7 +5012,7 @@ }, "PerformanceInsightsEnabled":{ "shape":"BooleanOptional", - "documentation":"

      True if Performance Insights is enabled for the DB cluster, and otherwise false.

      This setting is only for non-Aurora Multi-AZ DB clusters.

      " + "documentation":"

      Indicates whether Performance Insights is enabled for the DB cluster.

      This setting is only for non-Aurora Multi-AZ DB clusters.

      " }, "PerformanceInsightsKMSKeyId":{ "shape":"String", @@ -5020,12 +5020,12 @@ }, "PerformanceInsightsRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

      The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

      • 7

      • month * 31, where month is a number of months from 1-23

      • 731

      For example, the following values are valid:

      • 93 (3 months * 31)

      • 341 (11 months * 31)

      • 589 (19 months * 31)

      • 731

      This setting is only for non-Aurora Multi-AZ DB clusters.

      " + "documentation":"

      The number of days to retain Performance Insights data.

      This setting is only for non-Aurora Multi-AZ DB clusters.

      Valid Values:

      • 7

      • month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31)

      • 731

      Default: 7 days

      " }, "ServerlessV2ScalingConfiguration":{"shape":"ServerlessV2ScalingConfigurationInfo"}, "NetworkType":{ "shape":"String", - "documentation":"

      The network type of the DB instance.

      Valid values:

      • IPV4

      • DUAL

      The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

      For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide.

      This setting is only for Aurora DB clusters.

      " + "documentation":"

      The network type of the DB cluster.

      The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

      For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide.

      This setting is only for Aurora DB clusters.

      Valid Values: IPV4 | DUAL

      " }, "DBSystemId":{ "shape":"String", @@ -5033,7 +5033,7 @@ }, "MasterUserSecret":{ "shape":"MasterUserSecret", - "documentation":"

      Contains the secret managed by RDS in Amazon Web Services Secrets Manager for the master user password.

      For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User Guide.

      " + "documentation":"

      The secret managed by RDS in Amazon Web Services Secrets Manager for the master user password.

      For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User Guide.

      " }, "IOOptimizedNextAllowedModificationTime":{ "shape":"TStamp", @@ -5293,7 +5293,7 @@ "members":{ "Marker":{ "shape":"String", - "documentation":"

      A pagination token that can be used in a later DescribeDBClusters request.

      " + "documentation":"

      A pagination token that can be used in a later DescribeDBClusters request.

      " }, "DBClusters":{ "shape":"DBClusterList", @@ -5822,19 +5822,19 @@ "members":{ "DBInstanceIdentifier":{ "shape":"String", - "documentation":"

      Contains a user-supplied database identifier. This identifier is the unique key that identifies a DB instance.

      " + "documentation":"

      The user-supplied database identifier. This identifier is the unique key that identifies a DB instance.

      " }, "DBInstanceClass":{ "shape":"String", - "documentation":"

      Contains the name of the compute and memory capacity class of the DB instance.

      " + "documentation":"

      The name of the compute and memory capacity class of the DB instance.

      " }, "Engine":{ "shape":"String", - "documentation":"

      The name of the database engine to be used for this DB instance.

      " + "documentation":"

      The database engine used for this DB instance.

      " }, "DBInstanceStatus":{ "shape":"String", - "documentation":"

      Specifies the current state of this database.

      For information about DB instance statuses, see Viewing DB instance status in the Amazon RDS User Guide.

      " + "documentation":"

      The current state of this database.

      For information about DB instance statuses, see Viewing DB instance status in the Amazon RDS User Guide.

      " }, "AutomaticRestartTime":{ "shape":"TStamp", @@ -5842,31 +5842,31 @@ }, "MasterUsername":{ "shape":"String", - "documentation":"

      Contains the master username for the DB instance.

      " + "documentation":"

      The master username for the DB instance.

      " }, "DBName":{ "shape":"String", - "documentation":"

      The meaning of this parameter differs according to the database engine you use.

      MySQL, MariaDB, SQL Server, PostgreSQL

      Contains the name of the initial database of this instance that was provided at create time, if one was specified when the DB instance was created. This same name is returned for the life of the DB instance.

      Type: String

      Oracle

      Contains the Oracle System ID (SID) of the created DB instance. Not shown when the returned parameters do not apply to an Oracle DB instance.

      " + "documentation":"

      The meaning of this parameter differs depending on the database engine.

      • For RDS for MariaDB, Microsoft SQL Server, MySQL, and PostgreSQL - The name of the initial database specified for this DB instance when it was created, if one was provided. This same name is returned for the life of the DB instance.

      • For RDS for Oracle - The Oracle System ID (SID) of the created DB instance. This value is only returned when the object returned is an Oracle DB instance.

      " }, "Endpoint":{ "shape":"Endpoint", - "documentation":"

      Specifies the connection endpoint.

      The endpoint might not be shown for instances whose status is creating.

      " + "documentation":"

      The connection endpoint for the DB instance.

      The endpoint might not be shown for instances with the status of creating.

      " }, "AllocatedStorage":{ "shape":"Integer", - "documentation":"

      Specifies the allocated storage size specified in gibibytes (GiB).

      " + "documentation":"

      The amount of storage in gibibytes (GiB) allocated for the DB instance.

      " }, "InstanceCreateTime":{ "shape":"TStamp", - "documentation":"

      Provides the date and time the DB instance was created.

      " + "documentation":"

      The date and time when the DB instance was created.

      " }, "PreferredBackupWindow":{ "shape":"String", - "documentation":"

      Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod.

      " + "documentation":"

      The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod.

      " }, "BackupRetentionPeriod":{ "shape":"Integer", - "documentation":"

      Specifies the number of days for which automatic DB snapshots are retained.

      " + "documentation":"

      The number of days for which automatic DB snapshots are retained.

      " }, "DBSecurityGroups":{ "shape":"DBSecurityGroupMembershipList", @@ -5874,55 +5874,55 @@ }, "VpcSecurityGroups":{ "shape":"VpcSecurityGroupMembershipList", - "documentation":"

      Provides a list of VPC security group elements that the DB instance belongs to.

      " + "documentation":"

      The list of Amazon EC2 VPC security groups that the DB instance belongs to.

      " }, "DBParameterGroups":{ "shape":"DBParameterGroupStatusList", - "documentation":"

      Provides the list of DB parameter groups applied to this DB instance.

      " + "documentation":"

      The list of DB parameter groups applied to this DB instance.

      " }, "AvailabilityZone":{ "shape":"String", - "documentation":"

      Specifies the name of the Availability Zone the DB instance is located in.

      " + "documentation":"

      The name of the Availability Zone where the DB instance is located.

      " }, "DBSubnetGroup":{ "shape":"DBSubnetGroup", - "documentation":"

      Specifies information on the subnet group associated with the DB instance, including the name, description, and subnets in the subnet group.

      " + "documentation":"

      Information about the subnet group associated with the DB instance, including the name, description, and subnets in the subnet group.

      " }, "PreferredMaintenanceWindow":{ "shape":"String", - "documentation":"

      Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

      " + "documentation":"

      The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

      " }, "PendingModifiedValues":{ "shape":"PendingModifiedValues", - "documentation":"

      A value that specifies that changes to the DB instance are pending. This element is only included when changes are pending. Specific changes are identified by subelements.

      " + "documentation":"

      Information about pending changes to the DB instance. This information is returned only when there are pending changes. Specific changes are identified by subelements.

      " }, "LatestRestorableTime":{ "shape":"TStamp", - "documentation":"

      Specifies the latest time to which a database can be restored with point-in-time restore.

      " + "documentation":"

      The latest time to which a database in this DB instance can be restored with point-in-time restore.

      " }, "MultiAZ":{ "shape":"Boolean", - "documentation":"

      Specifies if the DB instance is a Multi-AZ deployment. This setting doesn't apply to RDS Custom.

      " + "documentation":"

      Indicates whether the DB instance is a Multi-AZ deployment. This setting doesn't apply to RDS Custom DB instances.

      " }, "EngineVersion":{ "shape":"String", - "documentation":"

      Indicates the database engine version.

      " + "documentation":"

      The version of the database engine.

      " }, "AutoMinorVersionUpgrade":{ "shape":"Boolean", - "documentation":"

      A value that indicates that minor version patches are applied automatically.

      " + "documentation":"

      Indicates whether minor version patches are applied automatically.

      " }, "ReadReplicaSourceDBInstanceIdentifier":{ "shape":"String", - "documentation":"

      Contains the identifier of the source DB instance if this DB instance is a read replica.

      " + "documentation":"

      The identifier of the source DB instance if this DB instance is a read replica.

      " }, "ReadReplicaDBInstanceIdentifiers":{ "shape":"ReadReplicaDBInstanceIdentifierList", - "documentation":"

      Contains one or more identifiers of the read replicas associated with this DB instance.

      " + "documentation":"

      The identifiers of the read replicas associated with this DB instance.

      " }, "ReadReplicaDBClusterIdentifiers":{ "shape":"ReadReplicaDBClusterIdentifierList", - "documentation":"

      Contains one or more identifiers of Aurora DB clusters to which the RDS DB instance is replicated as a read replica. For example, when you create an Aurora read replica of an RDS for MySQL DB instance, the Aurora MySQL DB cluster for the Aurora read replica is shown. This output doesn't contain information about cross-Region Aurora read replicas.

      Currently, each RDS DB instance can have only one Aurora read replica.

      " + "documentation":"

      The identifiers of Aurora DB clusters to which the RDS DB instance is replicated as a read replica. For example, when you create an Aurora read replica of an RDS for MySQL DB instance, the Aurora MySQL DB cluster for the Aurora read replica is shown. This output doesn't contain information about cross-Region Aurora read replicas.

      Currently, each RDS DB instance can have only one Aurora read replica.

      " }, "ReplicaMode":{ "shape":"ReplicaMode", @@ -5930,15 +5930,15 @@ }, "LicenseModel":{ "shape":"String", - "documentation":"

      License model information for this DB instance. This setting doesn't apply to RDS Custom.

      " + "documentation":"

      The license model information for this DB instance. This setting doesn't apply to RDS Custom DB instances.

      " }, "Iops":{ "shape":"IntegerOptional", - "documentation":"

      Specifies the Provisioned IOPS (I/O operations per second) value.

      " + "documentation":"

      The Provisioned IOPS (I/O operations per second) value for the DB instance.

      " }, "OptionGroupMemberships":{ "shape":"OptionGroupMembershipList", - "documentation":"

      Provides the list of option group memberships for this DB instance.

      " + "documentation":"

      The list of option group memberships for this DB instance.

      " }, "CharacterSetName":{ "shape":"String", @@ -5954,15 +5954,15 @@ }, "PubliclyAccessible":{ "shape":"Boolean", - "documentation":"

      Specifies the accessibility options for the DB instance.

      When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

      When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

      For more information, see CreateDBInstance.

      " + "documentation":"

      Indicates whether the DB instance is publicly accessible.

      When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB instance's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB instance doesn't permit it.

      When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

      For more information, see CreateDBInstance.

      " }, "StatusInfos":{ "shape":"DBInstanceStatusInfoList", - "documentation":"

      The status of a read replica. If the instance isn't a read replica, this is blank.

      " + "documentation":"

      The status of a read replica. If the DB instance isn't a read replica, the value is blank.

      " }, "StorageType":{ "shape":"String", - "documentation":"

      Specifies the storage type associated with the DB instance.

      " + "documentation":"

      The storage type associated with the DB instance.

      " }, "TdeCredentialArn":{ "shape":"String", @@ -5970,19 +5970,19 @@ }, "DbInstancePort":{ "shape":"Integer", - "documentation":"

      Specifies the port that the DB instance listens on. If the DB instance is part of a DB cluster, this can be a different port than the DB cluster port.

      " + "documentation":"

      The port that the DB instance listens on. If the DB instance is part of a DB cluster, this can be a different port than the DB cluster port.

      " }, "DBClusterIdentifier":{ "shape":"String", - "documentation":"

      If the DB instance is a member of a DB cluster, contains the name of the DB cluster that the DB instance is a member of.

      " + "documentation":"

      If the DB instance is a member of a DB cluster, indicates the name of the DB cluster that the DB instance is a member of.

      " }, "StorageEncrypted":{ "shape":"Boolean", - "documentation":"

      Specifies whether the DB instance is encrypted.

      " + "documentation":"

      Indicates whether the DB instance is encrypted.

      " }, "KmsKeyId":{ "shape":"String", - "documentation":"

      If StorageEncrypted is true, the Amazon Web Services KMS key identifier for the encrypted DB instance.

      The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

      " + "documentation":"

      If StorageEncrypted is enabled, the Amazon Web Services KMS key identifier for the encrypted DB instance.

      The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

      " }, "DbiResourceId":{ "shape":"String", @@ -5998,7 +5998,7 @@ }, "CopyTagsToSnapshot":{ "shape":"Boolean", - "documentation":"

      Specifies whether tags are copied from the DB instance to snapshots of the DB instance.

      Amazon Aurora

      Not applicable. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting. For more information, see DBCluster.

      " + "documentation":"

      Indicates whether tags are copied from the DB instance to snapshots of the DB instance.

      This setting doesn't apply to Amazon Aurora DB instances. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting. For more information, see DBCluster.

      " }, "MonitoringInterval":{ "shape":"IntegerOptional", @@ -6014,7 +6014,7 @@ }, "PromotionTier":{ "shape":"IntegerOptional", - "documentation":"

      A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide.

      " + "documentation":"

      The order of priority in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide.

      " }, "DBInstanceArn":{ "shape":"String", @@ -6026,11 +6026,11 @@ }, "IAMDatabaseAuthenticationEnabled":{ "shape":"Boolean", - "documentation":"

      True if mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false.

      For a list of engine versions that support IAM database authentication, see IAM database authentication in the Amazon RDS User Guide and IAM database authentication in Aurora in the Amazon Aurora User Guide.

      " + "documentation":"

      Indicates whether mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled for the DB instance.

      For a list of engine versions that support IAM database authentication, see IAM database authentication in the Amazon RDS User Guide and IAM database authentication in Aurora in the Amazon Aurora User Guide.

      " }, "PerformanceInsightsEnabled":{ "shape":"BooleanOptional", - "documentation":"

      True if Performance Insights is enabled for the DB instance, and otherwise false.

      " + "documentation":"

      Indicates whether Performance Insights is enabled for the DB instance.

      " }, "PerformanceInsightsKMSKeyId":{ "shape":"String", @@ -6038,11 +6038,11 @@ }, "PerformanceInsightsRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

      The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

      • 7

      • month * 31, where month is a number of months from 1-23

      • 731

      For example, the following values are valid:

      • 93 (3 months * 31)

      • 341 (11 months * 31)

      • 589 (19 months * 31)

      • 731

      " + "documentation":"

      The number of days to retain Performance Insights data.

      Valid Values:

      • 7

      • month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31)

      • 731

      Default: 7 days

      " }, "EnabledCloudwatchLogsExports":{ "shape":"LogTypeList", - "documentation":"

      A list of log types that this DB instance is configured to export to CloudWatch Logs.

      Log types vary by DB engine. For information about the log types for each DB engine, see Amazon RDS Database Log Files in the Amazon RDS User Guide.

      " + "documentation":"

      A list of log types that this DB instance is configured to export to CloudWatch Logs.

      Log types vary by DB engine. For information about the log types for each DB engine, see Monitoring Amazon RDS log files in the Amazon RDS User Guide.

      " }, "ProcessorFeatures":{ "shape":"ProcessorFeatureList", @@ -6050,7 +6050,7 @@ }, "DeletionProtection":{ "shape":"Boolean", - "documentation":"

      Indicates if the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. For more information, see Deleting a DB Instance.

      " + "documentation":"

      Indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. For more information, see Deleting a DB Instance.

      " }, "AssociatedRoles":{ "shape":"DBInstanceRoles", @@ -6058,7 +6058,7 @@ }, "ListenerEndpoint":{ "shape":"Endpoint", - "documentation":"

      Specifies the listener connection endpoint for SQL Server Always On.

      " + "documentation":"

      The listener connection endpoint for SQL Server Always On.

      " }, "MaxAllocatedStorage":{ "shape":"IntegerOptional", @@ -6071,7 +6071,7 @@ }, "CustomerOwnedIpEnabled":{ "shape":"BooleanOptional", - "documentation":"

      Specifies whether a customer-owned IP address (CoIP) is enabled for an RDS on Outposts DB instance.

      A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the DB instance from outside of its virtual private cloud (VPC) on your local network.

      For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.

      For more information about CoIPs, see Customer-owned IP addresses in the Amazon Web Services Outposts User Guide.

      " + "documentation":"

      Indicates whether a customer-owned IP address (CoIP) is enabled for an RDS on Outposts DB instance.

      A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the DB instance from outside of its virtual private cloud (VPC) on your local network.

      For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.

      For more information about CoIPs, see Customer-owned IP addresses in the Amazon Web Services Outposts User Guide.

      " }, "AwsBackupRecoveryPointArn":{ "shape":"String", @@ -6111,11 +6111,11 @@ }, "BackupTarget":{ "shape":"String", - "documentation":"

      Specifies where automated backups and manual snapshots are stored: Amazon Web Services Outposts or the Amazon Web Services Region.

      " + "documentation":"

      The location where automated backups and manual snapshots are stored: Amazon Web Services Outposts or the Amazon Web Services Region.

      " }, "NetworkType":{ "shape":"String", - "documentation":"

      The network type of the DB instance.

      Valid values:

      • IPV4

      • DUAL

      The network type is determined by the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

      For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide and Working with a DB instance in a VPC in the Amazon Aurora User Guide.

      " + "documentation":"

      The network type of the DB instance.

      The network type is determined by the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

      For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide and Working with a DB instance in a VPC in the Amazon Aurora User Guide.

      Valid Values: IPV4 | DUAL

      " }, "ActivityStreamPolicyStatus":{ "shape":"ActivityStreamPolicyStatus", @@ -6123,15 +6123,15 @@ }, "StorageThroughput":{ "shape":"IntegerOptional", - "documentation":"

      Specifies the storage throughput for the DB instance.

      This setting applies only to the gp3 storage type.

      " + "documentation":"

      The storage throughput for the DB instance.

      This setting applies only to the gp3 storage type.

      " }, "DBSystemId":{ "shape":"String", - "documentation":"

      The Oracle system ID (Oracle SID) for a container database (CDB). The Oracle SID is also the name of the CDB. This setting is valid for RDS Custom only.

      " + "documentation":"

      The Oracle system ID (Oracle SID) for a container database (CDB). The Oracle SID is also the name of the CDB. This setting is only valid for RDS Custom DB instances.

      " }, "MasterUserSecret":{ "shape":"MasterUserSecret", - "documentation":"

      Contains the secret managed by RDS in Amazon Web Services Secrets Manager for the master user password.

      For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide.

      " + "documentation":"

      The secret managed by RDS in Amazon Web Services Secrets Manager for the master user password.

      For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide.

      " }, "CertificateDetails":{ "shape":"CertificateDetails", @@ -6139,7 +6139,7 @@ }, "ReadReplicaSourceDBClusterIdentifier":{ "shape":"String", - "documentation":"

      Contains the identifier of the source DB cluster if this DB instance is a read replica.

      " + "documentation":"

      The identifier of the source DB cluster if this DB instance is a read replica.

      " } }, "documentation":"

      Contains the details of an Amazon RDS DB instance.

      This data type is used as a response element in the operations CreateDBInstance, CreateDBInstanceReadReplica, DeleteDBInstance, DescribeDBInstances, ModifyDBInstance, PromoteReadReplica, RebootDBInstance, RestoreDBInstanceFromDBSnapshot, RestoreDBInstanceFromS3, RestoreDBInstanceToPointInTime, StartDBInstance, and StopDBInstance.

      ", @@ -7977,11 +7977,11 @@ "members":{ "DBClusterIdentifier":{ "shape":"String", - "documentation":"

      The user-supplied DB cluster identifier or the Amazon Resource Name (ARN) of the DB cluster. If this parameter is specified, information from only the specific DB cluster is returned. This parameter isn't case-sensitive.

      Constraints:

      • If supplied, must match an existing DBClusterIdentifier.

      " + "documentation":"

      The user-supplied DB cluster identifier or the Amazon Resource Name (ARN) of the DB cluster. If this parameter is specified, information for only the specific DB cluster is returned. This parameter isn't case-sensitive.

      Constraints:

      • If supplied, must match an existing DB cluster identifier.

      " }, "Filters":{ "shape":"FilterList", - "documentation":"

      A filter that specifies one or more DB clusters to describe.

      Supported filters:

      • clone-group-id - Accepts clone group identifiers. The results list only includes information about the DB clusters associated with these clone groups.

      • db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list only includes information about the DB clusters identified by these ARNs.

      • db-cluster-resource-id - Accepts DB cluster resource identifiers. The results list will only include information about the DB clusters identified by these DB cluster resource identifiers.

      • domain - Accepts Active Directory directory IDs. The results list only includes information about the DB clusters associated with these domains.

      • engine - Accepts engine names. The results list only includes information about the DB clusters for these engines.

      " + "documentation":"

      A filter that specifies one or more DB clusters to describe.

      Supported Filters:

      • clone-group-id - Accepts clone group identifiers. The results list only includes information about the DB clusters associated with these clone groups.

      • db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list only includes information about the DB clusters identified by these ARNs.

      • db-cluster-resource-id - Accepts DB cluster resource identifiers. The results list will only include information about the DB clusters identified by these DB cluster resource identifiers.

      • domain - Accepts Active Directory directory IDs. The results list only includes information about the DB clusters associated with these domains.

      • engine - Accepts engine names. The results list only includes information about the DB clusters for these engines.

      " }, "MaxRecords":{ "shape":"IntegerOptional", @@ -7993,7 +7993,7 @@ }, "IncludeShared":{ "shape":"Boolean", - "documentation":"

      Optional Boolean parameter that specifies whether the output includes information about clusters shared from other Amazon Web Services accounts.

      " + "documentation":"

      Specifies whether the output includes information about clusters shared from other Amazon Web Services accounts.

      " } }, "documentation":"

      " @@ -8078,11 +8078,11 @@ "members":{ "DBInstanceIdentifier":{ "shape":"String", - "documentation":"

      The user-supplied instance identifier or the Amazon Resource Name (ARN) of the DB instance. If this parameter is specified, information from only the specific DB instance is returned. This parameter isn't case-sensitive.

      Constraints:

      • If supplied, must match the identifier of an existing DBInstance.

      " + "documentation":"

      The user-supplied instance identifier or the Amazon Resource Name (ARN) of the DB instance. If this parameter is specified, information from only the specific DB instance is returned. This parameter isn't case-sensitive.

      Constraints:

      • If supplied, must match the identifier of an existing DB instance.

      " }, "Filters":{ "shape":"FilterList", - "documentation":"

      A filter that specifies one or more DB instances to describe.

      Supported filters:

      • db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list only includes information about the DB instances associated with the DB clusters identified by these ARNs.

      • db-instance-id - Accepts DB instance identifiers and DB instance Amazon Resource Names (ARNs). The results list only includes information about the DB instances identified by these ARNs.

      • dbi-resource-id - Accepts DB instance resource identifiers. The results list will only include information about the DB instances identified by these DB instance resource identifiers.

      • domain - Accepts Active Directory directory IDs. The results list only includes information about the DB instances associated with these domains.

      • engine - Accepts engine names. The results list only includes information about the DB instances for these engines.

      " + "documentation":"

      A filter that specifies one or more DB instances to describe.

      Supported Filters:

      • db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list only includes information about the DB instances associated with the DB clusters identified by these ARNs.

      • db-instance-id - Accepts DB instance identifiers and DB instance Amazon Resource Names (ARNs). The results list only includes information about the DB instances identified by these ARNs.

      • dbi-resource-id - Accepts DB instance resource identifiers. The results list only includes information about the DB instances identified by these DB instance resource identifiers.

      • domain - Accepts Active Directory directory IDs. The results list only includes information about the DB instances associated with these domains.

      • engine - Accepts engine names. The results list only includes information about the DB instances for these engines.

      " }, "MaxRecords":{ "shape":"IntegerOptional", @@ -10304,164 +10304,164 @@ "members":{ "DBClusterIdentifier":{ "shape":"String", - "documentation":"

      The DB cluster identifier for the cluster being modified. This parameter isn't case-sensitive.

      Constraints: This identifier must match the identifier of an existing DB cluster.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The DB cluster identifier for the cluster being modified. This parameter isn't case-sensitive.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      Constraints:

      • Must match the identifier of an existing DB cluster.

      " }, "NewDBClusterIdentifier":{ "shape":"String", - "documentation":"

      The new DB cluster identifier for the DB cluster when renaming a DB cluster. This value is stored as a lowercase string.

      Constraints:

      • Must contain from 1 to 63 letters, numbers, or hyphens

      • The first character must be a letter

      • Can't end with a hyphen or contain two consecutive hyphens

      Example: my-cluster2

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The new DB cluster identifier for the DB cluster when renaming a DB cluster. This value is stored as a lowercase string.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      Constraints:

      • Must contain from 1 to 63 letters, numbers, or hyphens.

      • The first character must be a letter.

      • Can't end with a hyphen or contain two consecutive hyphens.

      Example: my-cluster2

      " }, "ApplyImmediately":{ "shape":"Boolean", - "documentation":"

      A value that indicates whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB cluster. If this parameter is disabled, changes to the DB cluster are applied during the next maintenance window.

      Most modifications can be applied immediately or during the next scheduled maintenance window. Some modifications, such as turning on deletion protection and changing the master password, are applied immediately—regardless of when you choose to apply them.

      By default, this parameter is disabled.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      Specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB cluster. If this parameter is disabled, changes to the DB cluster are applied during the next maintenance window.

      Most modifications can be applied immediately or during the next scheduled maintenance window. Some modifications, such as turning on deletion protection and changing the master password, are applied immediately—regardless of when you choose to apply them.

      By default, this parameter is disabled.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      " }, "BackupRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

      The number of days for which automated backups are retained. Specify a minimum value of 1.

      Default: 1

      Constraints:

      • Must be a value from 1 to 35

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The number of days for which automated backups are retained. Specify a minimum value of 1.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      Default: 1

      Constraints:

      • Must be a value from 1 to 35.

      " }, "DBClusterParameterGroupName":{ "shape":"String", - "documentation":"

      The name of the DB cluster parameter group to use for the DB cluster.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The name of the DB cluster parameter group to use for the DB cluster.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      " }, "VpcSecurityGroupIds":{ "shape":"VpcSecurityGroupIdList", - "documentation":"

      A list of VPC security groups that the DB cluster will belong to.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      A list of EC2 VPC security groups to associate with this DB cluster.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      " }, "Port":{ "shape":"IntegerOptional", - "documentation":"

      The port number on which the DB cluster accepts connections.

      Constraints: Value must be 1150-65535

      Default: The same port as the original DB cluster.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      The port number on which the DB cluster accepts connections.

      Valid for Cluster Type: Aurora DB clusters only

      Valid Values: 1150-65535

      Default: The same port as the original DB cluster.

      " }, "MasterUserPassword":{ "shape":"String", - "documentation":"

      The new password for the master database user. This password can contain any printable ASCII character except \"/\", \"\"\", or \"@\".

      Constraints:

      • Must contain from 8 to 41 characters.

      • Can't be specified if ManageMasterUserPassword is turned on.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The new password for the master database user.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      Constraints:

      • Must contain from 8 to 41 characters.

      • Can contain any printable ASCII character except \"/\", \"\"\", or \"@\".

      • Can't be specified if ManageMasterUserPassword is turned on.

      " }, "OptionGroupName":{ "shape":"String", - "documentation":"

      A value that indicates that the DB cluster should be associated with the specified option group.

      DB clusters are associated with a default option group that can't be modified.

      " + "documentation":"

      The option group to associate the DB cluster with.

      DB clusters are associated with a default option group that can't be modified.

      " }, "PreferredBackupWindow":{ "shape":"String", - "documentation":"

      The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

      The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. To view the time blocks available, see Backup window in the Amazon Aurora User Guide.

      Constraints:

      • Must be in the format hh24:mi-hh24:mi.

      • Must be in Universal Coordinated Time (UTC).

      • Must not conflict with the preferred maintenance window.

      • Must be at least 30 minutes.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

      The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. To view the time blocks available, see Backup window in the Amazon Aurora User Guide.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      Constraints:

      • Must be in the format hh24:mi-hh24:mi.

      • Must be in Universal Coordinated Time (UTC).

      • Must not conflict with the preferred maintenance window.

      • Must be at least 30 minutes.

      " }, "PreferredMaintenanceWindow":{ "shape":"String", - "documentation":"

      The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

      Format: ddd:hh24:mi-ddd:hh24:mi

      The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance Window in the Amazon Aurora User Guide.

      Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.

      Constraints: Minimum 30-minute window.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance Window in the Amazon Aurora User Guide.

      Constraints:

      • Must be in the format ddd:hh24:mi-ddd:hh24:mi.

      • Days must be one of Mon | Tue | Wed | Thu | Fri | Sat | Sun.

      • Must be in Universal Coordinated Time (UTC).

      • Must be at least 30 minutes.

      " }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.

      For more information, see IAM Database Authentication in the Amazon Aurora User Guide.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.

      For more information, see IAM Database Authentication in the Amazon Aurora User Guide.

      Valid for Cluster Type: Aurora DB clusters only

      " }, "BacktrackWindow":{ "shape":"LongOptional", - "documentation":"

      The target backtrack window, in seconds. To disable backtracking, set this value to 0.

      Default: 0

      Constraints:

      • If specified, this value must be set to a number from 0 to 259,200 (72 hours).

      Valid for: Aurora MySQL DB clusters only

      " + "documentation":"

      The target backtrack window, in seconds. To disable backtracking, set this value to 0.

      Valid for Cluster Type: Aurora MySQL DB clusters only

      Default: 0

      Constraints:

      • If specified, this value must be set to a number from 0 to 259,200 (72 hours).

      " }, "CloudwatchLogsExportConfiguration":{ "shape":"CloudwatchLogsExportConfiguration", - "documentation":"

      The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB cluster. The values in the list depend on the DB engine being used.

      RDS for MySQL

      Possible values are error, general, and slowquery.

      RDS for PostgreSQL

      Possible values are postgresql and upgrade.

      Aurora MySQL

      Possible values are audit, error, general, and slowquery.

      Aurora PostgreSQL

      Possible value is postgresql.

      For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

      For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB cluster.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      The following values are valid for each DB engine:

      • Aurora MySQL - audit | error | general | slowquery

      • Aurora PostgreSQL - postgresql

      • RDS for MySQL - error | general | slowquery

      • RDS for PostgreSQL - postgresql | upgrade

      For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

      For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

      " }, "EngineVersion":{ "shape":"String", - "documentation":"

      The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless ApplyImmediately is enabled.

      If the cluster that you're modifying has one or more read replicas, all replicas must be running an engine version that's the same or later than the version you specify.

      To list all of the available engine versions for Aurora MySQL, use the following command:

      aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"

      To list all of the available engine versions for Aurora PostgreSQL, use the following command:

      aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"

      To list all of the available engine versions for RDS for MySQL, use the following command:

      aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\"

      To list all of the available engine versions for RDS for PostgreSQL, use the following command:

      aws rds describe-db-engine-versions --engine postgres --query \"DBEngineVersions[].EngineVersion\"

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless ApplyImmediately is enabled.

      If the cluster that you're modifying has one or more read replicas, all replicas must be running an engine version that's the same or later than the version you specify.

      To list all of the available engine versions for Aurora MySQL, use the following command:

      aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"

      To list all of the available engine versions for Aurora PostgreSQL, use the following command:

      aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"

      To list all of the available engine versions for RDS for MySQL, use the following command:

      aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\"

      To list all of the available engine versions for RDS for PostgreSQL, use the following command:

      aws rds describe-db-engine-versions --engine postgres --query \"DBEngineVersions[].EngineVersion\"

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      " }, "AllowMajorVersionUpgrade":{ "shape":"Boolean", - "documentation":"

      A value that indicates whether major version upgrades are allowed.

      Constraints: You must allow major version upgrades when specifying a value for the EngineVersion parameter that is a different major version than the DB cluster's current version.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      Specifies whether major version upgrades are allowed.

      Valid for Cluster Type: Aurora DB clusters only

      Constraints:

      • You must allow major version upgrades when specifying a value for the EngineVersion parameter that is a different major version than the DB cluster's current version.

      " }, "DBInstanceParameterGroupName":{ "shape":"String", - "documentation":"

      The name of the DB parameter group to apply to all instances of the DB cluster.

      When you apply a parameter group using the DBInstanceParameterGroupName parameter, the DB cluster isn't rebooted automatically. Also, parameter changes are applied immediately rather than during the next maintenance window.

      Default: The existing name setting

      Constraints:

      • The DB parameter group must be in the same DB parameter group family as this DB cluster.

      • The DBInstanceParameterGroupName parameter is valid in combination with the AllowMajorVersionUpgrade parameter for a major version upgrade only.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      The name of the DB parameter group to apply to all instances of the DB cluster.

      When you apply a parameter group using the DBInstanceParameterGroupName parameter, the DB cluster isn't rebooted automatically. Also, parameter changes are applied immediately rather than during the next maintenance window.

      Valid for Cluster Type: Aurora DB clusters only

      Default: The existing name setting

      Constraints:

      • The DB parameter group must be in the same DB parameter group family as this DB cluster.

      • The DBInstanceParameterGroupName parameter is valid in combination with the AllowMajorVersionUpgrade parameter for a major version upgrade only.

      " }, "Domain":{ "shape":"String", - "documentation":"

      The Active Directory directory ID to move the DB cluster to. Specify none to remove the cluster from its current domain. The domain must be created prior to this operation.

      For more information, see Kerberos Authentication in the Amazon Aurora User Guide.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      The Active Directory directory ID to move the DB cluster to. Specify none to remove the cluster from its current domain. The domain must be created prior to this operation.

      For more information, see Kerberos Authentication in the Amazon Aurora User Guide.

      Valid for Cluster Type: Aurora DB clusters only

      " }, "DomainIAMRoleName":{ "shape":"String", - "documentation":"

      Specify the name of the IAM role to be used when making API calls to the Directory Service.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      The name of the IAM role to use when making API calls to the Directory Service.

      Valid for Cluster Type: Aurora DB clusters only

      " }, "ScalingConfiguration":{ "shape":"ScalingConfiguration", - "documentation":"

      The scaling properties of the DB cluster. You can only modify scaling properties for DB clusters in serverless DB engine mode.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      The scaling properties of the DB cluster. You can only modify scaling properties for DB clusters in serverless DB engine mode.

      Valid for Cluster Type: Aurora DB clusters only

      " }, "DeletionProtection":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      Specifies whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      " }, "EnableHttpEndpoint":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to enable the HTTP endpoint for an Aurora Serverless v1 DB cluster. By default, the HTTP endpoint is disabled.

      When enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless v1 DB cluster. You can also query your database from inside the RDS console with the query editor.

      For more information, see Using the Data API for Aurora Serverless v1 in the Amazon Aurora User Guide.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      Specifies whether to enable the HTTP endpoint for an Aurora Serverless v1 DB cluster. By default, the HTTP endpoint is disabled.

      When enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless v1 DB cluster. You can also query your database from inside the RDS console with the query editor.

      For more information, see Using the Data API for Aurora Serverless v1 in the Amazon Aurora User Guide.

      Valid for Cluster Type: Aurora DB clusters only

      " }, "CopyTagsToSnapshot":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      Specifies whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      " }, "EnableGlobalWriteForwarding":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to enable this DB cluster to forward write operations to the primary cluster of an Aurora global database (GlobalCluster). By default, write operations are not allowed on Aurora DB clusters that are secondary clusters in an Aurora global database.

      You can set this value only on Aurora DB clusters that are members of an Aurora global database. With this parameter enabled, a secondary cluster can forward writes to the current primary cluster and the resulting changes are replicated back to this cluster. For the primary DB cluster of an Aurora global database, this value is used immediately if the primary is demoted by the FailoverGlobalCluster API operation, but it does nothing until then.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      Specifies whether to enable this DB cluster to forward write operations to the primary cluster of a global cluster (Aurora global database). By default, write operations are not allowed on Aurora DB clusters that are secondary clusters in an Aurora global database.

      You can set this value only on Aurora DB clusters that are members of an Aurora global database. With this parameter enabled, a secondary cluster can forward writes to the current primary cluster, and the resulting changes are replicated back to this cluster. For the primary DB cluster of an Aurora global database, this value is used immediately if the primary is demoted by a global cluster API operation, but it does nothing until then.

      Valid for Cluster Type: Aurora DB clusters only

      " }, "DBClusterInstanceClass":{ "shape":"String", - "documentation":"

      The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6gd.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.

      For the full list of DB instance classes and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

      Valid for: Multi-AZ DB clusters only

      " + "documentation":"

      The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6gd.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.

      For the full list of DB instance classes and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

      Valid for Cluster Type: Multi-AZ DB clusters only

      " }, "AllocatedStorage":{ "shape":"IntegerOptional", - "documentation":"

      The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster.

      Valid for: Multi-AZ DB clusters only

      " + "documentation":"

      The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster.

      Valid for Cluster Type: Multi-AZ DB clusters only

      " }, "StorageType":{ "shape":"String", - "documentation":"

      Specifies the storage type to be associated with the DB cluster.

      When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required.

      Valid values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters)

      Default: aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters)

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The storage type to associate with the DB cluster.

      For information on storage types for Aurora DB clusters, see Storage configurations for Amazon Aurora DB clusters. For information on storage types for Multi-AZ DB clusters, see Settings for creating Multi-AZ DB clusters.

      When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      Valid Values:

      • Aurora DB clusters - aurora | aurora-iopt1

      • Multi-AZ DB clusters - io1

      Default:

      • Aurora DB clusters - aurora

      • Multi-AZ DB clusters - io1

      " }, "Iops":{ "shape":"IntegerOptional", - "documentation":"

      The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster.

      For information about valid IOPS values, see Amazon RDS Provisioned IOPS storage in the Amazon RDS User Guide.

      Constraints: Must be a multiple between .5 and 50 of the storage amount for the DB cluster.

      Valid for: Multi-AZ DB clusters only

      " + "documentation":"

      The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster.

      For information about valid IOPS values, see Amazon RDS Provisioned IOPS storage in the Amazon RDS User Guide.

      Valid for Cluster Type: Multi-AZ DB clusters only

      Constraints:

      • Must be a multiple between .5 and 50 of the storage amount for the DB cluster.

      " }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically.

      Valid for: Multi-AZ DB clusters only

      " + "documentation":"

      Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically.

      Valid for Cluster Type: Multi-AZ DB clusters only

      " }, "MonitoringInterval":{ "shape":"IntegerOptional", - "documentation":"

      The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0. The default is 0.

      If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0.

      Valid Values: 0, 1, 5, 10, 15, 30, 60

      Valid for: Multi-AZ DB clusters only

      " + "documentation":"

      The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0.

      If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0.

      Valid for Cluster Type: Multi-AZ DB clusters only

      Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60

      Default: 0

      " }, "MonitoringRoleArn":{ "shape":"String", - "documentation":"

      The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see To create an IAM role for Amazon RDS Enhanced Monitoring in the Amazon RDS User Guide.

      If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value.

      Valid for: Multi-AZ DB clusters only

      " + "documentation":"

      The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam::123456789012:role/emaccess. For information on creating a monitoring role, see To create an IAM role for Amazon RDS Enhanced Monitoring in the Amazon RDS User Guide.

      If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value.

      Valid for Cluster Type: Multi-AZ DB clusters only

      " }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to turn on Performance Insights for the DB cluster.

      For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.

      Valid for: Multi-AZ DB clusters only

      " + "documentation":"

      Specifies whether to turn on Performance Insights for the DB cluster.

      For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.

      Valid for Cluster Type: Multi-AZ DB clusters only

      " }, "PerformanceInsightsKMSKeyId":{ "shape":"String", - "documentation":"

      The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

      The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

      If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

      Valid for: Multi-AZ DB clusters only

      " + "documentation":"

      The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

      The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

      If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

      Valid for Cluster Type: Multi-AZ DB clusters only

      " }, "PerformanceInsightsRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

      The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

      • 7

      • month * 31, where month is a number of months from 1-23

      • 731

      For example, the following values are valid:

      • 93 (3 months * 31)

      • 341 (11 months * 31)

      • 589 (19 months * 31)

      • 731

      If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

      Valid for: Multi-AZ DB clusters only

      " + "documentation":"

      The number of days to retain Performance Insights data.

      Valid for Cluster Type: Multi-AZ DB clusters only

      Valid Values:

      • 7

      • month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31)

      • 731

      Default: 7 days

      If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error.

      " }, "ServerlessV2ScalingConfiguration":{"shape":"ServerlessV2ScalingConfiguration"}, "NetworkType":{ "shape":"String", - "documentation":"

      The network type of the DB cluster.

      Valid values:

      • IPV4

      • DUAL

      The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

      For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      The network type of the DB cluster.

      The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

      For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide.

      Valid for Cluster Type: Aurora DB clusters only

      Valid Values: IPV4 | DUAL

      " }, "ManageMasterUserPassword":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to manage the master user password with Amazon Web Services Secrets Manager.

      If the DB cluster doesn't manage the master user password with Amazon Web Services Secrets Manager, you can turn on this management. In this case, you can't specify MasterUserPassword.

      If the DB cluster already manages the master user password with Amazon Web Services Secrets Manager, and you specify that the master user password is not managed with Amazon Web Services Secrets Manager, then you must specify MasterUserPassword. In this case, RDS deletes the secret and uses the new password for the master user specified by MasterUserPassword.

      For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User Guide.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      Specifies whether to manage the master user password with Amazon Web Services Secrets Manager.

      If the DB cluster doesn't manage the master user password with Amazon Web Services Secrets Manager, you can turn on this management. In this case, you can't specify MasterUserPassword.

      If the DB cluster already manages the master user password with Amazon Web Services Secrets Manager, and you specify that the master user password is not managed with Amazon Web Services Secrets Manager, then you must specify MasterUserPassword. In this case, RDS deletes the secret and uses the new password for the master user specified by MasterUserPassword.

      For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User Guide.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      " }, "RotateMasterUserPassword":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to rotate the secret managed by Amazon Web Services Secrets Manager for the master user password.

      This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB cluster. The secret value contains the updated password.

      For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User Guide.

      Constraints:

      • You must apply the change immediately when rotating the master user password.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      Specifies whether to rotate the secret managed by Amazon Web Services Secrets Manager for the master user password.

      This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB cluster. The secret value contains the updated password.

      For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User Guide.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      Constraints:

      • You must apply the change immediately when rotating the master user password.

      " }, "MasterUserSecretKmsKeyId":{ "shape":"String", - "documentation":"

      The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager.

      This setting is valid only if both of the following conditions are met:

      • The DB cluster doesn't manage the master user password in Amazon Web Services Secrets Manager.

        If the DB cluster already manages the master user password in Amazon Web Services Secrets Manager, you can't change the KMS key that is used to encrypt the secret.

      • You are turning on ManageMasterUserPassword to manage the master user password in Amazon Web Services Secrets Manager.

        If you are turning on ManageMasterUserPassword and don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key.

      The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

      There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

      Valid for: Aurora DB clusters and Multi-AZ DB clusters

      " + "documentation":"

      The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager.

      This setting is valid only if both of the following conditions are met:

      • The DB cluster doesn't manage the master user password in Amazon Web Services Secrets Manager.

        If the DB cluster already manages the master user password in Amazon Web Services Secrets Manager, you can't change the KMS key that is used to encrypt the secret.

      • You are turning on ManageMasterUserPassword to manage the master user password in Amazon Web Services Secrets Manager.

        If you are turning on ManageMasterUserPassword and don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key.

      The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

      There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

      Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

      " }, "EngineMode":{ "shape":"String", - "documentation":"

      The DB engine mode of the DB cluster, either provisioned or serverless.

      The DB engine mode can be modified only from serverless to provisioned.

      For more information, see CreateDBCluster.

      Valid for: Aurora DB clusters only

      " + "documentation":"

      The DB engine mode of the DB cluster, either provisioned or serverless.

      The DB engine mode can be modified only from serverless to provisioned.

      For more information, see CreateDBCluster.

      Valid for Cluster Type: Aurora DB clusters only

      " }, "AllowEngineModeChange":{ "shape":"Boolean", - "documentation":"

      A value that indicates whether engine mode changes from serverless to provisioned are allowed.

      Constraints: You must allow engine mode changes when specifying a different value for the EngineMode parameter from the DB cluster's current engine mode.

      Valid for: Aurora Serverless v1 DB clusters only

      " + "documentation":"

      Specifies whether engine mode changes from serverless to provisioned are allowed.

      Valid for Cluster Type: Aurora Serverless v1 DB clusters only

      Constraints:

      • You must allow engine mode changes when specifying a different value for the EngineMode parameter from the DB cluster's current engine mode.

      " } }, "documentation":"

      " @@ -10528,211 +10528,215 @@ "members":{ "DBInstanceIdentifier":{ "shape":"String", - "documentation":"

      The DB instance identifier. This value is stored as a lowercase string.

      Constraints:

      • Must match the identifier of an existing DBInstance.

      " + "documentation":"

      The identifier of the DB instance to modify. This value is stored as a lowercase string.

      Constraints:

      • Must match the identifier of an existing DB instance.

      " }, "AllocatedStorage":{ "shape":"IntegerOptional", - "documentation":"

      The new amount of storage in gibibytes (GiB) to allocate for the DB instance.

      For MariaDB, MySQL, Oracle, and PostgreSQL, the value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

      For the valid values for allocated storage for each engine, see CreateDBInstance.

      " + "documentation":"

      The new amount of storage in gibibytes (GiB) to allocate for the DB instance.

      For RDS for MariaDB, RDS for MySQL, RDS for Oracle, and RDS for PostgreSQL, the value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

      For the valid values for allocated storage for each engine, see CreateDBInstance.

      " }, "DBInstanceClass":{ "shape":"String", - "documentation":"

      The new compute and memory capacity of the DB instance, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide or Aurora DB instance classes in the Amazon Aurora User Guide. For RDS Custom, see DB instance class support for RDS Custom for Oracle and DB instance class support for RDS Custom for SQL Server.

      If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless you specify ApplyImmediately in your request.

      Default: Uses existing setting

      " + "documentation":"

      The new compute and memory capacity of the DB instance, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide or Aurora DB instance classes in the Amazon Aurora User Guide. For RDS Custom, see DB instance class support for RDS Custom for Oracle and DB instance class support for RDS Custom for SQL Server.

      If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless you specify ApplyImmediately in your request.

      Default: Uses existing setting

      " }, "DBSubnetGroupName":{ "shape":"String", - "documentation":"

      The new DB subnet group for the DB instance. You can use this parameter to move your DB instance to a different VPC. If your DB instance isn't in a VPC, you can also use this parameter to move your DB instance into a VPC. For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide.

      Changing the subnet group causes an outage during the change. The change is applied during the next maintenance window, unless you enable ApplyImmediately.

      This parameter doesn't apply to RDS Custom.

      Constraints: If supplied, must match the name of an existing DBSubnetGroup.

      Example: mydbsubnetgroup

      " + "documentation":"

      The new DB subnet group for the DB instance. You can use this parameter to move your DB instance to a different VPC. If your DB instance isn't in a VPC, you can also use this parameter to move your DB instance into a VPC. For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide.

      Changing the subnet group causes an outage during the change. The change is applied during the next maintenance window, unless you enable ApplyImmediately.

      This parameter doesn't apply to RDS Custom DB instances.

      Constraints:

      • If supplied, must match existing DB subnet group.

      Example: mydbsubnetgroup

      " }, "DBSecurityGroups":{ "shape":"DBSecurityGroupNameList", - "documentation":"

      A list of DB security groups to authorize on this DB instance. Changing this setting doesn't result in an outage and the change is asynchronously applied as soon as possible.

      This setting doesn't apply to RDS Custom.

      Constraints:

      • If supplied, must match existing DBSecurityGroups.

      " + "documentation":"

      A list of DB security groups to authorize on this DB instance. Changing this setting doesn't result in an outage and the change is asynchronously applied as soon as possible.

      This setting doesn't apply to RDS Custom DB instances.

      Constraints:

      • If supplied, must match existing DB security groups.

      " }, "VpcSecurityGroupIds":{ "shape":"VpcSecurityGroupIdList", - "documentation":"

      A list of Amazon EC2 VPC security groups to authorize on this DB instance. This change is asynchronously applied as soon as possible.

      This setting doesn't apply to RDS Custom.

      Amazon Aurora

      Not applicable. The associated list of EC2 VPC security groups is managed by the DB cluster. For more information, see ModifyDBCluster.

      Constraints:

      • If supplied, must match existing VpcSecurityGroupIds.

      " + "documentation":"

      A list of Amazon EC2 VPC security groups to associate with this DB instance. This change is asynchronously applied as soon as possible.

      This setting doesn't apply to the following DB instances:

      • Amazon Aurora (The associated list of EC2 VPC security groups is managed by the DB cluster. For more information, see ModifyDBCluster.)

      • RDS Custom

      Constraints:

      • If supplied, must match existing VPC security group IDs.

      " }, "ApplyImmediately":{ "shape":"Boolean", - "documentation":"

      A value that indicates whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB instance. By default, this parameter is disabled.

      If this parameter is disabled, changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and are applied on the next call to RebootDBInstance, or the next failure reboot. Review the table of parameters in Modifying a DB Instance in the Amazon RDS User Guide to see the impact of enabling or disabling ApplyImmediately for each modified parameter and to determine when the changes are applied.

      " + "documentation":"

      Specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB instance. By default, this parameter is disabled.

      If this parameter is disabled, changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and are applied on the next call to RebootDBInstance, or the next failure reboot. Review the table of parameters in Modifying a DB Instance in the Amazon RDS User Guide to see the impact of enabling or disabling ApplyImmediately for each modified parameter and to determine when the changes are applied.

      " }, "MasterUserPassword":{ "shape":"String", - "documentation":"

      The new password for the master user. The password can include any printable ASCII character except \"/\", \"\"\", or \"@\".

      Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

      This setting doesn't apply to RDS Custom.

      Amazon Aurora

      Not applicable. The password for the master user is managed by the DB cluster. For more information, see ModifyDBCluster.

      Default: Uses existing setting

      Constraints: Can't be specified if ManageMasterUserPassword is turned on.

      MariaDB

      Constraints: Must contain from 8 to 41 characters.

      Microsoft SQL Server

      Constraints: Must contain from 8 to 128 characters.

      MySQL

      Constraints: Must contain from 8 to 41 characters.

      Oracle

      Constraints: Must contain from 8 to 30 characters.

      PostgreSQL

      Constraints: Must contain from 8 to 128 characters.

      Amazon RDS API operations never return the password, so this action provides a way to regain access to a primary instance user if the password is lost. This includes restoring privileges that might have been accidentally revoked.

      " + "documentation":"

      The new password for the master user.

      Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

      Amazon RDS API operations never return the password, so this action provides a way to regain access to a primary instance user if the password is lost. This includes restoring privileges that might have been accidentally revoked.

      This setting doesn't apply to the following DB instances:

      • Amazon Aurora (The password for the master user is managed by the DB cluster. For more information, see ModifyDBCluster.)

      • RDS Custom

      Default: Uses existing setting

      Constraints:

      • Can't be specified if ManageMasterUserPassword is turned on.

      • Can include any printable ASCII character except \"/\", \"\"\", or \"@\".

      Length Constraints:

      • RDS for MariaDB - Must contain from 8 to 41 characters.

      • RDS for Microsoft SQL Server - Must contain from 8 to 128 characters.

      • RDS for MySQL - Must contain from 8 to 41 characters.

      • RDS for Oracle - Must contain from 8 to 30 characters.

      • RDS for PostgreSQL - Must contain from 8 to 128 characters.

      " }, "DBParameterGroupName":{ "shape":"String", - "documentation":"

      The name of the DB parameter group to apply to the DB instance.

      Changing this setting doesn't result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. In this case, the DB instance isn't rebooted automatically, and the parameter changes aren't applied during the next maintenance window. However, if you modify dynamic parameters in the newly associated DB parameter group, these changes are applied immediately without a reboot.

      This setting doesn't apply to RDS Custom.

      Default: Uses existing setting

      Constraints: The DB parameter group must be in the same DB parameter group family as the DB instance.

      " + "documentation":"

      The name of the DB parameter group to apply to the DB instance.

      Changing this setting doesn't result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. In this case, the DB instance isn't rebooted automatically, and the parameter changes aren't applied during the next maintenance window. However, if you modify dynamic parameters in the newly associated DB parameter group, these changes are applied immediately without a reboot.

      This setting doesn't apply to RDS Custom DB instances.

      Default: Uses existing setting

      Constraints:

      • Must be in the same DB parameter group family as the DB instance.

      " }, "BackupRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

      The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

      Enabling and disabling backups can result in a brief I/O suspension that lasts from a few seconds to a few minutes, depending on the size and class of your DB instance.

      These changes are applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible.

      Amazon Aurora

      Not applicable. The retention period for automated backups is managed by the DB cluster. For more information, see ModifyDBCluster.

      Default: Uses existing setting

      Constraints:

      • It must be a value from 0 to 35. It can't be set to 0 if the DB instance is a source to read replicas. It can't be set to 0 for an RDS Custom for Oracle DB instance.

      • It can be specified for a MySQL read replica only if the source is running MySQL 5.6 or later.

      • It can be specified for a PostgreSQL read replica only if the source is running PostgreSQL 9.3.5.

      " + "documentation":"

      The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

      Enabling and disabling backups can result in a brief I/O suspension that lasts from a few seconds to a few minutes, depending on the size and class of your DB instance.

      These changes are applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible.

      This setting doesn't apply to Amazon Aurora DB instances. The retention period for automated backups is managed by the DB cluster. For more information, see ModifyDBCluster.

      Default: Uses existing setting

      Constraints:

      • Must be a value from 0 to 35.

      • Can't be set to 0 if the DB instance is a source to read replicas.

      • Can't be set to 0 for an RDS Custom for Oracle DB instance.

      " }, "PreferredBackupWindow":{ "shape":"String", - "documentation":"

      The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod parameter. Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. For more information, see Backup window in the Amazon RDS User Guide.

      Amazon Aurora

      Not applicable. The daily time range for creating automated backups is managed by the DB cluster. For more information, see ModifyDBCluster.

      Constraints:

      • Must be in the format hh24:mi-hh24:mi

      • Must be in Universal Time Coordinated (UTC)

      • Must not conflict with the preferred maintenance window

      • Must be at least 30 minutes

      " + "documentation":"

      The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod parameter. Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. For more information, see Backup window in the Amazon RDS User Guide.

      This setting doesn't apply to Amazon Aurora DB instances. The daily time range for creating automated backups is managed by the DB cluster. For more information, see ModifyDBCluster.

      Constraints:

      • Must be in the format hh24:mi-hh24:mi.

      • Must be in Universal Coordinated Time (UTC).

      • Must not conflict with the preferred maintenance window.

      • Must be at least 30 minutes.

      " }, "PreferredMaintenanceWindow":{ "shape":"String", - "documentation":"

      The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter doesn't result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB instance. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied.

      For more information, see Amazon RDS Maintenance Window in the Amazon RDS User Guide.

      Default: Uses existing setting

      Format: ddd:hh24:mi-ddd:hh24:mi

      Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

      Constraints: Must be at least 30 minutes

      " + "documentation":"

      The weekly time range during which system maintenance can occur, which might result in an outage. Changing this parameter doesn't result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter causes a reboot of the DB instance. If you change this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied.

      For more information, see Amazon RDS Maintenance Window in the Amazon RDS User Guide.

      Default: Uses existing setting

      Constraints:

      • Must be in the format ddd:hh24:mi-ddd:hh24:mi.

      • The day values must be mon | tue | wed | thu | fri | sat | sun.

      • Must be in Universal Coordinated Time (UTC).

      • Must not conflict with the preferred backup window.

      • Must be at least 30 minutes.

      " }, "MultiAZ":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether the DB instance is a Multi-AZ deployment. Changing this parameter doesn't result in an outage. The change is applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      Specifies whether the DB instance is a Multi-AZ deployment. Changing this parameter doesn't result in an outage. The change is applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "EngineVersion":{ "shape":"String", - "documentation":"

      The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request.

      For major version upgrades, if a nondefault DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family.

      If you specify only a major version, Amazon RDS will update the DB instance to the default minor version if the current minor version is lower. For information about valid engine versions, see CreateDBInstance, or call DescribeDBEngineVersions.

      If the instance that you're modifying is acting as a read replica, the engine version that you specify must be the same or later than the version that the source DB instance or cluster is running.

      In RDS Custom for Oracle, this parameter is supported for read replicas only if they are in the PATCH_DB_FAILURE lifecycle.

      " + "documentation":"

      The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request.

      For major version upgrades, if a nondefault DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family.

      If you specify only a major version, Amazon RDS updates the DB instance to the default minor version if the current minor version is lower. For information about valid engine versions, see CreateDBInstance, or call DescribeDBEngineVersions.

      If the instance that you're modifying is acting as a read replica, the engine version that you specify must be the same or higher than the version that the source DB instance or cluster is running.

      In RDS Custom for Oracle, this parameter is supported for read replicas only if they are in the PATCH_DB_FAILURE lifecycle.

      " }, "AllowMajorVersionUpgrade":{ "shape":"Boolean", - "documentation":"

      A value that indicates whether major version upgrades are allowed. Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible.

      This setting doesn't apply to RDS Custom.

      Constraints: Major version upgrades must be allowed when specifying a value for the EngineVersion parameter that is a different major version than the DB instance's current version.

      " + "documentation":"

      Specifies whether major version upgrades are allowed. Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible.

      This setting doesn't apply to RDS Custom DB instances.

      Constraints:

      • Major version upgrades must be allowed when specifying a value for the EngineVersion parameter that's a different major version than the DB instance's current version.

      " }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether minor version upgrades are applied automatically to the DB instance during the maintenance window. An outage occurs when all the following conditions are met:

      • The automatic upgrade is enabled for the maintenance window.

      • A newer minor version is available.

      • RDS has enabled automatic patching for the engine version.

      If any of the preceding conditions isn't met, RDS applies the change as soon as possible and doesn't cause an outage.

      For an RDS Custom DB instance, set AutoMinorVersionUpgrade to false. Otherwise, the operation returns an error.

      " + "documentation":"

      Specifies whether minor version upgrades are applied automatically to the DB instance during the maintenance window. An outage occurs when all the following conditions are met:

      • The automatic upgrade is enabled for the maintenance window.

      • A newer minor version is available.

      • RDS has enabled automatic patching for the engine version.

      If any of the preceding conditions isn't met, Amazon RDS applies the change as soon as possible and doesn't cause an outage.

      For an RDS Custom DB instance, don't enable this setting. Otherwise, the operation returns an error.

      " }, "LicenseModel":{ "shape":"String", - "documentation":"

      The license model for the DB instance.

      This setting doesn't apply to RDS Custom.

      Valid values: license-included | bring-your-own-license | general-public-license

      " + "documentation":"

      The license model for the DB instance.

      This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

      Valid Values:

      • RDS for MariaDB - general-public-license

      • RDS for Microsoft SQL Server - license-included

      • RDS for MySQL - general-public-license

      • RDS for Oracle - bring-your-own-license | license-included

      • RDS for PostgreSQL - postgresql-license

      " }, "Iops":{ "shape":"IntegerOptional", - "documentation":"

      The new Provisioned IOPS (I/O operations per second) value for the RDS instance.

      Changing this setting doesn't result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. The DB instance will require a reboot for the change in storage type to take effect.

      If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance.

      Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL, the value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

      Default: Uses existing setting

      " + "documentation":"

      The new Provisioned IOPS (I/O operations per second) value for the RDS instance.

      Changing this setting doesn't result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. The DB instance will require a reboot for the change in storage type to take effect.

      If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance.

      Constraints:

      • For RDS for MariaDB, RDS for MySQL, RDS for Oracle, and RDS for PostgreSQL - The value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

      Default: Uses existing setting

      " }, "OptionGroupName":{ "shape":"String", - "documentation":"

      A value that indicates the DB instance should be associated with the specified option group.

      Changing this parameter doesn't result in an outage, with one exception. If the parameter change results in an option group that enables OEM, it can cause a brief period, lasting less than a second, during which new connections are rejected but existing connections aren't interrupted.

      The change is applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request.

      Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group, and that option group can't be removed from a DB instance after it is associated with a DB instance.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      The option group to associate the DB instance with.

      Changing this parameter doesn't result in an outage, with one exception. If the parameter change results in an option group that enables OEM, it can cause a brief period, lasting less than a second, during which new connections are rejected but existing connections aren't interrupted.

      The change is applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request.

      Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group, and that option group can't be removed from a DB instance after it is associated with a DB instance.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "NewDBInstanceIdentifier":{ "shape":"String", - "documentation":"

      The new DB instance identifier for the DB instance when renaming a DB instance. When you change the DB instance identifier, an instance reboot occurs immediately if you enable ApplyImmediately, or will occur during the next maintenance window if you disable Apply Immediately. This value is stored as a lowercase string.

      This setting doesn't apply to RDS Custom.

      Constraints:

      • Must contain from 1 to 63 letters, numbers, or hyphens.

      • The first character must be a letter.

      • Can't end with a hyphen or contain two consecutive hyphens.

      Example: mydbinstance

      " + "documentation":"

      The new identifier for the DB instance when renaming a DB instance. When you change the DB instance identifier, an instance reboot occurs immediately if you enable ApplyImmediately, or will occur during the next maintenance window if you disable ApplyImmediately. This value is stored as a lowercase string.

      This setting doesn't apply to RDS Custom DB instances.

      Constraints:

      • Must contain from 1 to 63 letters, numbers, or hyphens.

      • The first character must be a letter.

      • Can't end with a hyphen or contain two consecutive hyphens.

      Example: mydbinstance

      " }, "StorageType":{ "shape":"String", - "documentation":"

      Specifies the storage type to be associated with the DB instance.

      If you specify Provisioned IOPS (io1), you must also include a value for the Iops parameter.

      If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance.

      Valid values: gp2 | gp3 | io1 | standard

      Default: io1 if the Iops parameter is specified, otherwise gp2

      " + "documentation":"

      The storage type to associate with the DB instance.

      If you specify Provisioned IOPS (io1), you must also include a value for the Iops parameter.

      If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance.

      Valid Values: gp2 | gp3 | io1 | standard

      Default: io1, if the Iops parameter is specified. Otherwise, gp2.

      " }, "TdeCredentialArn":{ "shape":"String", - "documentation":"

      The ARN from the key store with which to associate the instance for TDE encryption.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      The ARN from the key store with which to associate the instance for TDE encryption.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "TdeCredentialPassword":{ "shape":"String", - "documentation":"

      The password for the given ARN from the key store in order to access the device.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      The password for the given ARN from the key store in order to access the device.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "CACertificateIdentifier":{ "shape":"String", - "documentation":"

      Specifies the CA certificate identifier to use for the DB instance’s server certificate.

      This setting doesn't apply to RDS Custom.

      For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.

      " + "documentation":"

      The CA certificate identifier to use for the DB instance's server certificate.

      This setting doesn't apply to RDS Custom DB instances.

      For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.

      " }, "Domain":{ "shape":"String", - "documentation":"

      The Active Directory directory ID to move the DB instance to. Specify none to remove the instance from its current domain. You must create the domain before this operation. Currently, you can create only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances in an Active Directory Domain.

      For more information, see Kerberos Authentication in the Amazon RDS User Guide.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      The Active Directory directory ID to move the DB instance to. Specify none to remove the instance from its current domain. You must create the domain before this operation. Currently, you can create only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances in an Active Directory Domain.

      For more information, see Kerberos Authentication in the Amazon RDS User Guide.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "CopyTagsToSnapshot":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.

      Amazon Aurora

      Not applicable. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting. For more information, see ModifyDBCluster.

      " + "documentation":"

      Specifies whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags aren't copied.

      This setting doesn't apply to Amazon Aurora DB instances. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting. For more information, see ModifyDBCluster.

      " }, "MonitoringInterval":{ "shape":"IntegerOptional", - "documentation":"

      The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0, which is the default.

      If MonitoringRoleArn is specified, set MonitoringInterval to a value other than 0.

      This setting doesn't apply to RDS Custom.

      Valid Values: 0, 1, 5, 10, 15, 30, 60

      " + "documentation":"

      The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify 0.

      If MonitoringRoleArn is specified, set MonitoringInterval to a value other than 0.

      This setting doesn't apply to RDS Custom DB instances.

      Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60

      Default: 0

      " }, "DBPortNumber":{ "shape":"IntegerOptional", - "documentation":"

      The port number on which the database accepts connections.

      The value of the DBPortNumber parameter must not match any of the port values specified for options in the option group for the DB instance.

      If you change the DBPortNumber value, your database restarts regardless of the value of the ApplyImmediately parameter.

      This setting doesn't apply to RDS Custom.

      MySQL

      Default: 3306

      Valid values: 1150-65535

      MariaDB

      Default: 3306

      Valid values: 1150-65535

      PostgreSQL

      Default: 5432

      Valid values: 1150-65535

      Type: Integer

      Oracle

      Default: 1521

      Valid values: 1150-65535

      SQL Server

      Default: 1433

      Valid values: 1150-65535 except 1234, 1434, 3260, 3343, 3389, 47001, and 49152-49156.

      Amazon Aurora

      Default: 3306

      Valid values: 1150-65535

      " + "documentation":"

      The port number on which the database accepts connections.

      The value of the DBPortNumber parameter must not match any of the port values specified for options in the option group for the DB instance.

      If you change the DBPortNumber value, your database restarts regardless of the value of the ApplyImmediately parameter.

      This setting doesn't apply to RDS Custom DB instances.

      Valid Values: 1150-65535

      Default:

      • Amazon Aurora - 3306

      • RDS for MariaDB - 3306

      • RDS for Microsoft SQL Server - 1433

      • RDS for MySQL - 3306

      • RDS for Oracle - 1521

      • RDS for PostgreSQL - 5432

      Constraints:

      • For RDS for Microsoft SQL Server, the value can't be 1234, 1434, 3260, 3343, 3389, 47001, or 49152-49156.

      " }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether the DB instance is publicly accessible.

      When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

      When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

      PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be part of a public subnet and PubliclyAccessible must be enabled for it to be publicly accessible.

      Changes to the PubliclyAccessible parameter are applied immediately regardless of the value of the ApplyImmediately parameter.

      " + "documentation":"

      Specifies whether the DB instance is publicly accessible.

      When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

      When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

      PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be part of a public subnet and PubliclyAccessible must be enabled for it to be publicly accessible.

      Changes to the PubliclyAccessible parameter are applied immediately regardless of the value of the ApplyImmediately parameter.

      " }, "MonitoringRoleArn":{ "shape":"String", - "documentation":"

      The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see To create an IAM role for Amazon RDS Enhanced Monitoring in the Amazon RDS User Guide.

      If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see To create an IAM role for Amazon RDS Enhanced Monitoring in the Amazon RDS User Guide.

      If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "DomainIAMRoleName":{ "shape":"String", - "documentation":"

      The name of the IAM role to use when making API calls to the Directory Service.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      The name of the IAM role to use when making API calls to the Directory Service.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "PromotionTier":{ "shape":"IntegerOptional", - "documentation":"

      A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide.

      This setting doesn't apply to RDS Custom.

      Default: 1

      Valid Values: 0 - 15

      " + "documentation":"

      The order of priority in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide.

      This setting doesn't apply to RDS Custom DB instances.

      Default: 1

      Valid Values: 0 - 15

      " }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.

      This setting doesn't apply to Amazon Aurora. Mapping Amazon Web Services IAM accounts to database accounts is managed by the DB cluster.

      For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled.

      This setting doesn't apply to Amazon Aurora. Mapping Amazon Web Services IAM accounts to database accounts is managed by the DB cluster.

      For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to enable Performance Insights for the DB instance.

      For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      Specifies whether to enable Performance Insights for the DB instance.

      For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "PerformanceInsightsKMSKeyId":{ "shape":"String", - "documentation":"

      The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

      The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

      If you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

      The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

      If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "PerformanceInsightsRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

      The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

      • 7

      • month * 31, where month is a number of months from 1-23

      • 731

      For example, the following values are valid:

      • 93 (3 months * 31)

      • 341 (11 months * 31)

      • 589 (19 months * 31)

      • 731

      If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      The number of days to retain Performance Insights data.

      This setting doesn't apply to RDS Custom DB instances.

      Valid Values:

      • 7

      • month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31)

      • 731

      Default: 7 days

      If you specify a retention period that isn't valid, such as 94, Amazon RDS returns an error.

      " }, "CloudwatchLogsExportConfiguration":{ "shape":"CloudwatchLogsExportConfiguration", - "documentation":"

      The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance.

      A change to the CloudwatchLogsExportConfiguration parameter is always applied to the DB instance immediately. Therefore, the ApplyImmediately parameter has no effect.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      The log types to be enabled for export to CloudWatch Logs for a specific DB instance.

      A change to the CloudwatchLogsExportConfiguration parameter is always applied to the DB instance immediately. Therefore, the ApplyImmediately parameter has no effect.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "ProcessorFeatures":{ "shape":"ProcessorFeatureList", - "documentation":"

      The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "UseDefaultProcessorFeatures":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether the DB instance class of the DB instance uses its default processor features.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      Specifies whether the DB instance class of the DB instance uses its default processor features.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "DeletionProtection":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance.

      " + "documentation":"

      Specifies whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance.

      " }, "MaxAllocatedStorage":{ "shape":"IntegerOptional", - "documentation":"

      The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.

      For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.

      For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "CertificateRotationRestart":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether the DB instance is restarted when you rotate your SSL/TLS certificate.

      By default, the DB instance is restarted when you rotate your SSL/TLS certificate. The certificate is not updated until the DB instance is restarted.

      Set this parameter only if you are not using SSL/TLS to connect to the DB instance.

      If you are using SSL/TLS to connect to the DB instance, follow the appropriate instructions for your DB engine to rotate your SSL/TLS certificate:

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      Specifies whether the DB instance is restarted when you rotate your SSL/TLS certificate.

      By default, the DB instance is restarted when you rotate your SSL/TLS certificate. The certificate is not updated until the DB instance is restarted.

      Set this parameter only if you are not using SSL/TLS to connect to the DB instance.

      If you are using SSL/TLS to connect to the DB instance, follow the appropriate instructions for your DB engine to rotate your SSL/TLS certificate:

      This setting doesn't apply to RDS Custom DB instances.

      " }, "ReplicaMode":{ "shape":"ReplicaMode", - "documentation":"

      A value that sets the open mode of a replica database to either mounted or read-only.

      Currently, this parameter is only supported for Oracle DB instances.

      Mounted DB replicas are included in Oracle Enterprise Edition. The main use case for mounted replicas is cross-Region disaster recovery. The primary database doesn't use Active Data Guard to transmit information to the mounted replica. Because it doesn't accept user connections, a mounted replica can't serve a read-only workload. For more information, see Working with Oracle Read Replicas for Amazon RDS in the Amazon RDS User Guide.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      A value that sets the open mode of a replica database to either mounted or read-only.

      Currently, this parameter is only supported for Oracle DB instances.

      Mounted DB replicas are included in Oracle Enterprise Edition. The main use case for mounted replicas is cross-Region disaster recovery. The primary database doesn't use Active Data Guard to transmit information to the mounted replica. Because it doesn't accept user connections, a mounted replica can't serve a read-only workload. For more information, see Working with Oracle Read Replicas for Amazon RDS in the Amazon RDS User Guide.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "EnableCustomerOwnedIp":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance.

      A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the DB instance from outside of its virtual private cloud (VPC) on your local network.

      For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.

      For more information about CoIPs, see Customer-owned IP addresses in the Amazon Web Services Outposts User Guide.

      " + "documentation":"

      Specifies whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance.

      A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the DB instance from outside of its virtual private cloud (VPC) on your local network.

      For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.

      For more information about CoIPs, see Customer-owned IP addresses in the Amazon Web Services Outposts User Guide.

      " }, "AwsBackupRecoveryPointArn":{ "shape":"AwsBackupRecoveryPointArn", - "documentation":"

      The Amazon Resource Name (ARN) of the recovery point in Amazon Web Services Backup.

      This setting doesn't apply to RDS Custom.

      " + "documentation":"

      The Amazon Resource Name (ARN) of the recovery point in Amazon Web Services Backup.

      This setting doesn't apply to RDS Custom DB instances.

      " }, "AutomationMode":{ "shape":"AutomationMode", - "documentation":"

      The automation mode of the RDS Custom DB instance: full or all paused. If full, the DB instance automates monitoring and instance recovery. If all paused, the instance pauses automation for the duration set by ResumeFullAutomationModeMinutes.

      " + "documentation":"

      The automation mode of the RDS Custom DB instance. If full, the DB instance automates monitoring and instance recovery. If all paused, the instance pauses automation for the duration set by ResumeFullAutomationModeMinutes.

      " }, "ResumeFullAutomationModeMinutes":{ "shape":"IntegerOptional", - "documentation":"

      The number of minutes to pause the automation. When the time period ends, RDS Custom resumes full automation. The minimum value is 60 (default). The maximum value is 1,440.

      " + "documentation":"

      The number of minutes to pause the automation. When the time period ends, RDS Custom resumes full automation.

      Default: 60

      Constraints:

      • Must be at least 60.

      • Must be no more than 1,440.

      " }, "NetworkType":{ "shape":"String", - "documentation":"

      The network type of the DB instance.

      Valid values:

      • IPV4

      • DUAL

      The network type is determined by the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

      For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide.

      " + "documentation":"

      The network type of the DB instance.

      The network type is determined by the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

      For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide.

      Valid Values: IPV4 | DUAL

      " }, "StorageThroughput":{ "shape":"IntegerOptional", - "documentation":"

      Specifies the storage throughput value for the DB instance.

      This setting applies only to the gp3 storage type.

      This setting doesn't apply to RDS Custom or Amazon Aurora.

      " + "documentation":"

      The storage throughput value for the DB instance.

      This setting applies only to the gp3 storage type.

      This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

      " }, "ManageMasterUserPassword":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to manage the master user password with Amazon Web Services Secrets Manager.

      If the DB instance doesn't manage the master user password with Amazon Web Services Secrets Manager, you can turn on this management. In this case, you can't specify MasterUserPassword.

      If the DB instance already manages the master user password with Amazon Web Services Secrets Manager, and you specify that the master user password is not managed with Amazon Web Services Secrets Manager, then you must specify MasterUserPassword. In this case, RDS deletes the secret and uses the new password for the master user specified by MasterUserPassword.

      For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide.

      Constraints:

      • Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified.

      " + "documentation":"

      Specifies whether to manage the master user password with Amazon Web Services Secrets Manager.

      If the DB instance doesn't manage the master user password with Amazon Web Services Secrets Manager, you can turn on this management. In this case, you can't specify MasterUserPassword.

      If the DB instance already manages the master user password with Amazon Web Services Secrets Manager, and you specify that the master user password is not managed with Amazon Web Services Secrets Manager, then you must specify MasterUserPassword. In this case, Amazon RDS deletes the secret and uses the new password for the master user specified by MasterUserPassword.

      For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide.

      Constraints:

      • Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified.

      " }, "RotateMasterUserPassword":{ "shape":"BooleanOptional", - "documentation":"

      A value that indicates whether to rotate the secret managed by Amazon Web Services Secrets Manager for the master user password.

      This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB cluster. The secret value contains the updated password.

      For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide.

      Constraints:

      • You must apply the change immediately when rotating the master user password.

      " + "documentation":"

      Specifies whether to rotate the secret managed by Amazon Web Services Secrets Manager for the master user password.

      This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB cluster. The secret value contains the updated password.

      For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide.

      Constraints:

      • You must apply the change immediately when rotating the master user password.

      " }, "MasterUserSecretKmsKeyId":{ "shape":"String", "documentation":"

      The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager.

      This setting is valid only if both of the following conditions are met:

      • The DB instance doesn't manage the master user password in Amazon Web Services Secrets Manager.

        If the DB instance already manages the master user password in Amazon Web Services Secrets Manager, you can't change the KMS key used to encrypt the secret.

      • You are turning on ManageMasterUserPassword to manage the master user password in Amazon Web Services Secrets Manager.

        If you are turning on ManageMasterUserPassword and don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key.

      The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

      There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

      " + }, + "Engine":{ + "shape":"String", + "documentation":"

      The target Oracle DB engine when you convert a non-CDB to a CDB. This intermediate step is necessary to upgrade an Oracle Database 19c non-CDB to an Oracle Database 21c CDB.

      Note the following requirements:

      • Make sure that you specify oracle-ee-cdb or oracle-se2-cdb.

      • Make sure that your DB engine runs Oracle Database 19c with an April 2021 or later RU.

      Note the following limitations:

      • You can't convert a CDB to a non-CDB.

      • You can't convert a replica database.

      • You can't convert a non-CDB to a CDB and upgrade the engine version in the same command.

      • You can't convert the existing custom parameter or option group when it has options or parameters that are permanent or persistent. In this situation, the DB instance reverts to the default option and parameter group. To avoid reverting to the default, specify a new parameter group with --db-parameter-group-name and a new option group with --option-group-name.

      " } }, "documentation":"

      " @@ -10911,7 +10915,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

      The engine version to upgrade the DB snapshot to.

      The following are the database engines and engine versions that are available when you upgrade a DB snapshot.

      MySQL

      • 5.5.46 (supported for 5.1 DB snapshots)

      Oracle

      • 12.1.0.2.v8 (supported for 12.1.0.1 DB snapshots)

      • 11.2.0.4.v12 (supported for 11.2.0.2 DB snapshots)

      • 11.2.0.4.v11 (supported for 11.2.0.3 DB snapshots)

      PostgreSQL

      For the list of engine versions that are available for upgrading a DB snapshot, see Upgrading the PostgreSQL DB Engine for Amazon RDS.

      " + "documentation":"

      The engine version to upgrade the DB snapshot to.

      The following are the database engines and engine versions that are available when you upgrade a DB snapshot.

      MySQL

      • 5.5.46 (supported for 5.1 DB snapshots)

      Oracle

      • 19.0.0.0.ru-2022-01.rur-2022-01.r1 (supported for 12.2.0.1 DB snapshots)

      • 19.0.0.0.ru-2022-07.rur-2022-07.r1 (supported for 12.1.0.2 DB snapshots)

      • 12.1.0.2.v8 (supported for 12.1.0.1 DB snapshots)

      • 11.2.0.4.v12 (supported for 11.2.0.2 DB snapshots)

      • 11.2.0.4.v11 (supported for 11.2.0.3 DB snapshots)

      PostgreSQL

      For the list of engine versions that are available for upgrading a DB snapshot, see Upgrading the PostgreSQL DB Engine for Amazon RDS.

      " }, "OptionGroupName":{ "shape":"String", @@ -11889,6 +11893,10 @@ "StorageThroughput":{ "shape":"IntegerOptional", "documentation":"

      The storage throughput of the DB instance.

      " + }, + "Engine":{ + "shape":"String", + "documentation":"

      The database engine of the DB instance.

      " } }, "documentation":"

      This data type is used as a response element in the ModifyDBInstance operation and contains changes that will be applied during the next maintenance window.

      " @@ -13601,7 +13609,7 @@ "documentation":"

      The number of seconds before scaling times out. What happens when an attempted scaling action times out is determined by the TimeoutAction setting.

      " } }, - "documentation":"

      Shows the scaling configuration for an Aurora DB cluster in serverless DB engine mode.

      For more information, see Using Amazon Aurora Serverless v1 in the Amazon Aurora User Guide.

      " + "documentation":"

      The scaling configuration for an Aurora DB cluster in serverless DB engine mode.

      For more information, see Using Amazon Aurora Serverless v1 in the Amazon Aurora User Guide.

      " }, "ServerlessV2ScalingConfiguration":{ "type":"structure", @@ -13629,7 +13637,7 @@ "documentation":"

      The maximum number of Aurora capacity units (ACUs) for a DB instance in an Aurora Serverless v2 cluster. You can specify ACU values in half-step increments, such as 40, 40.5, 41, and so on. The largest value that you can use is 128.

      " } }, - "documentation":"

      Shows the scaling configuration for an Aurora Serverless v2 DB cluster.

      For more information, see Using Amazon Aurora Serverless v2 in the Amazon Aurora User Guide.

      " + "documentation":"

      The scaling configuration for an Aurora Serverless v2 DB cluster.

      For more information, see Using Amazon Aurora Serverless v2 in the Amazon Aurora User Guide.

      " }, "SharedSnapshotQuotaExceededFault":{ "type":"structure", diff --git a/services/rdsdata/pom.xml b/services/rdsdata/pom.xml index dda5fbef412f..6a6b821a0d9d 100644 --- a/services/rdsdata/pom.xml +++ b/services/rdsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT rdsdata AWS Java SDK :: Services :: RDS Data diff --git a/services/redshift/pom.xml b/services/redshift/pom.xml index 5cc8ee5b76b5..36b5d48a1356 100644 --- a/services/redshift/pom.xml +++ b/services/redshift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT redshift AWS Java SDK :: Services :: Amazon Redshift diff --git a/services/redshift/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/redshift/src/main/resources/codegen-resources/endpoint-rule-set.json index fb0c8cf49517..af8ff95a9d0f 100644 --- a/services/redshift/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/redshift/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -349,44 +349,6 @@ "conditions": [], "type": "tree", "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "us-gov-east-1" - ] - } - ], - "endpoint": { - "url": "https://redshift.us-gov-east-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "us-gov-west-1" - ] - } - ], - "endpoint": { - "url": "https://redshift.us-gov-west-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, { "conditions": [], "endpoint": { diff --git a/services/redshift/src/main/resources/codegen-resources/endpoint-tests.json b/services/redshift/src/main/resources/codegen-resources/endpoint-tests.json index 5603237a0d85..a065a7553eea 100644 --- a/services/redshift/src/main/resources/codegen-resources/endpoint-tests.json +++ 
b/services/redshift/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "af-south-1", "UseFIPS": false, - "Region": "af-south-1" + "UseDualStack": false } }, { @@ -21,9 +21,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-east-1", "UseFIPS": false, - "Region": "ap-east-1" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-1", "UseFIPS": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { @@ -47,9 +47,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-2", "UseFIPS": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-3", "UseFIPS": false, - "Region": "ap-northeast-3" + "UseDualStack": false } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-south-1", "UseFIPS": false, - "Region": "ap-south-1" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-1", "UseFIPS": false, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { @@ -99,9 +99,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { @@ -112,9 +112,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-3", "UseFIPS": false, - "Region": "ap-southeast-3" + "UseDualStack": false } }, { @@ -125,9 +125,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ca-central-1", "UseFIPS": false, - "Region": "ca-central-1" + "UseDualStack": false } }, { @@ -138,9 +138,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ca-central-1", "UseFIPS": true, - "Region": "ca-central-1" + "UseDualStack": false } }, { @@ -151,9 +151,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-central-1", "UseFIPS": 
false, - "Region": "eu-central-1" + "UseDualStack": false } }, { @@ -164,9 +164,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-north-1", "UseFIPS": false, - "Region": "eu-north-1" + "UseDualStack": false } }, { @@ -177,9 +177,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-south-1", "UseFIPS": false, - "Region": "eu-south-1" + "UseDualStack": false } }, { @@ -190,9 +190,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-west-1", "UseFIPS": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { @@ -203,9 +203,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-west-2", "UseFIPS": false, - "Region": "eu-west-2" + "UseDualStack": false } }, { @@ -216,9 +216,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-west-3", "UseFIPS": false, - "Region": "eu-west-3" + "UseDualStack": false } }, { @@ -229,9 +229,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "me-south-1", "UseFIPS": false, - "Region": "me-south-1" + "UseDualStack": false } }, { @@ -242,9 +242,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "sa-east-1", "UseFIPS": false, - "Region": "sa-east-1" + "UseDualStack": false } }, { @@ -255,9 +255,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -268,9 +268,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -281,9 +281,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-2", "UseFIPS": false, - "Region": "us-east-2" + "UseDualStack": false } }, { @@ -294,9 +294,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-2", "UseFIPS": true, - "Region": "us-east-2" + "UseDualStack": false } }, { @@ -307,9 +307,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-1", "UseFIPS": false, - "Region": "us-west-1" + "UseDualStack": false } }, { @@ -320,9 
+320,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-1", "UseFIPS": true, - "Region": "us-west-1" + "UseDualStack": false } }, { @@ -333,9 +333,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": false, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -346,9 +346,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": true, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -359,9 +359,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -372,9 +372,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -385,9 +385,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -398,9 +398,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-northwest-1", "UseFIPS": false, - "Region": "cn-northwest-1" + "UseDualStack": false } }, { @@ -411,9 +411,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -424,9 +424,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -437,9 +437,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -450,9 +450,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -463,9 +463,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -476,9 +476,9 @@ } }, "params": { - "UseDualStack": false, + 
"Region": "us-gov-west-1", "UseFIPS": false, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { @@ -489,9 +489,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-west-1", "UseFIPS": true, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { @@ -502,9 +502,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -515,9 +515,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -528,9 +528,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -541,9 +541,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-west-1", "UseFIPS": false, - "Region": "us-iso-west-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -554,9 +565,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -567,9 +589,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack 
are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -580,9 +613,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -593,9 +637,9 @@ } }, "params": { - "UseDualStack": false, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -607,8 +651,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -618,9 +662,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -630,11 +674,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/redshift/src/main/resources/codegen-resources/paginators-1.json b/services/redshift/src/main/resources/codegen-resources/paginators-1.json index f2cb73e2214e..9fe0f863f2f7 100644 --- a/services/redshift/src/main/resources/codegen-resources/paginators-1.json +++ b/services/redshift/src/main/resources/codegen-resources/paginators-1.json @@ -54,6 +54,12 @@ 
"output_token": "Marker", "result_key": "Clusters" }, + "DescribeCustomDomainAssociations": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "Associations" + }, "DescribeDataShares": { "input_token": "Marker", "limit_key": "MaxRecords", diff --git a/services/redshift/src/main/resources/codegen-resources/service-2.json b/services/redshift/src/main/resources/codegen-resources/service-2.json index 647883d62f27..74695183016f 100644 --- a/services/redshift/src/main/resources/codegen-resources/service-2.json +++ b/services/redshift/src/main/resources/codegen-resources/service-2.json @@ -356,6 +356,24 @@ ], "documentation":"

      Creates a new Amazon Redshift subnet group. You must provide a list of one or more subnets in your existing Amazon Virtual Private Cloud (Amazon VPC) when creating Amazon Redshift subnet group.

      For information about subnet groups, go to Amazon Redshift Cluster Subnet Groups in the Amazon Redshift Cluster Management Guide.

      " }, + "CreateCustomDomainAssociation":{ + "name":"CreateCustomDomainAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCustomDomainAssociationMessage"}, + "output":{ + "shape":"CreateCustomDomainAssociationResult", + "resultWrapper":"CreateCustomDomainAssociationResult" + }, + "errors":[ + {"shape":"UnsupportedOperationFault"}, + {"shape":"ClusterNotFoundFault"}, + {"shape":"CustomCnameAssociationFault"} + ], + "documentation":"

      Used to create a custom domain name for a cluster. Properties include the custom domain name, the cluster the custom domain is associated with, and the certificate Amazon Resource Name (ARN).

      " + }, "CreateEndpointAccess":{ "name":"CreateEndpointAccess", "http":{ @@ -655,6 +673,20 @@ ], "documentation":"

      Deletes the specified cluster subnet group.

      " }, + "DeleteCustomDomainAssociation":{ + "name":"DeleteCustomDomainAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCustomDomainAssociationMessage"}, + "errors":[ + {"shape":"UnsupportedOperationFault"}, + {"shape":"ClusterNotFoundFault"}, + {"shape":"CustomCnameAssociationFault"} + ], + "documentation":"

      Contains information about deleting a custom domain association for a cluster.

      " + }, "DeleteEndpointAccess":{ "name":"DeleteEndpointAccess", "http":{ @@ -977,6 +1009,23 @@ ], "documentation":"

      Returns properties of provisioned clusters including general cluster properties, cluster database properties, maintenance and backup properties, and security and access properties. This operation supports pagination. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

      If you specify both tag keys and tag values in the same request, Amazon Redshift returns all clusters that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all clusters that have any combination of those values are returned.

      If both tag keys and values are omitted from the request, clusters are returned regardless of whether they have tag keys or values associated with them.

      " }, + "DescribeCustomDomainAssociations":{ + "name":"DescribeCustomDomainAssociations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCustomDomainAssociationsMessage"}, + "output":{ + "shape":"CustomDomainAssociationsMessage", + "resultWrapper":"DescribeCustomDomainAssociationsResult" + }, + "errors":[ + {"shape":"CustomDomainAssociationNotFoundFault"}, + {"shape":"UnsupportedOperationFault"} + ], + "documentation":"

      Contains information for custom domain associations for a cluster.

      " + }, "DescribeDataShares":{ "name":"DescribeDataShares", "http":{ @@ -1640,7 +1689,9 @@ {"shape":"InvalidElasticIpFault"}, {"shape":"TableLimitExceededFault"}, {"shape":"InvalidClusterTrackFault"}, - {"shape":"InvalidRetentionPeriodFault"} + {"shape":"InvalidRetentionPeriodFault"}, + {"shape":"UnsupportedOperationFault"}, + {"shape":"CustomCnameAssociationFault"} ], "documentation":"

      Modifies the settings for a cluster.

      You can also change node type and the number of nodes to scale up or down the cluster. When resizing a cluster, you must specify both the number of nodes and the node type even if one of the parameters does not change.

      You can add another security or parameter group, or change the admin user password. Resetting a cluster password or modifying the security groups associated with a cluster do not need a reboot. However, modifying a parameter group requires a reboot for parameters to take effect. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

      " }, @@ -1766,6 +1817,24 @@ ], "documentation":"

      Modifies a cluster subnet group to include the specified list of VPC subnets. The operation replaces the existing list of subnets with the new list of subnets.

      " }, + "ModifyCustomDomainAssociation":{ + "name":"ModifyCustomDomainAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyCustomDomainAssociationMessage"}, + "output":{ + "shape":"ModifyCustomDomainAssociationResult", + "resultWrapper":"ModifyCustomDomainAssociationResult" + }, + "errors":[ + {"shape":"UnsupportedOperationFault"}, + {"shape":"ClusterNotFoundFault"}, + {"shape":"CustomCnameAssociationFault"} + ], + "documentation":"

      Contains information for changing a custom domain association.

      " + }, "ModifyEndpointAccess":{ "name":"ModifyEndpointAccess", "http":{ @@ -2348,6 +2417,32 @@ "locationName":"ClusterAssociatedToSchedule" } }, + "Association":{ + "type":"structure", + "members":{ + "CustomDomainCertificateArn":{ + "shape":"String", + "documentation":"

      The Amazon Resource Name (ARN) for the certificate associated with the custom domain.

      " + }, + "CustomDomainCertificateExpiryDate":{ + "shape":"TStamp", + "documentation":"

      The expiration date for the certificate.

      " + }, + "CertificateAssociations":{ + "shape":"CertificateAssociationList", + "documentation":"

      A list of all associated clusters and domain names tied to a specific certificate.

      " + } + }, + "documentation":"

      Contains information about the custom domain name association.

      ", + "wrapper":true + }, + "AssociationList":{ + "type":"list", + "member":{ + "shape":"Association", + "locationName":"Association" + } + }, "AttributeList":{ "type":"list", "member":{ @@ -2559,7 +2654,7 @@ }, "SnapshotClusterIdentifier":{ "shape":"String", - "documentation":"

      The identifier of the cluster the snapshot was created from. This parameter is required if your IAM user or role has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

      " + "documentation":"

      The identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

      " }, "AccountWithRestoreAccess":{ "shape":"String", @@ -2712,6 +2807,27 @@ } } }, + "CertificateAssociation":{ + "type":"structure", + "members":{ + "CustomDomainName":{ + "shape":"String", + "documentation":"

      The custom domain name for the certificate association.

      " + }, + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

      The cluster identifier for the certificate association.

      " + } + }, + "documentation":"

      A cluster ID and custom domain name tied to a specific certificate. These are typically returned in a list.

      " + }, + "CertificateAssociationList":{ + "type":"list", + "member":{ + "shape":"CertificateAssociation", + "locationName":"CertificateAssociation" + } + }, "Cluster":{ "type":"structure", "members":{ @@ -2922,6 +3038,18 @@ "ReservedNodeExchangeStatus":{ "shape":"ReservedNodeExchangeStatus", "documentation":"

      The status of the reserved-node exchange request. Statuses include in-progress and requested.

      " + }, + "CustomDomainName":{ + "shape":"String", + "documentation":"

      The custom domain name associated with the cluster.

      " + }, + "CustomDomainCertificateArn":{ + "shape":"String", + "documentation":"

      The certificate Amazon Resource Name (ARN) for the custom domain name.

      " + }, + "CustomDomainCertificateExpiryDate":{ + "shape":"TStamp", + "documentation":"

      The expiration date for the certificate associated with the custom domain name.

      " } }, "documentation":"

      Describes a cluster.

      ", @@ -3604,7 +3732,7 @@ }, "SourceSnapshotClusterIdentifier":{ "shape":"String", - "documentation":"

      The identifier of the cluster the source snapshot was created from. This parameter is required if your IAM user or role has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

      Constraints:

      • Must be the identifier for a valid cluster.

      " + "documentation":"

      The identifier of the cluster the source snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

      Constraints:

      • Must be the identifier for a valid cluster.

      " }, "TargetSnapshotIdentifier":{ "shape":"String", @@ -3692,11 +3820,11 @@ }, "MasterUsername":{ "shape":"String", - "documentation":"

      The user name associated with the admin user for the cluster that is being created.

      Constraints:

      • Must be 1 - 128 alphanumeric characters or hyphens. The user name can't be PUBLIC.

      • Must contain only lowercase letters, numbers, underscore, plus sign, period (dot), at symbol (@), or hyphen.

      • The first character must be a letter.

      • Must not contain a colon (:) or a slash (/).

      • Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.

      " + "documentation":"

      The user name associated with the admin user account for the cluster that is being created.

      Constraints:

      • Must be 1 - 128 alphanumeric characters or hyphens. The user name can't be PUBLIC.

      • Must contain only lowercase letters, numbers, underscore, plus sign, period (dot), at symbol (@), or hyphen.

      • The first character must be a letter.

      • Must not contain a colon (:) or a slash (/).

      • Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.

      " }, "MasterUserPassword":{ "shape":"String", - "documentation":"

      The password associated with the admin user for the cluster that is being created.

      Constraints:

      • Must be between 8 and 64 characters in length.

      • Must contain at least one uppercase letter.

      • Must contain at least one lowercase letter.

      • Must contain one number.

      • Can be any printable ASCII character (ASCII code 33-126) except ' (single quote), \" (double quote), \\, /, or @.

      " + "documentation":"

      The password associated with the admin user account for the cluster that is being created.

      Constraints:

      • Must be between 8 and 64 characters in length.

      • Must contain at least one uppercase letter.

      • Must contain at least one lowercase letter.

      • Must contain one number.

      • Can be any printable ASCII character (ASCII code 33-126) except ' (single quote), \" (double quote), \\, /, or @.

      " }, "ClusterSecurityGroups":{ "shape":"ClusterSecurityGroupNameList", @@ -3945,6 +4073,49 @@ "ClusterSubnetGroup":{"shape":"ClusterSubnetGroup"} } }, + "CreateCustomDomainAssociationMessage":{ + "type":"structure", + "required":[ + "CustomDomainName", + "CustomDomainCertificateArn", + "ClusterIdentifier" + ], + "members":{ + "CustomDomainName":{ + "shape":"CustomDomainNameString", + "documentation":"

      The custom domain name for a custom domain association.

      " + }, + "CustomDomainCertificateArn":{ + "shape":"CustomDomainCertificateArnString", + "documentation":"

      The certificate Amazon Resource Name (ARN) for the custom domain name association.

      " + }, + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

      The cluster identifier that the custom domain is associated with.

      " + } + } + }, + "CreateCustomDomainAssociationResult":{ + "type":"structure", + "members":{ + "CustomDomainName":{ + "shape":"CustomDomainNameString", + "documentation":"

      The custom domain name for the association result.

      " + }, + "CustomDomainCertificateArn":{ + "shape":"CustomDomainCertificateArnString", + "documentation":"

      The Amazon Resource Name (ARN) for the certificate associated with the custom domain name.

      " + }, + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

      The identifier of the cluster that the custom domain is associated with.

      " + }, + "CustomDomainCertExpiryTime":{ + "shape":"String", + "documentation":"

      The expiration time for the certificate for the custom domain.

      " + } + } + }, "CreateEndpointAccessMessage":{ "type":"structure", "required":[ @@ -4245,6 +4416,55 @@ } } }, + "CustomCnameAssociationFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

      An error occurred when an attempt was made to change the custom domain association.

      ", + "error":{ + "code":"CustomCnameAssociationFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CustomDomainAssociationNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

      An error occurred. The custom domain name couldn't be found.

      ", + "error":{ + "code":"CustomDomainAssociationNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "CustomDomainAssociationsMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

      The marker for the custom domain association.

      " + }, + "Associations":{ + "shape":"AssociationList", + "documentation":"

      The associations for the custom domain.

      " + } + } + }, + "CustomDomainCertificateArnString":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:[\\w+=/,.@-]+:acm:[\\w+=/,.@-]*:[0-9]+:[\\w+=,.@-]+(/[\\w+=,.@-]+)*" + }, + "CustomDomainNameString":{ + "type":"string", + "max":253, + "min":1, + "pattern":"^((?!-)[A-Za-z0-9-]{1,63}(?The unique identifier of the cluster the snapshot was created from. This parameter is required if your IAM user or role has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

      Constraints: Must be the name of valid cluster.

      " + "documentation":"

      The unique identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

      Constraints: Must be the name of valid cluster.

      " } }, "documentation":"

      " @@ -4552,6 +4772,16 @@ }, "documentation":"

      " }, + "DeleteCustomDomainAssociationMessage":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

      The identifier of the cluster to delete a custom domain association for.

      " + } + } + }, "DeleteEndpointAccessMessage":{ "type":"structure", "required":["EndpointName"], @@ -4826,7 +5056,7 @@ }, "MaxRecords":{ "shape":"IntegerOptional", - "documentation":"

      The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

      Default: 100

      Constraints: minimum 20, maximum 500.

      " + "documentation":"

      The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

      Default: 100

      Constraints: minimum 20, maximum 100.

      " }, "Marker":{ "shape":"String", @@ -4946,6 +5176,27 @@ }, "documentation":"

      " }, + "DescribeCustomDomainAssociationsMessage":{ + "type":"structure", + "members":{ + "CustomDomainName":{ + "shape":"CustomDomainNameString", + "documentation":"

      The custom domain name for the custom domain association.

      " + }, + "CustomDomainCertificateArn":{ + "shape":"CustomDomainCertificateArnString", + "documentation":"

      The certificate Amazon Resource Name (ARN) for the custom domain association.

      " + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

      The maximum records setting for the associated custom domain.

      " + }, + "Marker":{ + "shape":"String", + "documentation":"

      The marker for the custom domain association.

      " + } + } + }, "DescribeDataSharesForConsumerMessage":{ "type":"structure", "members":{ @@ -6218,10 +6469,7 @@ }, "GetClusterCredentialsMessage":{ "type":"structure", - "required":[ - "DbUser", - "ClusterIdentifier" - ], + "required":["DbUser"], "members":{ "DbUser":{ "shape":"String", @@ -6246,13 +6494,16 @@ "DbGroups":{ "shape":"DbGroupList", "documentation":"

      A list of the names of existing database groups that the user named in DbUser will join for the current session, in addition to any group memberships for an existing user. If not specified, a new user is added only to PUBLIC.

      Database group name constraints

      • Must be 1 to 64 alphanumeric characters or hyphens

      • Must contain only lowercase letters, numbers, underscore, plus sign, period (dot), at symbol (@), or hyphen.

      • First character must be a letter.

      • Must not contain a colon ( : ) or slash ( / ).

      • Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.

      " + }, + "CustomDomainName":{ + "shape":"String", + "documentation":"

      The custom domain name for the cluster credentials.

      " } }, "documentation":"

      The request parameters to get cluster credentials.

      " }, "GetClusterCredentialsWithIAMMessage":{ "type":"structure", - "required":["ClusterIdentifier"], "members":{ "DbName":{ "shape":"String", @@ -6265,6 +6516,10 @@ "DurationSeconds":{ "shape":"IntegerOptional", "documentation":"

      The number of seconds until the returned temporary password expires.

      Range: 900-3600. Default: 900.

      " + }, + "CustomDomainName":{ + "shape":"String", + "documentation":"

      The custom domain name for the IAM message cluster credentials.

      " } } }, @@ -7230,7 +7485,7 @@ }, "MasterUserPassword":{ "shape":"String", - "documentation":"

      The new password for the cluster admin user. This change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

      Operations never return the password, so this operation provides a way to regain access to the admin user for a cluster if the password is lost.

      Default: Uses existing setting.

      Constraints:

      • Must be between 8 and 64 characters in length.

      • Must contain at least one uppercase letter.

      • Must contain at least one lowercase letter.

      • Must contain one number.

      • Can be any printable ASCII character (ASCII code 33-126) except ' (single quote), \" (double quote), \\, /, or @.

      " + "documentation":"

      The new password for the cluster admin user. This change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

      Operations never return the password, so this operation provides a way to regain access to the admin user account for a cluster if the password is lost.

      Default: Uses existing setting.

      Constraints:

      • Must be between 8 and 64 characters in length.

      • Must contain at least one uppercase letter.

      • Must contain at least one lowercase letter.

      • Must contain one number.

      • Can be any printable ASCII character (ASCII code 33-126) except ' (single quote), \" (double quote), \\, /, or @.

      " }, "ClusterParameterGroupName":{ "shape":"String", @@ -7401,6 +7656,45 @@ "ClusterSubnetGroup":{"shape":"ClusterSubnetGroup"} } }, + "ModifyCustomDomainAssociationMessage":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "CustomDomainName":{ + "shape":"CustomDomainNameString", + "documentation":"

      The custom domain name for a changed custom domain association.

      " + }, + "CustomDomainCertificateArn":{ + "shape":"CustomDomainCertificateArnString", + "documentation":"

      The certificate Amazon Resource Name (ARN) for the changed custom domain association.

      " + }, + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

      The identifier of the cluster to change a custom domain association for.

      " + } + } + }, + "ModifyCustomDomainAssociationResult":{ + "type":"structure", + "members":{ + "CustomDomainName":{ + "shape":"CustomDomainNameString", + "documentation":"

      The custom domain name associated with the result for the changed custom domain association.

      " + }, + "CustomDomainCertificateArn":{ + "shape":"CustomDomainCertificateArnString", + "documentation":"

      The certificate Amazon Resource Name (ARN) associated with the result for the changed custom domain association.

      " + }, + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

      The identifier of the cluster associated with the result for the changed custom domain association.

      " + }, + "CustomDomainCertExpiryTime":{ + "shape":"String", + "documentation":"

      The certificate expiration time associated with the result for the changed custom domain association.

      " + } + } + }, "ModifyEndpointAccessMessage":{ "type":"structure", "required":["EndpointName"], @@ -8575,7 +8869,7 @@ }, "SnapshotClusterIdentifier":{ "shape":"String", - "documentation":"

      The name of the cluster the source snapshot was created from. This parameter is required if your IAM user or role has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

      " + "documentation":"

      The name of the cluster the source snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

      " }, "Port":{ "shape":"IntegerOptional", @@ -8893,7 +9187,7 @@ }, "SnapshotClusterIdentifier":{ "shape":"String", - "documentation":"

      The identifier of the cluster the snapshot was created from. This parameter is required if your IAM user or role has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

      " + "documentation":"

      The identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

      " }, "AccountWithRestoreAccess":{ "shape":"String", diff --git a/services/redshiftdata/pom.xml b/services/redshiftdata/pom.xml index b0066aa0568f..436303fbf6dd 100644 --- a/services/redshiftdata/pom.xml +++ b/services/redshiftdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT redshiftdata AWS Java SDK :: Services :: Redshift Data diff --git a/services/redshiftserverless/pom.xml b/services/redshiftserverless/pom.xml index e36cedf945f0..b67063e1af22 100644 --- a/services/redshiftserverless/pom.xml +++ b/services/redshiftserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT redshiftserverless AWS Java SDK :: Services :: Redshift Serverless diff --git a/services/rekognition/pom.xml b/services/rekognition/pom.xml index a13fc52c12fb..d43861467e9f 100644 --- a/services/rekognition/pom.xml +++ b/services/rekognition/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT rekognition AWS Java SDK :: Services :: Amazon Rekognition diff --git a/services/rekognition/src/main/resources/codegen-resources/paginators-1.json b/services/rekognition/src/main/resources/codegen-resources/paginators-1.json index 8874e7e7f4f8..6ac67e68e21b 100644 --- a/services/rekognition/src/main/resources/codegen-resources/paginators-1.json +++ b/services/rekognition/src/main/resources/codegen-resources/paginators-1.json @@ -86,6 +86,12 @@ "input_token": "NextToken", "limit_key": "MaxResults", "output_token": "NextToken" + }, + "ListUsers": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Users" } } } \ No newline at end of file diff --git a/services/rekognition/src/main/resources/codegen-resources/service-2.json b/services/rekognition/src/main/resources/codegen-resources/service-2.json index 633410f12449..2aff6686e930 100644 --- 
a/services/rekognition/src/main/resources/codegen-resources/service-2.json +++ b/services/rekognition/src/main/resources/codegen-resources/service-2.json @@ -12,6 +12,27 @@ "uid":"rekognition-2016-06-27" }, "operations":{ + "AssociateFaces":{ + "name":"AssociateFaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateFacesRequest"}, + "output":{"shape":"AssociateFacesResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

      Associates one or more faces with an existing UserID. Takes an array of FaceIds. Each FaceId that is present in the FaceIds list is associated with the provided UserID. The maximum number of total FaceIds per UserID is 100.

      The UserMatchThreshold parameter specifies the minimum user match confidence required for the face to be associated with a UserID that has at least one FaceID already associated. This ensures that the FaceIds are associated with the right UserID. The value ranges from 0-100 and default value is 75.

      If successful, an array of AssociatedFace objects containing the associated FaceIds is returned. If a given face is already associated with the given UserID, it will be ignored and will not be returned in the response. If a given face is already associated to a different UserID, isn't found in the collection, doesn’t meet the UserMatchThreshold, or there are already 100 faces associated with the UserID, it will be returned as part of an array of UnsuccessfulFaceAssociations.

      The UserStatus reflects the status of an operation which updates a UserID representation with a list of given faces. The UserStatus can be:

      • ACTIVE - All associations or disassociations of FaceID(s) for a UserID are complete.

      • CREATED - A UserID has been created, but has no FaceID(s) associated with it.

      • UPDATING - A UserID is being updated and there are current associations or disassociations of FaceID(s) taking place.

      " + }, "CompareFaces":{ "name":"CompareFaces", "http":{ @@ -171,6 +192,27 @@ ], "documentation":"

      Creates an Amazon Rekognition stream processor that you can use to detect and recognize faces or to detect labels in a streaming video.

      Amazon Rekognition Video is a consumer of live video from Amazon Kinesis Video Streams. There are two different settings for stream processors in Amazon Rekognition: detecting faces and detecting labels.

      • If you are creating a stream processor for detecting faces, you provide as input a Kinesis video stream (Input) and a Kinesis data stream (Output) stream for receiving the output. You must use the FaceSearch option in Settings, specifying the collection that contains the faces you want to recognize. After you have finished analyzing a streaming video, use StopStreamProcessor to stop processing.

      • If you are creating a stream processor to detect labels, you provide as input a Kinesis video stream (Input), Amazon S3 bucket information (Output), and an Amazon SNS topic ARN (NotificationChannel). You can also provide a KMS key ID to encrypt the data sent to your Amazon S3 bucket. You specify what you want to detect by using the ConnectedHome option in settings, and selecting one of the following: PERSON, PET, PACKAGE, ALL You can also specify where in the frame you want Amazon Rekognition to monitor with RegionsOfInterest. When you run the StartStreamProcessor operation on a label detection stream processor, you input start and stop information to determine the length of the processing time.

      Use Name to assign an identifier for the stream processor. You use Name to manage the stream processor. For example, you can start processing the source video by calling StartStreamProcessor with the Name field.

      This operation requires permissions to perform the rekognition:CreateStreamProcessor action. If you want to tag your stream processor, you also require permission to perform the rekognition:TagResource operation.

      " }, + "CreateUser":{ + "name":"CreateUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateUserRequest"}, + "output":{"shape":"CreateUserResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      Creates a new User within a collection specified by CollectionId. Takes UserId as a parameter, which is a user provided ID which should be unique within the collection. The provided UserId will alias the system generated UUID to make the UserId more user friendly.

      Uses a ClientToken, an idempotency token that ensures a call to CreateUser completes only once. If the value is not supplied, the AWS SDK generates an idempotency token for the requests. This prevents retries after a network error results from making multiple CreateUser calls.

      " + }, "DeleteCollection":{ "name":"DeleteCollection", "http":{ @@ -303,6 +345,26 @@ ], "documentation":"

      Deletes the stream processor identified by Name. You assign the value for Name when you create the stream processor with CreateStreamProcessor. You might not be able to use the same name for a stream processor for a few seconds after calling DeleteStreamProcessor.

      " }, + "DeleteUser":{ + "name":"DeleteUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteUserRequest"}, + "output":{"shape":"DeleteUserResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      Deletes the specified UserID within the collection. Faces that are associated with the UserID are disassociated from the UserID before deleting the specified UserID. If the specified Collection or UserID is already deleted or not found, a ResourceNotFoundException will be thrown. If the action is successful with a 200 response, an empty HTTP body is returned.

      " + }, "DescribeCollection":{ "name":"DescribeCollection", "http":{ @@ -518,6 +580,26 @@ ], "documentation":"

      Detects text in the input image and converts it into machine-readable text.

      Pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, you must pass it as a reference to an image in an Amazon S3 bucket. For the AWS CLI, passing image bytes is not supported. The image must be either a .png or .jpeg formatted file.

      The DetectText operation returns text in an array of TextDetection elements, TextDetections. Each TextDetection element provides information about a single word or line of text that was detected in the image.

      A word is one or more script characters that are not separated by spaces. DetectText can detect up to 100 words in an image.

      A line is a string of equally spaced words. A line isn't necessarily a complete sentence. For example, a driver's license number is detected as a line. A line ends when there is no aligned text after it. Also, a line ends when there is a large gap between words, relative to the length of the words. This means, depending on the gap between words, Amazon Rekognition may detect multiple lines in text aligned in the same direction. Periods don't represent the end of a line. If a sentence spans multiple lines, the DetectText operation returns multiple lines.

      To determine whether a TextDetection element is a line of text or a word, use the TextDetection object Type field.

      To be detected, text must be within +/- 90 degrees orientation of the horizontal axis.

      For more information, see Detecting text in the Amazon Rekognition Developer Guide.

      " }, + "DisassociateFaces":{ + "name":"DisassociateFaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateFacesRequest"}, + "output":{"shape":"DisassociateFacesResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

      Removes the association between a Face supplied in an array of FaceIds and the User. If the User is not present already, then a ResourceNotFound exception is thrown. If successful, an array of faces that are disassociated from the User is returned. If a given face is already disassociated from the given UserID, it will be ignored and not be returned in the response. If a given face is already associated with a different User or not found in the collection it will be returned as part of UnsuccessfulDisassociations. You can remove 1 - 100 face IDs from a user at one time.

      " + }, "DistributeDatasetEntries":{ "name":"DistributeDatasetEntries", "http":{ @@ -882,6 +964,25 @@ ], "documentation":"

      Returns a list of tags in an Amazon Rekognition collection, stream processor, or Custom Labels model.

      This operation requires permissions to perform the rekognition:ListTagsForResource action.

      " }, + "ListUsers":{ + "name":"ListUsers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListUsersRequest"}, + "output":{"shape":"ListUsersResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidPaginationTokenException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      Returns metadata of the User such as UserID in the specified collection. Anonymous User (to reserve faces without any identity) is not returned as part of this request. The results are sorted by system generated primary key ID. If the response is truncated, NextToken is returned in the response that can be used in the subsequent request to retrieve the next set of identities.

      " + }, "PutProjectPolicy":{ "name":"PutProjectPolicy", "http":{ @@ -965,6 +1066,45 @@ ], "documentation":"

      For a given input image, first detects the largest face in the image, and then searches the specified collection for matching faces. The operation compares the features of the input face with faces in the specified collection.

      To search for all faces in an input image, you might first call the IndexFaces operation, and then use the face IDs returned in subsequent calls to the SearchFaces operation.

      You can also call the DetectFaces operation and use the bounding boxes in the response to make face crops, which then you can pass in to the SearchFacesByImage operation.

      You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

      The response returns an array of faces that match, ordered by similarity score with the highest similarity first. More specifically, it is an array of metadata for each face match found. Along with the metadata, the response also includes a similarity indicating how similar the face is to the input face. In the response, the operation also returns the bounding box (and a confidence level that the bounding box contains a face) of the face that Amazon Rekognition used for the input image.

      If no faces are detected in the input image, SearchFacesByImage returns an InvalidParameterException error.

      For an example, Searching for a Face Using an Image in the Amazon Rekognition Developer Guide.

      The QualityFilter input parameter allows you to filter out detected faces that don’t meet a required quality bar. The quality bar is based on a variety of common use cases. Use QualityFilter to set the quality bar for filtering by specifying LOW, MEDIUM, or HIGH. If you do not want to filter detected faces, specify NONE. The default value is NONE.

      To use quality filtering, you need a collection associated with version 3 of the face model or higher. To get the version of the face model associated with a collection, call DescribeCollection.

      This operation requires permissions to perform the rekognition:SearchFacesByImage action.

      " }, + "SearchUsers":{ + "name":"SearchUsers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SearchUsersRequest"}, + "output":{"shape":"SearchUsersResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      Searches for UserIDs within a collection based on a FaceId or UserId. This API can be used to find the closest UserID (with a highest similarity) to associate a face. The request must be provided with either FaceId or UserId. The operation returns an array of UserID that match the FaceId or UserId, ordered by similarity score with the highest similarity first.

      " + }, + "SearchUsersByImage":{ + "name":"SearchUsersByImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SearchUsersByImageRequest"}, + "output":{"shape":"SearchUsersByImageResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidImageFormatException"}, + {"shape":"InvalidS3ObjectException"}, + {"shape":"ImageTooLargeException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      Searches for UserIDs using a supplied image. It first detects the largest face in the image, and then searches a specified collection for matching UserIDs.

      The operation returns an array of UserIDs that match the face in the supplied image, ordered by similarity score with the highest similarity first. It also returns a bounding box for the face found in the input image.

      Information about faces detected in the supplied image, but not used for the search, is returned in an array of UnsearchedFace objects. If no valid face is detected in the image, the response will contain an empty UserMatches list and no SearchedFace object.

      " + }, "StartCelebrityRecognition":{ "name":"StartCelebrityRecognition", "http":{ @@ -1329,6 +1469,70 @@ "type":"list", "member":{"shape":"Asset"} }, + "AssociateFacesRequest":{ + "type":"structure", + "required":[ + "CollectionId", + "UserId", + "FaceIds" + ], + "members":{ + "CollectionId":{ + "shape":"CollectionId", + "documentation":"

      The ID of an existing collection containing the UserID.

      " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

      The ID for the existing UserID.

      " + }, + "FaceIds":{ + "shape":"UserFaceIdList", + "documentation":"

      An array of FaceIDs to associate with the UserID.

      " + }, + "UserMatchThreshold":{ + "shape":"Percent", + "documentation":"

      An optional value specifying the minimum confidence in the UserID match to return. The default value is 75.

      " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

      Idempotent token used to identify the request to AssociateFaces. If you use the same token with multiple AssociateFaces requests, the same response is returned. Use ClientRequestToken to prevent the same request from being processed more than once.

      ", + "idempotencyToken":true + } + } + }, + "AssociateFacesResponse":{ + "type":"structure", + "members":{ + "AssociatedFaces":{ + "shape":"AssociatedFacesList", + "documentation":"

      An array of AssociatedFace objects containing FaceIDs that are successfully associated with the UserID is returned. Returned if the AssociateFaces action is successful.

      " + }, + "UnsuccessfulFaceAssociations":{ + "shape":"UnsuccessfulFaceAssociationList", + "documentation":"

      An array of UnsuccessfulAssociation objects containing FaceIDs that are not successfully associated along with the reasons. Returned if the AssociateFaces action is successful.

      " + }, + "UserStatus":{ + "shape":"UserStatus", + "documentation":"

      The status of an update made to a UserID. Reflects if the UserID has been updated for every requested change.

      " + } + } + }, + "AssociatedFace":{ + "type":"structure", + "members":{ + "FaceId":{ + "shape":"FaceId", + "documentation":"

      Unique identifier assigned to the face.

      " + } + }, + "documentation":"

      Provides face metadata for the faces that are associated to a specific UserID.

      " + }, + "AssociatedFacesList":{ + "type":"list", + "member":{"shape":"AssociatedFace"}, + "max":100, + "min":0 + }, "Attribute":{ "type":"string", "enum":[ @@ -1703,6 +1907,13 @@ }, "documentation":"

      Type that describes the face Amazon Rekognition chose to compare with the faces in the target. This contains a bounding box for the selected face and confidence level that the bounding box contains a face. Note that Amazon Rekognition selects the largest face in the source image for this comparison.

      " }, + "ConflictException":{ + "type":"structure", + "members":{ + }, + "documentation":"

      A User with the same Id already exists within the collection, or the update or deletion of the User caused an inconsistent state.

      ", + "exception":true + }, "ConnectedHomeLabel":{"type":"string"}, "ConnectedHomeLabels":{ "type":"list", @@ -2084,6 +2295,33 @@ } } }, + "CreateUserRequest":{ + "type":"structure", + "required":[ + "CollectionId", + "UserId" + ], + "members":{ + "CollectionId":{ + "shape":"CollectionId", + "documentation":"

      The ID of an existing collection to which the new UserID needs to be created.

      " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

      ID for the UserID to be created. This ID needs to be unique within the collection.

      " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

      Idempotent token used to identify the request to CreateUser. If you use the same token with multiple CreateUser requests, the same response is returned. Use ClientRequestToken to prevent the same request from being processed more than once.

      ", + "idempotencyToken":true + } + } + }, + "CreateUserResponse":{ + "type":"structure", + "members":{ + } + }, "CustomLabel":{ "type":"structure", "members":{ @@ -2364,6 +2602,10 @@ "DeletedFaces":{ "shape":"FaceIdList", "documentation":"

      An array of strings (face IDs) of the faces that were deleted.

      " + }, + "UnsuccessfulFaceDeletions":{ + "shape":"UnsuccessfulFaceDeletionsList", + "documentation":"

      An array of any faces that weren't deleted.

      " } } }, @@ -2446,6 +2688,33 @@ "members":{ } }, + "DeleteUserRequest":{ + "type":"structure", + "required":[ + "CollectionId", + "UserId" + ], + "members":{ + "CollectionId":{ + "shape":"CollectionId", + "documentation":"

      The ID of an existing collection from which the UserID needs to be deleted.

      " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

      ID for the UserID to be deleted.

      " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

      Idempotent token used to identify the request to DeleteUser. If you use the same token with multiple DeleteUser requests, the same response is returned. Use ClientRequestToken to prevent the same request from being processed more than once.

      ", + "idempotencyToken":true + } + } + }, + "DeleteUserResponse":{ + "type":"structure", + "members":{ + } + }, "DescribeCollectionRequest":{ "type":"structure", "required":["CollectionId"], @@ -2474,6 +2743,10 @@ "CreationTimestamp":{ "shape":"DateTime", "documentation":"

      The number of milliseconds since the Unix epoch time until the creation of the collection. The Unix epoch time is 00:00:00 Coordinated Universal Time (UTC), Thursday, 1 January 1970.

      " + }, + "UserCount":{ + "shape":"ULong", + "documentation":"

      The number of UserIDs assigned to the specified collection.

      " } } }, @@ -2966,6 +3239,66 @@ }, "documentation":"

      A set of parameters that allow you to filter out certain results from your returned results.

      " }, + "DisassociateFacesRequest":{ + "type":"structure", + "required":[ + "CollectionId", + "UserId", + "FaceIds" + ], + "members":{ + "CollectionId":{ + "shape":"CollectionId", + "documentation":"

      The ID of an existing collection containing the UserID.

      " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

      ID for the existing UserID.

      " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

      Idempotent token used to identify the request to DisassociateFaces. If you use the same token with multiple DisassociateFaces requests, the same response is returned. Use ClientRequestToken to prevent the same request from being processed more than once.

      ", + "idempotencyToken":true + }, + "FaceIds":{ + "shape":"UserFaceIdList", + "documentation":"

      An array of face IDs to disassociate from the UserID.

      " + } + } + }, + "DisassociateFacesResponse":{ + "type":"structure", + "members":{ + "DisassociatedFaces":{ + "shape":"DisassociatedFacesList", + "documentation":"

      An array of DisassociatedFace objects containing FaceIds that are successfully disassociated with the UserID is returned. Returned if the DisassociateFaces action is successful.

      " + }, + "UnsuccessfulFaceDisassociations":{ + "shape":"UnsuccessfulFaceDisassociationList", + "documentation":"

      An array of UnsuccessfulDisassociation objects containing FaceIds that are not successfully associated, along with the reasons for the failure to associate. Returned if the DisassociateFaces action is successful.

      " + }, + "UserStatus":{ + "shape":"UserStatus", + "documentation":"

      The status of an update made to a User. Reflects if the User has been updated for every requested change.

      " + } + } + }, + "DisassociatedFace":{ + "type":"structure", + "members":{ + "FaceId":{ + "shape":"FaceId", + "documentation":"

      Unique identifier assigned to the face.

      " + } + }, + "documentation":"

      Provides face metadata for the faces that are disassociated from a specific UserID.

      " + }, + "DisassociatedFacesList":{ + "type":"list", + "member":{"shape":"DisassociatedFace"}, + "max":100, + "min":0 + }, "DistributeDataset":{ "type":"structure", "required":["Arn"], @@ -3190,6 +3523,10 @@ "IndexFacesModelVersion":{ "shape":"IndexFacesModelVersion", "documentation":"

      The version of the face detect and storage model that was used when indexing the face vector.

      " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

      Unique identifier assigned to the user.

      " } }, "documentation":"

      Describes the face properties such as the bounding box, face ID, image ID of the input image, and external image ID that you assigned.

      " @@ -4711,6 +5048,14 @@ "MaxResults":{ "shape":"PageSize", "documentation":"

      Maximum number of faces to return.

      " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

      An array of user IDs to match when listing faces in a collection.

      " + }, + "FaceIds":{ + "shape":"FaceIdList", + "documentation":"

      An array of face IDs to match when listing faces in a collection.

      " } } }, @@ -4812,6 +5157,37 @@ } } }, + "ListUsersRequest":{ + "type":"structure", + "required":["CollectionId"], + "members":{ + "CollectionId":{ + "shape":"CollectionId", + "documentation":"

      The ID of an existing collection.

      " + }, + "MaxResults":{ + "shape":"MaxUserResults", + "documentation":"

      Maximum number of UserIDs to return.

      " + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

      Pagination token to receive the next set of UserIDs.

      " + } + } + }, + "ListUsersResponse":{ + "type":"structure", + "members":{ + "Users":{ + "shape":"UserList", + "documentation":"

      List of UserIDs associated with the specified collection.

      " + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

      A pagination token to be used with the subsequent request if the response is truncated.

      " + } + } + }, "LivenessImageBlob":{ "type":"blob", "max":204800, @@ -4861,6 +5237,20 @@ "documentation":"

      The format of the project policy document that you supplied to PutProjectPolicy is incorrect.

      ", "exception":true }, + "MatchedUser":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"UserId", + "documentation":"

      A provided ID for the UserID. Unique within the collection.

      " + }, + "UserStatus":{ + "shape":"UserStatus", + "documentation":"

      The status of the user matched to a provided FaceID.

      " + } + }, + "documentation":"

      Contains metadata for a UserID matched with a given face.

      " + }, "MaxDurationInSecondsULong":{ "type":"long", "max":120, @@ -4884,6 +5274,11 @@ "type":"integer", "min":1 }, + "MaxUserResults":{ + "type":"integer", + "max":500, + "min":1 + }, "MinCoveragePercentage":{ "type":"float", "max":100, @@ -5722,6 +6117,127 @@ } } }, + "SearchUsersByImageRequest":{ + "type":"structure", + "required":[ + "CollectionId", + "Image" + ], + "members":{ + "CollectionId":{ + "shape":"CollectionId", + "documentation":"

      The ID of an existing collection containing the UserID.

      " + }, + "Image":{"shape":"Image"}, + "UserMatchThreshold":{ + "shape":"Percent", + "documentation":"

      Specifies the minimum confidence in the UserID match to return. Default value is 80.

      " + }, + "MaxUsers":{ + "shape":"MaxUserResults", + "documentation":"

      Maximum number of UserIDs to return.

      " + }, + "QualityFilter":{ + "shape":"QualityFilter", + "documentation":"

      A filter that specifies a quality bar for how much filtering is done to identify faces. Filtered faces aren't searched for in the collection. The default value is NONE.

      " + } + } + }, + "SearchUsersByImageResponse":{ + "type":"structure", + "members":{ + "UserMatches":{ + "shape":"UserMatchList", + "documentation":"

      An array of UserID objects that matched the input face, along with the confidence in the match. The returned structure will be empty if there are no matches. Returned if the SearchUsersByImageResponse action is successful.

      " + }, + "FaceModelVersion":{ + "shape":"String", + "documentation":"

      Version number of the face detection model associated with the input collection CollectionId.

      " + }, + "SearchedFace":{ + "shape":"SearchedFaceDetails", + "documentation":"

      A list of FaceDetail objects containing the BoundingBox for the largest face in image, as well as the confidence in the bounding box, that was searched for matches. If no valid face is detected in the image the response will contain no SearchedFace object.

      " + }, + "UnsearchedFaces":{ + "shape":"UnsearchedFacesList", + "documentation":"

      List of UnsearchedFace objects. Contains the face details inferred from the specified image but not used for search. Contains reasons that describe why a face wasn't used for Search.

      " + } + } + }, + "SearchUsersRequest":{ + "type":"structure", + "required":["CollectionId"], + "members":{ + "CollectionId":{ + "shape":"CollectionId", + "documentation":"

      The ID of an existing collection containing the UserID, used with a UserId or FaceId. If a FaceId is provided, UserId isn’t required to be present in the Collection.

      " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

      ID for the existing User.

      " + }, + "FaceId":{ + "shape":"FaceId", + "documentation":"

      ID for the existing face.

      " + }, + "UserMatchThreshold":{ + "shape":"Percent", + "documentation":"

      Optional value that specifies the minimum confidence in the matched UserID to return. Default value of 80.

      " + }, + "MaxUsers":{ + "shape":"MaxUserResults", + "documentation":"

      Maximum number of identities to return.

      " + } + } + }, + "SearchUsersResponse":{ + "type":"structure", + "members":{ + "UserMatches":{ + "shape":"UserMatchList", + "documentation":"

      An array of UserMatch objects that matched the input face along with the confidence in the match. Array will be empty if there are no matches.

      " + }, + "FaceModelVersion":{ + "shape":"String", + "documentation":"

      Version number of the face detection model associated with the input CollectionId.

      " + }, + "SearchedFace":{ + "shape":"SearchedFace", + "documentation":"

      Contains the ID of a face that was used to search for matches in a collection.

      " + }, + "SearchedUser":{ + "shape":"SearchedUser", + "documentation":"

      Contains the ID of the UserID that was used to search for matches in a collection.

      " + } + } + }, + "SearchedFace":{ + "type":"structure", + "members":{ + "FaceId":{ + "shape":"FaceId", + "documentation":"

      Unique identifier assigned to the face.

      " + } + }, + "documentation":"

      Provides face metadata such as FaceId, BoundingBox, Confidence of the input face used for search.

      " + }, + "SearchedFaceDetails":{ + "type":"structure", + "members":{ + "FaceDetail":{"shape":"FaceDetail"} + }, + "documentation":"

      Contains data regarding the input face used for a search.

      " + }, + "SearchedUser":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"UserId", + "documentation":"

      A provided ID for the UserID. Unique within the collection.

      " + } + }, + "documentation":"

      Contains metadata about a User searched for within a collection.

      " + }, "SegmentConfidence":{ "type":"float", "max":100, @@ -6674,6 +7190,148 @@ "type":"list", "member":{"shape":"UnindexedFace"} }, + "UnsearchedFace":{ + "type":"structure", + "members":{ + "FaceDetails":{"shape":"FaceDetail"}, + "Reasons":{ + "shape":"UnsearchedFaceReasons", + "documentation":"

      Reasons why a face wasn't used for Search.

      " + } + }, + "documentation":"

      Face details inferred from the image but not used for search. The response attribute contains reasons for why a face wasn't used for Search.

      " + }, + "UnsearchedFaceReason":{ + "type":"string", + "enum":[ + "FACE_NOT_LARGEST", + "EXCEEDS_MAX_FACES", + "EXTREME_POSE", + "LOW_BRIGHTNESS", + "LOW_SHARPNESS", + "LOW_CONFIDENCE", + "SMALL_BOUNDING_BOX", + "LOW_FACE_QUALITY" + ] + }, + "UnsearchedFaceReasons":{ + "type":"list", + "member":{"shape":"UnsearchedFaceReason"} + }, + "UnsearchedFacesList":{ + "type":"list", + "member":{"shape":"UnsearchedFace"} + }, + "UnsuccessfulFaceAssociation":{ + "type":"structure", + "members":{ + "FaceId":{ + "shape":"FaceId", + "documentation":"

      A unique identifier assigned to the face.

      " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

      A provided ID for the UserID. Unique within the collection.

      " + }, + "Confidence":{ + "shape":"Percent", + "documentation":"

      Match confidence with the UserID, provides information regarding whether a face association was unsuccessful because it didn't meet UserMatchThreshold.

      " + }, + "Reasons":{ + "shape":"UnsuccessfulFaceAssociationReasons", + "documentation":"

      The reason why the association was unsuccessful.

      " + } + }, + "documentation":"

      Contains metadata like FaceId, UserID, and Reasons, for a face that was unsuccessfully associated.

      " + }, + "UnsuccessfulFaceAssociationList":{ + "type":"list", + "member":{"shape":"UnsuccessfulFaceAssociation"}, + "max":500, + "min":0 + }, + "UnsuccessfulFaceAssociationReason":{ + "type":"string", + "enum":[ + "FACE_NOT_FOUND", + "ASSOCIATED_TO_A_DIFFERENT_USER", + "LOW_MATCH_CONFIDENCE" + ] + }, + "UnsuccessfulFaceAssociationReasons":{ + "type":"list", + "member":{"shape":"UnsuccessfulFaceAssociationReason"} + }, + "UnsuccessfulFaceDeletion":{ + "type":"structure", + "members":{ + "FaceId":{ + "shape":"FaceId", + "documentation":"

      A unique identifier assigned to the face.

      " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

      A provided ID for the UserID. Unique within the collection.

      " + }, + "Reasons":{ + "shape":"UnsuccessfulFaceDeletionReasons", + "documentation":"

      The reason why the deletion was unsuccessful.

      " + } + }, + "documentation":"

      Contains metadata like FaceId, UserID, and Reasons, for a face that was unsuccessfully deleted.

      " + }, + "UnsuccessfulFaceDeletionReason":{ + "type":"string", + "enum":[ + "ASSOCIATED_TO_AN_EXISTING_USER", + "FACE_NOT_FOUND" + ] + }, + "UnsuccessfulFaceDeletionReasons":{ + "type":"list", + "member":{"shape":"UnsuccessfulFaceDeletionReason"} + }, + "UnsuccessfulFaceDeletionsList":{ + "type":"list", + "member":{"shape":"UnsuccessfulFaceDeletion"}, + "max":4096, + "min":0 + }, + "UnsuccessfulFaceDisassociation":{ + "type":"structure", + "members":{ + "FaceId":{ + "shape":"FaceId", + "documentation":"

      A unique identifier assigned to the face.

      " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

      A provided ID for the UserID. Unique within the collection.

      " + }, + "Reasons":{ + "shape":"UnsuccessfulFaceDisassociationReasons", + "documentation":"

      The reason why the disassociation was unsuccessful.

      " + } + }, + "documentation":"

      Contains metadata like FaceId, UserID, and Reasons, for a face that was unsuccessfully disassociated.

      " + }, + "UnsuccessfulFaceDisassociationList":{ + "type":"list", + "member":{"shape":"UnsuccessfulFaceDisassociation"}, + "max":500, + "min":0 + }, + "UnsuccessfulFaceDisassociationReason":{ + "type":"string", + "enum":[ + "FACE_NOT_FOUND", + "ASSOCIATED_TO_A_DIFFERENT_USER" + ] + }, + "UnsuccessfulFaceDisassociationReasons":{ + "type":"list", + "member":{"shape":"UnsuccessfulFaceDisassociationReason"} + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -6756,6 +7414,65 @@ "max":255, "min":0 }, + "User":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"UserId", + "documentation":"

      A provided ID for the User. Unique within the collection.

      " + }, + "UserStatus":{ + "shape":"UserStatus", + "documentation":"

      Communicates if the UserID has been updated with the latest set of faces to be associated with the UserID.

      " + } + }, + "documentation":"

      Metadata of the user stored in a collection.

      " + }, + "UserFaceIdList":{ + "type":"list", + "member":{"shape":"FaceId"}, + "max":100, + "min":1 + }, + "UserId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9_.\\-:]+" + }, + "UserList":{ + "type":"list", + "member":{"shape":"User"}, + "max":500 + }, + "UserMatch":{ + "type":"structure", + "members":{ + "Similarity":{ + "shape":"Percent", + "documentation":"

      Confidence in the match of this UserID with the input face.

      " + }, + "User":{ + "shape":"MatchedUser", + "documentation":"

      Describes the UserID metadata.

      " + } + }, + "documentation":"

      Provides UserID metadata along with the confidence in the match of this UserID with the input face.

      " + }, + "UserMatchList":{ + "type":"list", + "member":{"shape":"UserMatch"}, + "max":500 + }, + "UserStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "UPDATING", + "CREATING", + "CREATED" + ] + }, "ValidationData":{ "type":"structure", "members":{ @@ -6849,5 +7566,5 @@ "exception":true } }, - "documentation":"

      This is the API Reference for Amazon Rekognition Image, Amazon Rekognition Custom Labels, Amazon Rekognition Stored Video, Amazon Rekognition Streaming Video. It provides descriptions of actions, data types, common parameters, and common errors.

      Amazon Rekognition Image

      Amazon Rekognition Custom Labels

      Amazon Rekognition Video Stored Video

      Amazon Rekognition Video Streaming Video

      " + "documentation":"

      This is the API Reference for Amazon Rekognition Image, Amazon Rekognition Custom Labels, Amazon Rekognition Stored Video, Amazon Rekognition Streaming Video. It provides descriptions of actions, data types, common parameters, and common errors.

      Amazon Rekognition Image

      Amazon Rekognition Custom Labels

      Amazon Rekognition Video Stored Video

      Amazon Rekognition Video Streaming Video

      " } diff --git a/services/resiliencehub/pom.xml b/services/resiliencehub/pom.xml index 7eb6b60b4820..fb39c9d3fa18 100644 --- a/services/resiliencehub/pom.xml +++ b/services/resiliencehub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT resiliencehub AWS Java SDK :: Services :: Resiliencehub diff --git a/services/resourceexplorer2/pom.xml b/services/resourceexplorer2/pom.xml index b15110381b50..7a913bf7c444 100644 --- a/services/resourceexplorer2/pom.xml +++ b/services/resourceexplorer2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT resourceexplorer2 AWS Java SDK :: Services :: Resource Explorer 2 diff --git a/services/resourcegroups/pom.xml b/services/resourcegroups/pom.xml index 414c0bbc09c6..dd06dcbfab04 100644 --- a/services/resourcegroups/pom.xml +++ b/services/resourcegroups/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 resourcegroups diff --git a/services/resourcegroupstaggingapi/pom.xml b/services/resourcegroupstaggingapi/pom.xml index cbb041ad29c0..9bf03ffff506 100644 --- a/services/resourcegroupstaggingapi/pom.xml +++ b/services/resourcegroupstaggingapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT resourcegroupstaggingapi AWS Java SDK :: Services :: AWS Resource Groups Tagging API diff --git a/services/robomaker/pom.xml b/services/robomaker/pom.xml index fd54629b1af4..d4078d926e43 100644 --- a/services/robomaker/pom.xml +++ b/services/robomaker/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT robomaker AWS Java SDK :: Services :: RoboMaker diff --git a/services/rolesanywhere/pom.xml b/services/rolesanywhere/pom.xml index 7c4a956a9973..6232fd291586 100644 --- a/services/rolesanywhere/pom.xml +++ b/services/rolesanywhere/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 
2.20.93-SNAPSHOT rolesanywhere AWS Java SDK :: Services :: Roles Anywhere diff --git a/services/route53/pom.xml b/services/route53/pom.xml index fd22f67e8155..10f1d25fe12b 100644 --- a/services/route53/pom.xml +++ b/services/route53/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT route53 AWS Java SDK :: Services :: Amazon Route53 diff --git a/services/route53domains/pom.xml b/services/route53domains/pom.xml index a9955af5497b..6dfc1df8c247 100644 --- a/services/route53domains/pom.xml +++ b/services/route53domains/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT route53domains AWS Java SDK :: Services :: Amazon Route53 Domains diff --git a/services/route53domains/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/route53domains/src/main/resources/codegen-resources/endpoint-rule-set.json index 4f994c0ade7f..3ccf51cbf5b4 100644 --- a/services/route53domains/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/route53domains/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom 
endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] }, - "supportsFIPS" + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://route53domains-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://route53domains-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://route53domains-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { 
+ "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://route53domains.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -222,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://route53domains-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://route53domains.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -231,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://route53domains.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://route53domains.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git 
a/services/route53domains/src/main/resources/codegen-resources/endpoint-tests.json b/services/route53domains/src/main/resources/codegen-resources/endpoint-tests.json index b2039e364642..f837786f8dd6 100644 --- a/services/route53domains/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/route53domains/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,5 +1,18 @@ { "testCases": [ + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { @@ -8,8 +21,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -21,8 +34,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -34,34 +47,234 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://route53domains.us-east-1.amazonaws.com" + "url": "https://route53domains-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + 
"url": "https://route53domains.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://example.com" + "url": "https://route53domains-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://route53domains.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + 
"UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": 
"us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -72,8 +285,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -84,11 +297,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/route53domains/src/main/resources/codegen-resources/service-2.json b/services/route53domains/src/main/resources/codegen-resources/service-2.json index 25a4e2002824..8bb0ddb21abe 100644 --- a/services/route53domains/src/main/resources/codegen-resources/service-2.json +++ b/services/route53domains/src/main/resources/codegen-resources/service-2.json @@ -341,7 +341,7 @@ {"shape":"DomainLimitExceeded"}, {"shape":"OperationLimitExceeded"} ], - "documentation":"

      This operation registers a domain. Domains are registered either by Amazon Registrar (for .com, .net, and .org domains) or by our registrar associate, Gandi (for all other domains). For some top-level domains (TLDs), this operation requires extra parameters.

      When you register a domain, Amazon Route 53 does the following:

      • Creates a Route 53 hosted zone that has the same name as the domain. Route 53 assigns four name servers to your hosted zone and automatically updates your domain registration with the names of these name servers.

      • Enables auto renew, so your domain registration will renew automatically each year. We'll notify you in advance of the renewal date so you can choose whether to renew the registration.

      • Optionally enables privacy protection, so WHOIS queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you don't enable privacy protection, WHOIS queries return the information that you entered for the administrative, registrant, and technical contacts.

        You must specify the same privacy setting for the administrative, registrant, and technical contacts.

      • If registration is successful, returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant is notified by email.

      • Charges your Amazon Web Services account an amount based on the top-level domain. For more information, see Amazon Route 53 Pricing.

      " + "documentation":"

      This operation registers a domain. For some top-level domains (TLDs), this operation requires extra parameters.

      When you register a domain, Amazon Route 53 does the following:

      • Creates a Route 53 hosted zone that has the same name as the domain. Route 53 assigns four name servers to your hosted zone and automatically updates your domain registration with the names of these name servers.

      • Enables auto renew, so your domain registration will renew automatically each year. We'll notify you in advance of the renewal date so you can choose whether to renew the registration.

      • Optionally enables privacy protection, so WHOIS queries return contact for the registrar or the phrase \"REDACTED FOR PRIVACY\", or \"On behalf of <domain name> owner.\" If you don't enable privacy protection, WHOIS queries return the information that you entered for the administrative, registrant, and technical contacts.

        While some domains may allow different privacy settings per contact, we recommend specifying the same privacy setting for all contacts.

      • If registration is successful, returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant is notified by email.

      • Charges your Amazon Web Services account an amount based on the top-level domain. For more information, see Amazon Route 53 Pricing.

      " }, "RejectDomainTransferFromAnotherAwsAccount":{ "name":"RejectDomainTransferFromAnotherAwsAccount", @@ -432,7 +432,7 @@ {"shape":"DomainLimitExceeded"}, {"shape":"OperationLimitExceeded"} ], - "documentation":"

      Transfers a domain from another registrar to Amazon Route 53. When the transfer is complete, the domain is registered either with Amazon Registrar (for .com, .net, and .org domains) or with our registrar associate, Gandi (for all other TLDs).

      For more information about transferring domains, see the following topics:

      If the registrar for your domain is also the DNS service provider for the domain, we highly recommend that you transfer your DNS service to Route 53 or to another DNS service provider before you transfer your registration. Some registrars provide free DNS service when you purchase a domain registration. When you transfer the registration, the previous registrar will not renew your domain registration and could end your DNS service at any time.

      If the registrar for your domain is also the DNS service provider for the domain and you don't transfer DNS service to another provider, your website, email, and the web applications associated with the domain might become unavailable.

      If the transfer is successful, this method returns an operation ID that you can use to track the progress and completion of the action. If the transfer doesn't complete successfully, the domain registrant will be notified by email.

      " + "documentation":"

      Transfers a domain from another registrar to Amazon Route 53.

      For more information about transferring domains, see the following topics:

      If the registrar for your domain is also the DNS service provider for the domain, we highly recommend that you transfer your DNS service to Route 53 or to another DNS service provider before you transfer your registration. Some registrars provide free DNS service when you purchase a domain registration. When you transfer the registration, the previous registrar will not renew your domain registration and could end your DNS service at any time.

      If the registrar for your domain is also the DNS service provider for the domain and you don't transfer DNS service to another provider, your website, email, and the web applications associated with the domain might become unavailable.

      If the transfer is successful, this method returns an operation ID that you can use to track the progress and completion of the action. If the transfer doesn't complete successfully, the domain registrant will be notified by email.

      " }, "TransferDomainToAnotherAwsAccount":{ "name":"TransferDomainToAnotherAwsAccount", @@ -482,7 +482,7 @@ {"shape":"OperationLimitExceeded"}, {"shape":"UnsupportedTLD"} ], - "documentation":"

      This operation updates the specified domain contact's privacy setting. When privacy protection is enabled, contact information such as email address is replaced either with contact information for Amazon Registrar (for .com, .net, and .org domains) or with contact information for our registrar associate, Gandi.

      You must specify the same privacy setting for the administrative, registrant, and technical contacts.

      This operation affects only the contact information for the specified contact type (administrative, registrant, or technical). If the request succeeds, Amazon Route 53 returns an operation ID that you can use with GetOperationDetail to track the progress and completion of the action. If the request doesn't complete successfully, the domain registrant will be notified by email.

      By disabling the privacy service via API, you consent to the publication of the contact information provided for this domain via the public WHOIS database. You certify that you are the registrant of this domain name and have the authority to make this decision. You may withdraw your consent at any time by enabling privacy protection using either UpdateDomainContactPrivacy or the Route 53 console. Enabling privacy protection removes the contact information provided for this domain from the WHOIS database. For more information on our privacy practices, see https://aws.amazon.com/privacy/.

      " + "documentation":"

      This operation updates the specified domain contact's privacy setting. When privacy protection is enabled, your contact information is replaced with contact information for the registrar or with the phrase \"REDACTED FOR PRIVACY\", or \"On behalf of <domain name> owner.\"

      While some domains may allow different privacy settings per contact, we recommend specifying the same privacy setting for all contacts.

      This operation affects only the contact information for the specified contact type (administrative, registrant, or technical). If the request succeeds, Amazon Route 53 returns an operation ID that you can use with GetOperationDetail to track the progress and completion of the action. If the request doesn't complete successfully, the domain registrant will be notified by email.

      By disabling the privacy service via API, you consent to the publication of the contact information provided for this domain via the public WHOIS database. You certify that you are the registrant of this domain name and have the authority to make this decision. You may withdraw your consent at any time by enabling privacy protection using either UpdateDomainContactPrivacy or the Route 53 console. Enabling privacy protection removes the contact information provided for this domain from the WHOIS database. For more information on our privacy practices, see https://aws.amazon.com/privacy/.

      " }, "UpdateDomainNameservers":{ "name":"UpdateDomainNameservers", @@ -1429,7 +1429,7 @@ "members":{ "Name":{ "shape":"ExtraParamName", - "documentation":"

      The name of an additional parameter that is required by a top-level domain. Here are the top-level domains that require additional parameters and the names of the parameters that they require:

      .com.au and .net.au
      • AU_ID_NUMBER

      • AU_ID_TYPE

        Valid values include the following:

        • ABN (Australian business number)

        • ACN (Australian company number)

        • TM (Trademark number)

      .ca
      • BRAND_NUMBER

      • CA_BUSINESS_ENTITY_TYPE

        Valid values include the following:

        • BANK (Bank)

        • COMMERCIAL_COMPANY (Commercial company)

        • COMPANY (Company)

        • COOPERATION (Cooperation)

        • COOPERATIVE (Cooperative)

        • COOPRIX (Cooprix)

        • CORP (Corporation)

        • CREDIT_UNION (Credit union)

        • FOMIA (Federation of mutual insurance associations)

        • INC (Incorporated)

        • LTD (Limited)

        • LTEE (Limitée)

        • LLC (Limited liability corporation)

        • LLP (Limited liability partnership)

        • LTE (Lte.)

        • MBA (Mutual benefit association)

        • MIC (Mutual insurance company)

        • NFP (Not-for-profit corporation)

        • SA (S.A.)

        • SAVINGS_COMPANY (Savings company)

        • SAVINGS_UNION (Savings union)

        • SARL (Société à responsabilité limitée)

        • TRUST (Trust)

        • ULC (Unlimited liability corporation)

      • CA_LEGAL_TYPE

        When ContactType is PERSON, valid values include the following:

        • ABO (Aboriginal Peoples indigenous to Canada)

        • CCT (Canadian citizen)

        • LGR (Legal Representative of a Canadian Citizen or Permanent Resident)

        • RES (Permanent resident of Canada)

        When ContactType is a value other than PERSON, valid values include the following:

        • ASS (Canadian unincorporated association)

        • CCO (Canadian corporation)

        • EDU (Canadian educational institution)

        • GOV (Government or government entity in Canada)

        • HOP (Canadian Hospital)

        • INB (Indian Band recognized by the Indian Act of Canada)

        • LAM (Canadian Library, Archive, or Museum)

        • MAJ (Her/His Majesty the Queen/King)

        • OMK (Official mark registered in Canada)

        • PLT (Canadian Political Party)

        • PRT (Partnership Registered in Canada)

        • TDM (Trademark registered in Canada)

        • TRD (Canadian Trade Union)

        • TRS (Trust established in Canada)

      .es
      • ES_IDENTIFICATION

        The value of ES_IDENTIFICATION depends on the following values:

        • The value of ES_LEGAL_FORM

        • The value of ES_IDENTIFICATION_TYPE

        If ES_LEGAL_FORM is any value other than INDIVIDUAL:

        • Specify 1 letter + 8 numbers (CIF [Certificado de Identificación Fiscal])

        • Example: B12345678

        If ES_LEGAL_FORM is INDIVIDUAL, the value that you specify for ES_IDENTIFICATION depends on the value of ES_IDENTIFICATION_TYPE:

        • If ES_IDENTIFICATION_TYPE is DNI_AND_NIF (for Spanish contacts):

          • Specify 8 numbers + 1 letter (DNI [Documento Nacional de Identidad], NIF [Número de Identificación Fiscal])

          • Example: 12345678M

        • If ES_IDENTIFICATION_TYPE is NIE (for foreigners with legal residence):

          • Specify 1 letter + 7 numbers + 1 letter ( NIE [Número de Identidad de Extranjero])

          • Example: Y1234567X

        • If ES_IDENTIFICATION_TYPE is OTHER (for contacts outside of Spain):

          • Specify a passport number, driver's license number, or national identity card number

      • ES_IDENTIFICATION_TYPE

        Valid values include the following:

        • DNI_AND_NIF (For Spanish contacts)

        • NIE (For foreigners with legal residence)

        • OTHER (For contacts outside of Spain)

      • ES_LEGAL_FORM

        Valid values include the following:

        • ASSOCIATION

        • CENTRAL_GOVERNMENT_BODY

        • CIVIL_SOCIETY

        • COMMUNITY_OF_OWNERS

        • COMMUNITY_PROPERTY

        • CONSULATE

        • COOPERATIVE

        • DESIGNATION_OF_ORIGIN_SUPERVISORY_COUNCIL

        • ECONOMIC_INTEREST_GROUP

        • EMBASSY

        • ENTITY_MANAGING_NATURAL_AREAS

        • FARM_PARTNERSHIP

        • FOUNDATION

        • GENERAL_AND_LIMITED_PARTNERSHIP

        • GENERAL_PARTNERSHIP

        • INDIVIDUAL

        • LIMITED_COMPANY

        • LOCAL_AUTHORITY

        • LOCAL_PUBLIC_ENTITY

        • MUTUAL_INSURANCE_COMPANY

        • NATIONAL_PUBLIC_ENTITY

        • ORDER_OR_RELIGIOUS_INSTITUTION

        • OTHERS (Only for contacts outside of Spain)

        • POLITICAL_PARTY

        • PROFESSIONAL_ASSOCIATION

        • PUBLIC_LAW_ASSOCIATION

        • PUBLIC_LIMITED_COMPANY

        • REGIONAL_GOVERNMENT_BODY

        • REGIONAL_PUBLIC_ENTITY

        • SAVINGS_BANK

        • SPANISH_OFFICE

        • SPORTS_ASSOCIATION

        • SPORTS_FEDERATION

        • SPORTS_LIMITED_COMPANY

        • TEMPORARY_ALLIANCE_OF_ENTERPRISES

        • TRADE_UNION

        • WORKER_OWNED_COMPANY

        • WORKER_OWNED_LIMITED_COMPANY

      .eu
      • EU_COUNTRY_OF_CITIZENSHIP

      .fi
      • BIRTH_DATE_IN_YYYY_MM_DD

      • FI_BUSINESS_NUMBER

      • FI_ID_NUMBER

      • FI_NATIONALITY

        Valid values include the following:

        • FINNISH

        • NOT_FINNISH

      • FI_ORGANIZATION_TYPE

        Valid values include the following:

        • COMPANY

        • CORPORATION

        • GOVERNMENT

        • INSTITUTION

        • POLITICAL_PARTY

        • PUBLIC_COMMUNITY

        • TOWNSHIP

      .fr
      • BIRTH_CITY

      • BIRTH_COUNTRY

      • BIRTH_DATE_IN_YYYY_MM_DD

      • BIRTH_DEPARTMENT: Specify the INSEE code that corresponds with the department where the contact was born. If the contact was born somewhere other than France or its overseas departments, specify 99. For more information, including a list of departments and the corresponding INSEE numbers, see the Wikipedia entry Departments of France.

      • BRAND_NUMBER

      .it
      • IT_NATIONALITY

      • IT_PIN

      • IT_REGISTRANT_ENTITY_TYPE

        Valid values include the following:

        • FOREIGNERS

        • FREELANCE_WORKERS (Freelance workers and professionals)

        • ITALIAN_COMPANIES (Italian companies and one-person companies)

        • NON_PROFIT_ORGANIZATIONS

        • OTHER_SUBJECTS

        • PUBLIC_ORGANIZATIONS

      .ru
      • BIRTH_DATE_IN_YYYY_MM_DD

      • RU_PASSPORT_DATA

      .se
      • BIRTH_COUNTRY

      • SE_ID_NUMBER

      .sg
      • SG_ID_NUMBER

      .uk, .co.uk, .me.uk, and .org.uk
      • UK_CONTACT_TYPE

        Valid values include the following:

        • CRC (UK Corporation by Royal Charter)

        • FCORP (Non-UK Corporation)

        • FIND (Non-UK Individual, representing self)

        • FOTHER (Non-UK Entity that does not fit into any other category)

        • GOV (UK Government Body)

        • IND (UK Individual (representing self))

        • IP (UK Industrial/Provident Registered Company)

        • LLP (UK Limited Liability Partnership)

        • LTD (UK Limited Company)

        • OTHER (UK Entity that does not fit into any other category)

        • PLC (UK Public Limited Company)

        • PTNR (UK Partnership)

        • RCHAR (UK Registered Charity)

        • SCH (UK School)

        • STAT (UK Statutory Body)

        • STRA (UK Sole Trader)

      • UK_COMPANY_NUMBER

      In addition, many TLDs require a VAT_NUMBER.

      " + "documentation":"

      The name of an additional parameter that is required by a top-level domain. Here are the top-level domains that require additional parameters and the names of the parameters that they require:

      .com.au and .net.au
      • AU_ID_NUMBER

      • AU_ID_TYPE

        Valid values include the following:

        • ABN (Australian business number)

        • ACN (Australian company number)

        • TM (Trademark number)

      .ca
      • BRAND_NUMBER

      • CA_BUSINESS_ENTITY_TYPE

        Valid values include the following:

        • BANK (Bank)

        • COMMERCIAL_COMPANY (Commercial company)

        • COMPANY (Company)

        • COOPERATION (Cooperation)

        • COOPERATIVE (Cooperative)

        • COOPRIX (Cooprix)

        • CORP (Corporation)

        • CREDIT_UNION (Credit union)

        • FOMIA (Federation of mutual insurance associations)

        • INC (Incorporated)

        • LTD (Limited)

        • LTEE (Limitée)

        • LLC (Limited liability corporation)

        • LLP (Limited liability partnership)

        • LTE (Lte.)

        • MBA (Mutual benefit association)

        • MIC (Mutual insurance company)

        • NFP (Not-for-profit corporation)

        • SA (S.A.)

        • SAVINGS_COMPANY (Savings company)

        • SAVINGS_UNION (Savings union)

        • SARL (Société à responsabilité limitée)

        • TRUST (Trust)

        • ULC (Unlimited liability corporation)

      • CA_LEGAL_TYPE

        When ContactType is PERSON, valid values include the following:

        • ABO (Aboriginal Peoples indigenous to Canada)

        • CCT (Canadian citizen)

        • LGR (Legal Representative of a Canadian Citizen or Permanent Resident)

        • RES (Permanent resident of Canada)

        When ContactType is a value other than PERSON, valid values include the following:

        • ASS (Canadian unincorporated association)

        • CCO (Canadian corporation)

        • EDU (Canadian educational institution)

        • GOV (Government or government entity in Canada)

        • HOP (Canadian Hospital)

        • INB (Indian Band recognized by the Indian Act of Canada)

        • LAM (Canadian Library, Archive, or Museum)

        • MAJ (Her/His Majesty the Queen/King)

        • OMK (Official mark registered in Canada)

        • PLT (Canadian Political Party)

        • PRT (Partnership Registered in Canada)

        • TDM (Trademark registered in Canada)

        • TRD (Canadian Trade Union)

        • TRS (Trust established in Canada)

      .es
      • ES_IDENTIFICATION

        The value of ES_IDENTIFICATION depends on the following values:

        • The value of ES_LEGAL_FORM

        • The value of ES_IDENTIFICATION_TYPE

        If ES_LEGAL_FORM is any value other than INDIVIDUAL:

        • Specify 1 letter + 8 numbers (CIF [Certificado de Identificación Fiscal])

        • Example: B12345678

        If ES_LEGAL_FORM is INDIVIDUAL, the value that you specify for ES_IDENTIFICATION depends on the value of ES_IDENTIFICATION_TYPE:

        • If ES_IDENTIFICATION_TYPE is DNI_AND_NIF (for Spanish contacts):

          • Specify 8 numbers + 1 letter (DNI [Documento Nacional de Identidad], NIF [Número de Identificación Fiscal])

          • Example: 12345678M

        • If ES_IDENTIFICATION_TYPE is NIE (for foreigners with legal residence):

          • Specify 1 letter + 7 numbers + 1 letter ( NIE [Número de Identidad de Extranjero])

          • Example: Y1234567X

        • If ES_IDENTIFICATION_TYPE is OTHER (for contacts outside of Spain):

          • Specify a passport number, driver's license number, or national identity card number

      • ES_IDENTIFICATION_TYPE

        Valid values include the following:

        • DNI_AND_NIF (For Spanish contacts)

        • NIE (For foreigners with legal residence)

        • OTHER (For contacts outside of Spain)

      • ES_LEGAL_FORM

        Valid values include the following:

        • ASSOCIATION

        • CENTRAL_GOVERNMENT_BODY

        • CIVIL_SOCIETY

        • COMMUNITY_OF_OWNERS

        • COMMUNITY_PROPERTY

        • CONSULATE

        • COOPERATIVE

        • DESIGNATION_OF_ORIGIN_SUPERVISORY_COUNCIL

        • ECONOMIC_INTEREST_GROUP

        • EMBASSY

        • ENTITY_MANAGING_NATURAL_AREAS

        • FARM_PARTNERSHIP

        • FOUNDATION

        • GENERAL_AND_LIMITED_PARTNERSHIP

        • GENERAL_PARTNERSHIP

        • INDIVIDUAL

        • LIMITED_COMPANY

        • LOCAL_AUTHORITY

        • LOCAL_PUBLIC_ENTITY

        • MUTUAL_INSURANCE_COMPANY

        • NATIONAL_PUBLIC_ENTITY

        • ORDER_OR_RELIGIOUS_INSTITUTION

        • OTHERS (Only for contacts outside of Spain)

        • POLITICAL_PARTY

        • PROFESSIONAL_ASSOCIATION

        • PUBLIC_LAW_ASSOCIATION

        • PUBLIC_LIMITED_COMPANY

        • REGIONAL_GOVERNMENT_BODY

        • REGIONAL_PUBLIC_ENTITY

        • SAVINGS_BANK

        • SPANISH_OFFICE

        • SPORTS_ASSOCIATION

        • SPORTS_FEDERATION

        • SPORTS_LIMITED_COMPANY

        • TEMPORARY_ALLIANCE_OF_ENTERPRISES

        • TRADE_UNION

        • WORKER_OWNED_COMPANY

        • WORKER_OWNED_LIMITED_COMPANY

      .eu
      • EU_COUNTRY_OF_CITIZENSHIP

      .fi
      • BIRTH_DATE_IN_YYYY_MM_DD

      • FI_BUSINESS_NUMBER

      • FI_ID_NUMBER

      • FI_NATIONALITY

        Valid values include the following:

        • FINNISH

        • NOT_FINNISH

      • FI_ORGANIZATION_TYPE

        Valid values include the following:

        • COMPANY

        • CORPORATION

        • GOVERNMENT

        • INSTITUTION

        • POLITICAL_PARTY

        • PUBLIC_COMMUNITY

        • TOWNSHIP

      .it
      • IT_NATIONALITY

      • IT_PIN

      • IT_REGISTRANT_ENTITY_TYPE

        Valid values include the following:

        • FOREIGNERS

        • FREELANCE_WORKERS (Freelance workers and professionals)

        • ITALIAN_COMPANIES (Italian companies and one-person companies)

        • NON_PROFIT_ORGANIZATIONS

        • OTHER_SUBJECTS

        • PUBLIC_ORGANIZATIONS

      .ru
      • BIRTH_DATE_IN_YYYY_MM_DD

      • RU_PASSPORT_DATA

      .se
      • BIRTH_COUNTRY

      • SE_ID_NUMBER

      .sg
      • SG_ID_NUMBER

      .uk, .co.uk, .me.uk, and .org.uk
      • UK_CONTACT_TYPE

        Valid values include the following:

        • CRC (UK Corporation by Royal Charter)

        • FCORP (Non-UK Corporation)

        • FIND (Non-UK Individual, representing self)

        • FOTHER (Non-UK Entity that does not fit into any other category)

        • GOV (UK Government Body)

        • IND (UK Individual (representing self))

        • IP (UK Industrial/Provident Registered Company)

        • LLP (UK Limited Liability Partnership)

        • LTD (UK Limited Company)

        • OTHER (UK Entity that does not fit into any other category)

        • PLC (UK Public Limited Company)

        • PTNR (UK Partnership)

        • RCHAR (UK Registered Charity)

        • SCH (UK School)

        • STAT (UK Statutory Body)

        • STRA (UK Sole Trader)

      • UK_COMPANY_NUMBER

      In addition, many TLDs require a VAT_NUMBER.

      " }, "Value":{ "shape":"ExtraParamValue", @@ -1853,6 +1853,10 @@ "type":"string", "enum":["SubmittedDate"] }, + "ListPricesPageMaxItems":{ + "type":"integer", + "max":1000 + }, "ListPricesRequest":{ "type":"structure", "members":{ @@ -1865,7 +1869,7 @@ "documentation":"

      For an initial request for a list of prices, omit this element. If the number of prices that are not yet complete is greater than the value that you specified for MaxItems, you can use Marker to return additional prices. Get the value of NextPageMarker from the previous response, and submit another request that includes the value of NextPageMarker in the Marker element.

      Used only for all TLDs. If you specify a TLD, don't specify a Marker.

      " }, "MaxItems":{ - "shape":"PageMaxItems", + "shape":"ListPricesPageMaxItems", "documentation":"

      Number of Prices to be returned.

      Used only for all TLDs. If you specify a TLD, don't specify a MaxItems.

      " } } @@ -2398,7 +2402,7 @@ }, "PrivacyProtectAdminContact":{ "shape":"Boolean", - "documentation":"

      Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the admin contact.

      You must specify the same privacy setting for the administrative, registrant, and technical contacts.

      Default: true

      " + "documentation":"

      Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information for the registrar, the phrase \"REDACTED FOR PRIVACY\", or \"On behalf of <domain name> owner.\".

      While some domains may allow different privacy settings per contact, we recommend specifying the same privacy setting for all contacts.

      Default: true

      " }, "PrivacyProtectRegistrantContact":{ "shape":"Boolean", @@ -2531,7 +2535,7 @@ }, "Consent":{ "shape":"Consent", - "documentation":"

      Customer's consent for the owner change request.

      " + "documentation":"

      Customer's consent for the owner change request. Required if the domain is not free (consent price is more than $0.00).

      " } }, "documentation":"

      The UpdateDomainContact request includes the following elements.

      " diff --git a/services/route53recoverycluster/pom.xml b/services/route53recoverycluster/pom.xml index 6ca94f3fd4dd..1b5b7ba72d8d 100644 --- a/services/route53recoverycluster/pom.xml +++ b/services/route53recoverycluster/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT route53recoverycluster AWS Java SDK :: Services :: Route53 Recovery Cluster diff --git a/services/route53recoverycontrolconfig/pom.xml b/services/route53recoverycontrolconfig/pom.xml index be0f4cb8d59e..e5326e11fd82 100644 --- a/services/route53recoverycontrolconfig/pom.xml +++ b/services/route53recoverycontrolconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT route53recoverycontrolconfig AWS Java SDK :: Services :: Route53 Recovery Control Config diff --git a/services/route53recoveryreadiness/pom.xml b/services/route53recoveryreadiness/pom.xml index a38df72ee4c9..59bb2a5fca2b 100644 --- a/services/route53recoveryreadiness/pom.xml +++ b/services/route53recoveryreadiness/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT route53recoveryreadiness AWS Java SDK :: Services :: Route53 Recovery Readiness diff --git a/services/route53resolver/pom.xml b/services/route53resolver/pom.xml index a60b0766c1e8..2944d6fb54a5 100644 --- a/services/route53resolver/pom.xml +++ b/services/route53resolver/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT route53resolver AWS Java SDK :: Services :: Route53Resolver diff --git a/services/rum/pom.xml b/services/rum/pom.xml index 66bf357d3bec..be68a634dde9 100644 --- a/services/rum/pom.xml +++ b/services/rum/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT rum AWS Java SDK :: Services :: RUM diff --git a/services/rum/src/main/resources/codegen-resources/customization.config 
b/services/rum/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..0e729acd0371 --- /dev/null +++ b/services/rum/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "generateEndpointClientTests": true +} diff --git a/services/s3/pom.xml b/services/s3/pom.xml index 2db7037a6855..f7c15100ac90 100644 --- a/services/s3/pom.xml +++ b/services/s3/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT s3 AWS Java SDK :: Services :: Amazon S3 diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/CopyObjectHelper.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/CopyObjectHelper.java index c09c31d42a5a..e3e125c9d084 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/CopyObjectHelper.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/CopyObjectHelper.java @@ -130,7 +130,7 @@ private void doCopyInParts(CopyObjectRequest copyObjectRequest, Long contentLength, CompletableFuture returnFuture, String uploadId) { - long optimalPartSize = calculateOptimalPartSizeForCopy(partSizeInBytes); + long optimalPartSize = calculateOptimalPartSizeForCopy(contentLength); int partCount = determinePartCount(contentLength, optimalPartSize); diff --git a/services/s3/src/main/resources/codegen-resources/customization.config b/services/s3/src/main/resources/codegen-resources/customization.config index d09990c54ad8..18756b83c79a 100644 --- a/services/s3/src/main/resources/codegen-resources/customization.config +++ b/services/s3/src/main/resources/codegen-resources/customization.config @@ -155,62 +155,19 @@ "hasAccelerateModeEnabledProperty":true }, "skipEndpointTests": { - "region is not a valid DNS-suffix": "Validation for region happens in client builder", "Invalid access point ARN: Not S3": "Test assumes UseArnRegion is true but SDK defaults to false", 
"Invalid access point ARN: AccountId is invalid": "Test assumes UseArnRegion is true but SDK defaults to false", "Invalid access point ARN: access point name is invalid": "Test assumes UseArnRegion is true but SDK defaults to false", "Access points (disable access points explicitly false)": "Test assumes UseArnRegion is true but SDK defaults to false", "Bucket region is invalid": "Test assumes UseArnRegion is true but SDK defaults to false", - "Access points when Access points explicitly disabled (used for CreateBucket)": "Explicitly disabling access points not support on client", "Access point ARN with FIPS & Dualstack": "Test assumes UseArnRegion is true but SDK defaults to false", "Access point ARN with Dualstack": "Test assumes UseArnRegion is true but SDK defaults to false", - "scheme is respected (virtual addressing)": "Test is broken for client test, needs operationInputs for Bucket", - "scheme is respected": "Test is broken for client test, needs operationInputs for Bucket", - "invalid Endpoint override": "Test is broken for client test, needs operationInputs for Bucket", "vanilla access point arn with region mismatch and UseArnRegion unset": "SDK config default to UseArnRegion = false", "no region set": "SDK client builder requires a region", - "ForcePathStyle, aws-global region with fips is invalid": "Test is broken for client tests, need operationInputs for Bucket param", - "accelerate (dualstack=false)@cn-north-1": "Test is broken for client tests, need operationInputs for Bucket param", "path style + accelerate = error@us-west-2": "Validation for this happens during client build time", "path style + accelerate = error@cn-north-1": "Validation for this happens during client build time", "path style + accelerate = error@af-south-1": "Validation for this happens during client build time", - "SDK::HOST + accelerate@cn-north-1": "Test is broken for client tests, need operationInputs for Bucket param", - "outposts arn with region mismatch and UseArnRegion 
unset": "SDK default to UseArnRegion=false", - "S3 outposts does not support dualstack": "Test is broken for client tests, need operationInputs for Bucket param", - "S3 outposts does not support fips": "Test is broken for client tests, need operationInputs for Bucket param", - "S3 outposts does not support accelerate": "Test is broken for client tests, need operationInputs for Bucket param", - "validates against subresource": "Test is broken for client tests, need operationInputs for Bucket param", - "object lambda @us-east-1": "Test is broken for client tests, need operationInputs for Bucket param", - "object lambda @us-gov-east-1": "Test is broken for client tests, need operationInputs for Bucket param", - "object lambda @us-gov-east-1, with fips": "Test is broken for client tests, need operationInputs for Bucket param", - "object lambda with invalid arn - invalid resource": "Test is broken for client tests, need operationInputs for Bucket param", - "object lambda with invalid arn - missing region": "Test is broken for client tests, need operationInputs for Bucket param", - "object lambda with invalid arn - missing account-id": "Test is broken for client tests, need operationInputs for Bucket param", - "object lambda with invalid arn - missing access point name": "Test is broken for client tests, need operationInputs for Bucket param", - "object lambda with invalid arn - access point name contains invalid character: *": "Test is broken for client tests, need operationInputs for Bucket param", - "object lambda with invalid arn - access point name contains invalid character: .": "Test is broken for client tests, need operationInputs for Bucket param", - "object lambda with invalid arn - access point name contains sub resources": "Test is broken for client tests, need operationInputs for Bucket param", - "WriteGetObjectResponse with accelerate": "Test is broken for client tests, need operationInputs to make SDK use WriteGetObjectResponse", - "WriteGetObjectResponse 
with invalid partition": "SDK validates region earlier in the chain", - "WriteGetObjectResponse with an unknown partition": "Test is broken for client tests, need operationInputs to make SDK use WriteGetObjectResponse", - "S3 Outposts Abba Real Outpost Prod us-west-1": "Test is broken for client tests, need operationInputs for Bucket param", - "S3 Outposts Abba Real Outpost Prod ap-east-1": "Test is broken for client tests, need operationInputs for Bucket param", - "S3 Outposts Abba Ec2 Outpost Prod us-east-1": "Test is broken for client tests, need operationInputs for Bucket param", - "S3 Outposts Abba Ec2 Outpost Prod me-south-1": "Test is broken for client tests, need operationInputs for Bucket param", - "S3 Outposts Abba Real Outpost Beta": "Test is broken for client tests, need operationInputs for Bucket param", - "S3 Outposts Abba Ec2 Outpost Beta": "Test is broken for client tests, need operationInputs for Bucket param", - "S3 Outposts Abba - No endpoint set for beta": "Test is broken for client tests, need operationInputs for Bucket param", - "S3 Outposts Abba Invalid hardware type": "Test is broken for client tests, need operationInputs for Bucket param", - "S3 Outposts Abba Special character in Outpost Arn": "Test is broken for client tests, need operationInputs for Bucket param", - "S3 Outposts Snow with bucket": "Test is broken for client tests, need operationInputs for Bucket and SnowEndpointUrl param", - "S3 Outposts Snow without bucket": "Test is broken for client tests, need operationInputs for Bucket and SnowEndpointUrl param", - "S3 Outposts Snow no port": "Test is broken for client tests, need operationInputs for Bucket and SnowEndpointUrl param", - "S3 Outposts Snow dns endpoint": "Test is broken for client tests, need operationInputs for Bucket and SnowEndpointUrl param", - "S3 Outposts Snow invalid url": "Test is broken for client tests, need operationInputs for Bucket and SnowEndpointUrl param", - "S3 Outposts Snow FIPS enabled": "Test is 
broken for client tests, need operationInputs for Bucket and SnowEndpointUrl param", - "S3 Outposts Snow Dual-stack enabled": "Test is broken for client tests, need operationInputs for Bucket and SnowEndpointUrl param", - "S3 Outposts Snow Accelerate enabled": "Test is broken for client tests, need operationInputs for Bucket and SnowEndpointUrl param", - "Endpoint override, accesspoint with http, path, query, and port": "Test is broken for client tests, need operationInputs for Bucket param" + "outposts arn with region mismatch and UseArnRegion unset": "SDK default to UseArnRegion=false" }, "attachPayloadTraitToMember": { "GetBucketLocationOutput": "LocationConstraint" diff --git a/services/s3/src/main/resources/codegen-resources/endpoint-tests.json b/services/s3/src/main/resources/codegen-resources/endpoint-tests.json index dd599bc3414c..f1dc6f77dccf 100644 --- a/services/s3/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/s3/src/main/resources/codegen-resources/endpoint-tests.json @@ -2628,6 +2628,1366 @@ "Accelerate": false } }, + { + "documentation": "non-bucket endpoint with FIPS: TODO(descriptive)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://beta.example.com:1234/path" + } + }, + "params": { + "Region": "us-west-2", + "Endpoint": "http://beta.example.com:1234/path", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "FIPS + dualstack + custom endpoint TODO(descriptive)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://beta.example.com:1234/path" + } + }, + "params": { + "Region": "us-west-2", + "Endpoint": "http://beta.example.com:1234/path", + "UseFIPS": true, + "UseDualStack": true + } + }, + { 
+ "documentation": "dualstack + custom endpoint TODO(descriptive)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://beta.example.com:1234/path" + } + }, + "params": { + "Region": "us-west-2", + "Endpoint": "http://beta.example.com:1234/path", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "custom endpoint without FIPS/dualstack", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://beta.example.com:1234/path" + } + }, + "params": { + "Region": "us-west-2", + "Endpoint": "http://beta.example.com:1234/path", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "s3 object lambda with access points disabled", + "expect": { + "error": "Access points are not supported for this operation" + }, + "params": { + "Region": "us-west-2", + "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint:myendpoint", + "DisableAccessPoints": true + } + }, + { + "documentation": "non bucket + FIPS", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-fips.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "standard non bucket endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false + } + 
}, + { + "documentation": "non bucket endpoint with FIPS + Dualstack", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-fips.dualstack.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "non bucket endpoint with dualstack", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3.dualstack.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "use global endpoint + IP address endpoint override", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "http://127.0.0.1/bucket" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "http://127.0.0.1", + "UseGlobalEndpoint": true + } + }, + { + "documentation": "non-dns endpoint + global endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://s3.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "UseFIPS": false, + "UseDualStack": false, + "UseGlobalEndpoint": true + } + }, + { + "documentation": "endpoint override + use global endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": 
"us-east-1" + } + ] + }, + "url": "http://foo.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "UseFIPS": false, + "UseDualStack": false, + "UseGlobalEndpoint": true, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "FIPS + dualstack + non-bucket endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://s3-fips.dualstack.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "FIPS + dualstack + non-DNS endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://s3-fips.dualstack.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "ForcePathStyle": true, + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "endpoint override + FIPS + dualstack (BUG)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "http://foo.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "ForcePathStyle": true, + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "endpoint override + non-dns bucket + FIPS (BUG)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "http://foo.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + 
"Bucket": "bucket!", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "FIPS + bucket endpoint + force path style", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://s3-fips.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "ForcePathStyle": true, + "UseFIPS": true, + "UseDualStack": false, + "UseGlobalEndpoint": true + } + }, + { + "documentation": "bucket + FIPS + force path style", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://s3-fips.dualstack.us-east-1.amazonaws.com/bucket" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket", + "ForcePathStyle": true, + "UseFIPS": true, + "UseDualStack": true, + "UseGlobalEndpoint": true + } + }, + { + "documentation": "FIPS + dualstack + use global endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://bucket.s3-fips.dualstack.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket", + "UseFIPS": true, + "UseDualStack": true, + "UseGlobalEndpoint": true + } + }, + { + "documentation": "URI encoded bucket + use global endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://foo.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "UseFIPS": true, + "UseDualStack": false, + "UseGlobalEndpoint": 
true, + "Endpoint": "https://foo.com" + } + }, + { + "documentation": "FIPS + path based endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://s3-fips.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "UseFIPS": true, + "UseDualStack": false, + "Accelerate": false, + "UseGlobalEndpoint": true + } + }, + { + "documentation": "accelerate + dualstack + global endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://bucket.s3-accelerate.dualstack.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket", + "UseFIPS": false, + "UseDualStack": true, + "Accelerate": true, + "UseGlobalEndpoint": true + } + }, + { + "documentation": "dualstack + global endpoint + non URI safe bucket", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://s3.dualstack.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "Accelerate": false, + "UseDualStack": true, + "UseFIPS": false, + "UseGlobalEndpoint": true + } + }, + { + "documentation": "FIPS + uri encoded bucket", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://s3-fips.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "ForcePathStyle": true, + "Accelerate": false, + "UseDualStack": false, + "UseFIPS": true, + 
"UseGlobalEndpoint": true + } + }, + { + "documentation": "endpoint override + non-uri safe endpoint + force path style", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "http://foo.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "ForcePathStyle": true, + "Accelerate": false, + "UseDualStack": false, + "UseFIPS": true, + "Endpoint": "http://foo.com", + "UseGlobalEndpoint": true + } + }, + { + "documentation": "FIPS + Dualstack + global endpoint + non-dns bucket", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://s3-fips.dualstack.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "Accelerate": false, + "UseDualStack": true, + "UseFIPS": true, + "UseGlobalEndpoint": true + } + }, + { + "documentation": "endpoint override + FIPS + dualstack (this is wrong—it's a bug in the UseGlobalEndpoint branch)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "http://foo.com" + } + }, + "params": { + "Region": "us-east-1", + "UseDualStack": true, + "UseFIPS": true, + "UseGlobalEndpoint": true, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "non-bucket endpoint override + dualstack + global endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "http://foo.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + 
"UseGlobalEndpoint": true, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "Endpoint override + UseGlobalEndpoint + us-east-1", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "http://foo.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "UseGlobalEndpoint": true, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "non-FIPS partition with FIPS set + custom endpoint", + "expect": { + "error": "Partition does not support FIPS" + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false, + "UseGlobalEndpoint": true, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "aws-global signs as us-east-1", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-fips.dualstack.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket!", + "UseFIPS": true, + "Accelerate": false, + "UseDualStack": true + } + }, + { + "documentation": "aws-global signs as us-east-1", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://bucket.foo.com" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket", + "UseDualStack": false, + "UseFIPS": false, + "Accelerate": false, + "Endpoint": "https://foo.com" + } + }, + { + "documentation": "aws-global + dualstack + path-only bucket", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + 
"url": "https://s3.dualstack.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket!", + "UseDualStack": true, + "UseFIPS": false, + "Accelerate": false + } + }, + { + "documentation": "aws-global + path-only bucket", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket!" + } + }, + { + "documentation": "aws-global + fips + custom endpoint (TODO: should be an error)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://foo.com/bucket%21" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket!", + "UseDualStack": false, + "UseFIPS": true, + "Accelerate": false, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "aws-global, endpoint override & path only-bucket", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://foo.com/bucket%21" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket!", + "UseDualStack": false, + "UseFIPS": false, + "Accelerate": false, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "aws-global + dualstack + custom endpoint (TODO: should be an error)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://foo.com" + } + }, + "params": { + "Region": "aws-global", + "UseDualStack": true, + "UseFIPS": false, + "Accelerate": false, + "Endpoint": 
"http://foo.com" + } + }, + { + "documentation": "accelerate, dualstack + aws-global", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://bucket.s3-accelerate.dualstack.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket", + "UseDualStack": true, + "UseFIPS": false, + "Accelerate": true + } + }, + { + "documentation": "FIPS + aws-global + path only bucket. TODO: this should be an error", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-fips.dualstack.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket!", + "ForcePathStyle": true, + "UseDualStack": true, + "UseFIPS": true, + "Accelerate": false + } + }, + { + "documentation": "aws-global + FIPS + endpoint override. 
TODO: should this be an error?", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://foo.com" + } + }, + "params": { + "Region": "aws-global", + "UseFIPS": true, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "force path style, aws-global & endpoint override", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://foo.com/bucket%21" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket!", + "ForcePathStyle": true, + "UseFIPS": true, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "ip address causes path style to be forced", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://192.168.1.1/bucket" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket", + "Endpoint": "http://192.168.1.1" + } + }, + { + "documentation": "endpoint override with aws-global region", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://foo.com" + } + }, + "params": { + "Region": "aws-global", + "UseFIPS": true, + "UseDualStack": true, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "FIPS + path-only (TODO: consider making this an error)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-fips.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + 
"Region": "aws-global", + "Bucket": "bucket!", + "UseFIPS": true + } + }, + { + "documentation": "empty arn type", + "expect": { + "error": "Invalid ARN: No ARN type specified" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:not-s3:us-west-2:123456789012::myendpoint" + } + }, + { + "documentation": "path style can't be used with accelerate", + "expect": { + "error": "Path-style addressing cannot be used with S3 Accelerate" + }, + "params": { + "Region": "us-east-2", + "Bucket": "bucket!", + "Accelerate": true + } + }, + { + "documentation": "invalid region", + "expect": { + "error": "Invalid region: region was not a valid DNS name." + }, + "params": { + "Region": "us-east-2!", + "Bucket": "bucket.subdomain", + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "invalid region", + "expect": { + "error": "Invalid region: region was not a valid DNS name." + }, + "params": { + "Region": "us-east-2!", + "Bucket": "bucket", + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "empty arn type", + "expect": { + "error": "Invalid Access Point Name" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:s3::123456789012:accesspoint:my_endpoint" + } + }, + { + "documentation": "empty arn type", + "expect": { + "error": "Client was configured for partition `aws` but ARN (`arn:aws:s3:cn-north-1:123456789012:accesspoint:my-endpoint`) has `aws-cn`" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:s3:cn-north-1:123456789012:accesspoint:my-endpoint", + "UseArnRegion": true + } + }, + { + "documentation": "invalid arn region", + "expect": { + "error": "Invalid region in ARN: `us-east_2` (invalid DNS name)" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:s3-object-lambda:us-east_2:123456789012:accesspoint:my-endpoint", + "UseArnRegion": true + } + }, + { + "documentation": "invalid ARN outpost", + "expect": { + "error": "Invalid ARN: The outpost Id may only contain a-z, A-Z, 0-9 and `-`. 
Found: `op_01234567890123456`" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost/op_01234567890123456/accesspoint/reports", + "UseArnRegion": true + } + }, + { + "documentation": "invalid ARN", + "expect": { + "error": "Invalid ARN: expected an access point name" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-01234567890123456/reports" + } + }, + { + "documentation": "invalid ARN", + "expect": { + "error": "Invalid ARN: Expected a 4-component resource" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-01234567890123456" + } + }, + { + "documentation": "invalid outpost type", + "expect": { + "error": "Expected an outpost type `accesspoint`, found not-accesspoint" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-01234567890123456/not-accesspoint/reports" + } + }, + { + "documentation": "invalid outpost type", + "expect": { + "error": "Invalid region in ARN: `us-east_1` (invalid DNS name)" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:s3-outposts:us-east_1:123456789012:outpost/op-01234567890123456/not-accesspoint/reports" + } + }, + { + "documentation": "invalid outpost type", + "expect": { + "error": "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. 
Found: `12345_789012`" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:s3-outposts:us-east-1:12345_789012:outpost/op-01234567890123456/not-accesspoint/reports" + } + }, + { + "documentation": "invalid outpost type", + "expect": { + "error": "Invalid ARN: The Outpost Id was not set" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:s3-outposts:us-east-1:12345789012:outpost" + } + }, + { + "documentation": "use global endpoint virtual addressing", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-2", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://bucket.example.com" + } + }, + "params": { + "Region": "us-east-2", + "Bucket": "bucket", + "Endpoint": "http://example.com", + "UseGlobalEndpoint": true + } + }, + { + "documentation": "global endpoint + ip address", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-2", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://192.168.0.1/bucket" + } + }, + "params": { + "Region": "us-east-2", + "Bucket": "bucket", + "Endpoint": "http://192.168.0.1", + "UseGlobalEndpoint": true + } + }, + { + "documentation": "invalid outpost type", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-2", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3.us-east-2.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "us-east-2", + "Bucket": "bucket!", + "UseGlobalEndpoint": true + } + }, + { + "documentation": "invalid outpost type", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-2", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://bucket.s3-accelerate.amazonaws.com" + } + }, + "params": { + 
"Region": "us-east-2", + "Bucket": "bucket", + "Accelerate": true, + "UseGlobalEndpoint": true + } + }, + { + "documentation": "use global endpoint + custom endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-2", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://foo.com/bucket%21" + } + }, + "params": { + "Region": "us-east-2", + "Bucket": "bucket!", + "UseGlobalEndpoint": true, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "use global endpoint, not us-east-1, force path style", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-2", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://foo.com/bucket%21" + } + }, + "params": { + "Region": "us-east-2", + "Bucket": "bucket!", + "UseGlobalEndpoint": true, + "ForcePathStyle": true, + "Endpoint": "http://foo.com" + } + }, { "documentation": "vanilla virtual addressing@us-west-2", "expect": { @@ -6495,6 +7855,122 @@ "UseDualStack": false, "Accelerate": false } + }, + { + "documentation": "S3 Outposts Abba - No endpoint set for beta", + "expect": { + "error": "Expected a endpoint to be specified but no endpoint was found" + }, + "params": { + "Region": "us-east-1", + "Bucket": "test-accessp-e0b1d075431d83bebde8xz5w8ijx1qzlbp3i3ebeta0--op-s3", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": false + } + }, + { + "documentation": "S3 Snow with bucket", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "snow", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://10.0.1.12:433/bucketName" + } + }, + "params": { + "Region": "snow", + "Bucket": "bucketName", + "Endpoint": "http://10.0.1.12:433", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": false + } + }, + { + "documentation": "S3 Snow 
without bucket", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "snow", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://10.0.1.12:433" + } + }, + "params": { + "Region": "snow", + "Endpoint": "https://10.0.1.12:433", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": false + } + }, + { + "documentation": "S3 Snow no port", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "snow", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://10.0.1.12/bucketName" + } + }, + "params": { + "Region": "snow", + "Bucket": "bucketName", + "Endpoint": "http://10.0.1.12", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": false + } + }, + { + "documentation": "S3 Snow dns endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "snow", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://amazonaws.com/bucketName" + } + }, + "params": { + "Region": "snow", + "Bucket": "bucketName", + "Endpoint": "https://amazonaws.com", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": false + } } ], "version": "1.0" diff --git a/services/s3/src/main/resources/codegen-resources/service-2.json b/services/s3/src/main/resources/codegen-resources/service-2.json index ad4987a2e605..4b91bba4672c 100644 --- a/services/s3/src/main/resources/codegen-resources/service-2.json +++ b/services/s3/src/main/resources/codegen-resources/service-2.json @@ -51,7 +51,7 @@ {"shape":"ObjectNotInActiveTierError"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectCOPY.html", - "documentation":"

      Creates a copy of an object that is already stored in Amazon S3.

      You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.

      All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.

      A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. If you call the S3 API directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return the error).

      If the copy is successful, you receive a response with information about the copied object.

      If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.

      The copy request charge is based on the storage class and Region that you specify for the destination object. For pricing information, see Amazon S3 pricing.

      Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

      Metadata

      When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.

      To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive header. When you grant permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.

      x-amz-website-redirect-location is unique to each object and must be specified in the request headers to copy the value.

      x-amz-copy-source-if Headers

      To only copy an object under certain conditions, such as whether the Etag matches or whether the object was modified before or after a specified date, use the following request parameters:

      • x-amz-copy-source-if-match

      • x-amz-copy-source-if-none-match

      • x-amz-copy-source-if-unmodified-since

      • x-amz-copy-source-if-modified-since

      If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:

      • x-amz-copy-source-if-match condition evaluates to true

      • x-amz-copy-source-if-unmodified-since condition evaluates to false

      If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:

      • x-amz-copy-source-if-none-match condition evaluates to false

      • x-amz-copy-source-if-modified-since condition evaluates to true

      All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.

      Server-side encryption

      Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with a Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target object copy.

      When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can use other appropriate encryption-related headers to encrypt the target object with a KMS key, an Amazon S3 managed key, or a customer-provided key. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If the source object for the copy is stored in Amazon S3 using SSE-C, you must provide the necessary encryption information in your request so that Amazon S3 can decrypt the object for copying. For more information about server-side encryption, see Using Server-Side Encryption.

      If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.

      Access Control List (ACL)-Specific Request Headers

      When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

      If the bucket that you're copying objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format.

      For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide.

      If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner.

      Checksums

      When copying an object, if it has a checksum, that checksum will be copied to the new object by default. When you copy the object over, you may optionally specify a different checksum algorithm to use with the x-amz-checksum-algorithm header.

      Storage Class Options

      You can use the CopyObject action to change the storage class of an object that is already stored in Amazon S3 using the StorageClass parameter. For more information, see Storage Classes in the Amazon S3 User Guide.

      If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject. For more information, see Copying Objects.

      Versioning

      By default, x-amz-copy-source identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId subresource.

      If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.

      If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.

      The following operations are related to CopyObject:

      ", + "documentation":"

      Creates a copy of an object that is already stored in Amazon S3.

      You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.

      All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.

      A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. If you call the S3 API directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throws an exception (or, for the SDKs that don't use exceptions, they return the error).

      If the copy is successful, you receive a response with information about the copied object.

      If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.

      The copy request charge is based on the storage class and Region that you specify for the destination object. For pricing information, see Amazon S3 pricing.

      Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

      Metadata

      When copying an object, you can preserve all metadata (the default) or specify new metadata. However, the access control list (ACL) is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.

      To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive header. When you grant permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.

      x-amz-website-redirect-location is unique to each object and must be specified in the request headers to copy the value.

      x-amz-copy-source-if Headers

      To only copy an object under certain conditions, such as whether the Etag matches or whether the object was modified before or after a specified date, use the following request parameters:

      • x-amz-copy-source-if-match

      • x-amz-copy-source-if-none-match

      • x-amz-copy-source-if-unmodified-since

      • x-amz-copy-source-if-modified-since

      If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:

      • x-amz-copy-source-if-match condition evaluates to true

      • x-amz-copy-source-if-unmodified-since condition evaluates to false

      If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:

      • x-amz-copy-source-if-none-match condition evaluates to false

      • x-amz-copy-source-if-modified-since condition evaluates to true

      All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.

      Server-side encryption

      Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target object copy.

      When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can use other appropriate encryption-related headers to encrypt the target object with a KMS key, an Amazon S3 managed key, or a customer-provided key. With server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its data centers and decrypts the data when you access it. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If the source object for the copy is stored in Amazon S3 using SSE-C, you must provide the necessary encryption information in your request so that Amazon S3 can decrypt the object for copying. For more information about server-side encryption, see Using Server-Side Encryption.

      If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.

      Access Control List (ACL)-Specific Request Headers

      When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups that are defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

      If the bucket that you're copying objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format.

      For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide.

      If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner.

      Checksums

      When copying an object, if it has a checksum, that checksum will be copied to the new object by default. When you copy the object over, you can optionally specify a different checksum algorithm to use with the x-amz-checksum-algorithm header.

      Storage Class Options

      You can use the CopyObject action to change the storage class of an object that is already stored in Amazon S3 by using the StorageClass parameter. For more information, see Storage Classes in the Amazon S3 User Guide.

      If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject. For more information, see Copying Objects.

      Versioning

      By default, the x-amz-copy-source header identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId subresource.

      If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.

      If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.

      The following operations are related to CopyObject:

      ", "alias":"PutObjectCopy" }, "CreateBucket":{ @@ -67,7 +67,7 @@ {"shape":"BucketAlreadyOwnedByYou"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html", - "documentation":"

      Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.

      Not every string is an acceptable bucket name. For information about bucket naming restrictions, see Bucket naming rules.

      If you want to create an Amazon S3 on Outposts bucket, see Create Bucket.

      By default, the bucket is created in the US East (N. Virginia) Region. You can optionally specify a Region in the request body. You might choose a Region to optimize latency, minimize costs, or address regulatory requirements. For example, if you reside in Europe, you will probably find it advantageous to create buckets in the Europe (Ireland) Region. For more information, see Accessing a bucket.

      If you send your create bucket request to the s3.amazonaws.com endpoint, the request goes to the us-east-1 Region. Accordingly, the signature calculations in Signature Version 4 must use us-east-1 as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle 307 redirect. For more information, see Virtual hosting of buckets.

      Access control lists (ACLs)

      When creating a bucket using this operation, you can optionally configure the bucket ACL to specify the accounts or groups that should be granted specific permissions on the bucket.

      If your CreateBucket request sets bucket owner enforced for S3 Object Ownership and specifies a bucket ACL that provides access to an external Amazon Web Services account, your request fails with a 400 error and returns the InvalidBucketAclWithObjectOwnership error code. For more information, see Controlling object ownership in the Amazon S3 User Guide.

      There are two ways to grant the appropriate permissions using the request headers.

      • Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

      • Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These headers map to the set of permissions Amazon S3 supports in an ACL. For more information, see Access control list (ACL) overview.

        You specify each grantee as a type=value pair, where the type is one of the following:

        • id – if the value specified is the canonical user ID of an Amazon Web Services account

        • uri – if you are granting permissions to a predefined group

        • emailAddress – if the value specified is the email address of an Amazon Web Services account

          Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

          • US East (N. Virginia)

          • US West (N. California)

          • US West (Oregon)

          • Asia Pacific (Singapore)

          • Asia Pacific (Sydney)

          • Asia Pacific (Tokyo)

          • Europe (Ireland)

          • South America (São Paulo)

          For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

        For example, the following x-amz-grant-read header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata:

        x-amz-grant-read: id=\"11112222333\", id=\"444455556666\"

      You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

      Permissions

      In addition to s3:CreateBucket, the following permissions are required when your CreateBucket includes specific headers:

      • ACLs - If your CreateBucket request specifies ACL permissions and the ACL is public-read, public-read-write, authenticated-read, or if you specify access permissions explicitly through any other ACL, both s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL for the CreateBucket request is private or doesn't specify any ACLs, only s3:CreateBucket permission is needed.

      • Object Lock - If ObjectLockEnabledForBucket is set to true in your CreateBucket request, s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are required.

      • S3 Object Ownership - If your CreateBucket request includes the x-amz-object-ownership header, s3:PutBucketOwnershipControls permission is required.

      The following operations are related to CreateBucket:

      ", + "documentation":"

      Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.

      Not every string is an acceptable bucket name. For information about bucket naming restrictions, see Bucket naming rules.

      If you want to create an Amazon S3 on Outposts bucket, see Create Bucket.

      By default, the bucket is created in the US East (N. Virginia) Region. You can optionally specify a Region in the request body. You might choose a Region to optimize latency, minimize costs, or address regulatory requirements. For example, if you reside in Europe, you will probably find it advantageous to create buckets in the Europe (Ireland) Region. For more information, see Accessing a bucket.

      If you send your create bucket request to the s3.amazonaws.com endpoint, the request goes to the us-east-1 Region. Accordingly, the signature calculations in Signature Version 4 must use us-east-1 as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle 307 redirect. For more information, see Virtual hosting of buckets.

      Permissions

      In addition to s3:CreateBucket, the following permissions are required when your CreateBucket request includes specific headers:

      • Access control lists (ACLs) - If your CreateBucket request specifies access control list (ACL) permissions and the ACL is public-read, public-read-write, authenticated-read, or if you specify access permissions explicitly through any other ACL, both s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL for the CreateBucket request is private or if the request doesn't specify any ACLs, only s3:CreateBucket permission is needed.

      • Object Lock - If ObjectLockEnabledForBucket is set to true in your CreateBucket request, s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are required.

      • S3 Object Ownership - If your CreateBucket request includes the x-amz-object-ownership header, then the s3:PutBucketOwnershipControls permission is required. By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are disabled. We recommend keeping ACLs disabled, except in uncommon use cases where you must control access for each object individually. If you want to change the ObjectOwnership setting, you can use the x-amz-object-ownership header in your CreateBucket request to set the ObjectOwnership setting of your choice. For more information about S3 Object Ownership, see Controlling object ownership in the Amazon S3 User Guide.

      • S3 Block Public Access - If your specific use case requires granting public access to your S3 resources, you can disable Block Public Access. You can create a new bucket with Block Public Access enabled, then separately call the DeletePublicAccessBlock API. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. By default, all Block Public Access settings are enabled for new buckets. To avoid inadvertent exposure of your resources, we recommend keeping the S3 Block Public Access settings enabled. For more information about S3 Block Public Access, see Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.

      If your CreateBucket request sets BucketOwnerEnforced for Amazon S3 Object Ownership and specifies a bucket ACL that provides access to an external Amazon Web Services account, your request fails with a 400 error and returns the InvalidBucketAclWithObjectOwnership error code. For more information, see Setting Object Ownership on an existing bucket in the Amazon S3 User Guide.

      The following operations are related to CreateBucket:

      ", "alias":"PutBucket", "staticContextParams":{ "DisableAccessPoints":{"value":true} @@ -115,7 +115,7 @@ }, "input":{"shape":"DeleteBucketCorsRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEcors.html", - "documentation":"

      Deletes the cors configuration information set for the bucket.

      To use this operation, you must have permission to perform the s3:PutBucketCORS action. The bucket owner has this permission by default and can grant this permission to others.

      For information about cors, see Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide.

      The following operations are related to DeleteBucketCors:

      " + "documentation":"

      Deletes the cors configuration information set for the bucket.

      To use this operation, you must have permission to perform the s3:PutBucketCORS action. The bucket owner has this permission by default and can grant this permission to others.

      For information about cors, see Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide.

      Related Resources

      " }, "DeleteBucketEncryption":{ "name":"DeleteBucketEncryption", @@ -516,7 +516,7 @@ {"shape":"InvalidObjectState"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html", - "documentation":"

      Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. If you grant READ access to the anonymous user, you can return the object without using an authorization header.

      An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.

      To get an object from such a logical hierarchy, specify the full key name for the object in the GET operation. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the resource as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification.

      For more information about returning the ACL of an object, see GetObjectAcl.

      If the object you are retrieving is stored in the S3 Glacier or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this action returns an InvalidObjectState error. For information about restoring archived objects, see Restoring Archived Objects.

      Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

      If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:

      • x-amz-server-side-encryption-customer-algorithm

      • x-amz-server-side-encryption-customer-key

      • x-amz-server-side-encryption-customer-key-MD5

      For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

      Assuming you have the relevant permission to read object tags, the response also returns the x-amz-tagging-count header that provides the number of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.

      Permissions

      You need the relevant read object (or version) permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

      • If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code 404 (\"no such key\") error.

      • If you don’t have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 (\"access denied\") error.

      Versioning

      By default, the GET action returns the current version of an object. To return a different version, use the versionId subresource.

      • If you supply a versionId, you need the s3:GetObjectVersion permission to access a specific version of an object. If you request a specific version, you do not need to have the s3:GetObject permission. If you request the current version without a specific version ID, only s3:GetObject permission is required. s3:GetObjectVersion permission won't be required.

      • If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

      For more information about versioning, see PutBucketVersioning.

      Overriding Response Header Values

      There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.

      You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type, Content-Language, Expires, Cache-Control, Content-Disposition, and Content-Encoding. To override these header values in the GET response, you use the following request parameters.

      You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.

      • response-content-type

      • response-content-language

      • response-expires

      • response-cache-control

      • response-content-disposition

      • response-content-encoding

      Overriding Response Header Values

      If both of the If-Match and If-Unmodified-Since headers are present in the request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since condition evaluates to false; then, S3 returns 200 OK and the data requested.

      If both of the If-None-Match and If-Modified-Since headers are present in the request as follows: If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates to true; then, S3 returns 304 Not Modified response code.

      For more information about conditional requests, see RFC 7232.

      The following operations are related to GetObject:

      ", + "documentation":"

      Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. If you grant READ access to the anonymous user, you can return the object without using an authorization header.

      An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.

      To get an object from such a logical hierarchy, specify the full key name for the object in the GET operation. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the resource as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification.

      For more information about returning the ACL of an object, see GetObjectAcl.

      If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this action returns an InvalidObjectState error. For information about restoring archived objects, see Restoring Archived Objects.

      Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 Bad Request error.

      If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:

      • x-amz-server-side-encryption-customer-algorithm

      • x-amz-server-side-encryption-customer-key

      • x-amz-server-side-encryption-customer-key-MD5

      For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

      Assuming you have the relevant permission to read object tags, the response also returns the x-amz-tagging-count header that provides the number of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.

      Permissions

      You need the relevant read object (or version) permission for this operation. For more information, see Specifying Permissions in a Policy. If the object that you request doesn’t exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

      If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 (Not Found) error.

      If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 (\"access denied\") error.

      Versioning

      By default, the GET action returns the current version of an object. To return a different version, use the versionId subresource.

      • If you supply a versionId, you need the s3:GetObjectVersion permission to access a specific version of an object. If you request a specific version, you do not need to have the s3:GetObject permission. If you request the current version without a specific version ID, only s3:GetObject permission is required. s3:GetObjectVersion permission won't be required.

      • If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

      For more information about versioning, see PutBucketVersioning.

      Overriding Response Header Values

      There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.

      You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type, Content-Language, Expires, Cache-Control, Content-Disposition, and Content-Encoding. To override these header values in the GET response, you use the following request parameters.

      You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.

      • response-content-type

      • response-content-language

      • response-expires

      • response-cache-control

      • response-content-disposition

      • response-content-encoding

      Additional Considerations about Request Headers

      If both of the If-Match and If-Unmodified-Since headers are present in the request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since condition evaluates to false; then, S3 returns 200 OK and the data requested.

      If both of the If-None-Match and If-Modified-Since headers are present in the request as follows: If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates to true; then, S3 returns 304 Not Modified response code.

      For more information about conditional requests, see RFC 7232.

      The following operations are related to GetObject:

      ", "httpChecksum":{ "requestValidationModeMember":"ChecksumMode", "responseAlgorithms":[ @@ -640,7 +640,7 @@ {"shape":"NoSuchKey"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectHEAD.html", - "documentation":"

      The HEAD action retrieves metadata from an object without returning the object itself. This action is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.

      A HEAD request has the same options as a GET action on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic 400 Bad Request, 403 Forbidden or 404 Not Found code. It is not possible to retrieve the exact exception beyond these error codes.

      If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:

      • x-amz-server-side-encryption-customer-algorithm

      • x-amz-server-side-encryption-customer-key

      • x-amz-server-side-encryption-customer-key-MD5

      For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

      • Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

      • The last modified property in this case is the creation date of the object.

      Request headers are limited to 8 KB in size. For more information, see Common Request Headers.

      Consider the following when using request headers:

      • Consideration 1 – If both of the If-Match and If-Unmodified-Since headers are present in the request as follows:

        • If-Match condition evaluates to true, and;

        • If-Unmodified-Since condition evaluates to false;

        Then Amazon S3 returns 200 OK and the data requested.

      • Consideration 2 – If both of the If-None-Match and If-Modified-Since headers are present in the request as follows:

        • If-None-Match condition evaluates to false, and;

        • If-Modified-Since condition evaluates to true;

        Then Amazon S3 returns the 304 Not Modified response code.

      For more information about conditional requests, see RFC 7232.

      Permissions

      You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

      • If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 (\"no such key\") error.

      • If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 (\"access denied\") error.

      The following actions are related to HeadObject:

      " + "documentation":"

      The HEAD action retrieves metadata from an object without returning the object itself. This action is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.

      A HEAD request has the same options as a GET action on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic 400 Bad Request, 403 Forbidden or 404 Not Found code. It is not possible to retrieve the exact exception beyond these error codes.

      If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:

      • x-amz-server-side-encryption-customer-algorithm

      • x-amz-server-side-encryption-customer-key

      • x-amz-server-side-encryption-customer-key-MD5

      For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

      • Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 Bad Request error.

      • The last modified property in this case is the creation date of the object.

      Request headers are limited to 8 KB in size. For more information, see Common Request Headers.

      Consider the following when using request headers:

      • Consideration 1 – If both of the If-Match and If-Unmodified-Since headers are present in the request as follows:

        • If-Match condition evaluates to true, and;

        • If-Unmodified-Since condition evaluates to false;

        Then Amazon S3 returns 200 OK and the data requested.

      • Consideration 2 – If both of the If-None-Match and If-Modified-Since headers are present in the request as follows:

        • If-None-Match condition evaluates to false, and;

        • If-Modified-Since condition evaluates to true;

        Then Amazon S3 returns the 304 Not Modified response code.

      For more information about conditional requests, see RFC 7232.

      Permissions

      You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3. If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

      • If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 error.

      • If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 error.

      The following actions are related to HeadObject:

      " }, "ListBucketAnalyticsConfigurations":{ "name":"ListBucketAnalyticsConfigurations", @@ -776,7 +776,7 @@ }, "input":{"shape":"PutBucketAclRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTacl.html", - "documentation":"

      Sets the permissions on an existing bucket using access control lists (ACL). For more information, see Using ACLs. To set the ACL of a bucket, you must have WRITE_ACP permission.

      You can use one of the following two ways to set a bucket's permissions:

      • Specify the ACL in the request body

      • Specify permissions using request headers

      You cannot specify access permission using both the body and the request headers.

      Depending on your application needs, you may choose to set the ACL on a bucket using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, then you can continue to use that approach.

      If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return the AccessControlListNotSupported error code. Requests to read ACLs are still supported. For more information, see Controlling object ownership in the Amazon S3 User Guide.

      Permissions

      You can set access permissions using one of the following methods:

      • Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.

      • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use the x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

        You specify each grantee as a type=value pair, where the type is one of the following:

        • id – if the value specified is the canonical user ID of an Amazon Web Services account

        • uri – if you are granting permissions to a predefined group

        • emailAddress – if the value specified is the email address of an Amazon Web Services account

          Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

          • US East (N. Virginia)

          • US West (N. California)

          • US West (Oregon)

          • Asia Pacific (Singapore)

          • Asia Pacific (Sydney)

          • Asia Pacific (Tokyo)

          • Europe (Ireland)

          • South America (São Paulo)

          For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

        For example, the following x-amz-grant-write header grants create, overwrite, and delete objects permission to LogDelivery group predefined by Amazon S3 and two Amazon Web Services accounts identified by their email addresses.

        x-amz-grant-write: uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\", id=\"111122223333\", id=\"555566667777\"

      You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

      Grantee Values

      You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

      • By the person's ID:

        <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

        DisplayName is optional and ignored in the request

      • By URI:

        <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

      • By Email address:

        <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress>&</Grantee>

        The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

        Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

        • US East (N. Virginia)

        • US West (N. California)

        • US West (Oregon)

        • Asia Pacific (Singapore)

        • Asia Pacific (Sydney)

        • Asia Pacific (Tokyo)

        • Europe (Ireland)

        • South America (São Paulo)

        For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

      The following operations are related to PutBucketAcl:

      ", + "documentation":"

      Sets the permissions on an existing bucket using access control lists (ACL). For more information, see Using ACLs. To set the ACL of a bucket, you must have WRITE_ACP permission.

      You can use one of the following two ways to set a bucket's permissions:

      • Specify the ACL in the request body

      • Specify permissions using request headers

      You cannot specify access permission using both the body and the request headers.

      Depending on your application needs, you may choose to set the ACL on a bucket using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, then you can continue to use that approach.

      If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return the AccessControlListNotSupported error code. Requests to read ACLs are still supported. For more information, see Controlling object ownership in the Amazon S3 User Guide.

      Permissions

      You can set access permissions by using one of the following methods:

      • Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.

      • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use the x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

        You specify each grantee as a type=value pair, where the type is one of the following:

        • id – if the value specified is the canonical user ID of an Amazon Web Services account

        • uri – if you are granting permissions to a predefined group

        • emailAddress – if the value specified is the email address of an Amazon Web Services account

          Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

          • US East (N. Virginia)

          • US West (N. California)

          • US West (Oregon)

          • Asia Pacific (Singapore)

          • Asia Pacific (Sydney)

          • Asia Pacific (Tokyo)

          • Europe (Ireland)

          • South America (São Paulo)

          For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

        For example, the following x-amz-grant-write header grants create, overwrite, and delete objects permission to LogDelivery group predefined by Amazon S3 and two Amazon Web Services accounts identified by their email addresses.

        x-amz-grant-write: uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\", id=\"111122223333\", id=\"555566667777\"

      You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

      Grantee Values

      You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

      • By the person's ID:

        <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

        DisplayName is optional and ignored in the request

      • By URI:

        <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

      • By Email address:

        <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress>&</Grantee>

        The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

        Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

        • US East (N. Virginia)

        • US West (N. California)

        • US West (Oregon)

        • Asia Pacific (Singapore)

        • Asia Pacific (Sydney)

        • Asia Pacific (Tokyo)

        • Europe (Ireland)

        • South America (São Paulo)

        For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

      The following operations are related to PutBucketAcl:

      ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":true @@ -812,7 +812,7 @@ "requestUri":"/{Bucket}?encryption" }, "input":{"shape":"PutBucketEncryptionRequest"}, - "documentation":"

      This action uses the encryption subresource to configure default encryption and Amazon S3 Bucket Keys for an existing bucket.

      By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption for a bucket by using server-side encryption with an Amazon Web Services KMS key (SSE-KMS) or a customer-provided key (SSE-C). If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. For information about bucket default encryption, see Amazon S3 bucket default encryption in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.

      This action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4).

      To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.

      The following operations are related to PutBucketEncryption:

      ", + "documentation":"

      This action uses the encryption subresource to configure default encryption and Amazon S3 Bucket Keys for an existing bucket.

      By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with customer-provided keys (SSE-C). If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. For information about bucket default encryption, see Amazon S3 bucket default encryption in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.

      This action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4).

      To use this operation, you must have permission to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.

      The following operations are related to PutBucketEncryption:

      ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":true @@ -858,7 +858,7 @@ "requestUri":"/{Bucket}?lifecycle" }, "input":{"shape":"PutBucketLifecycleConfigurationRequest"}, - "documentation":"

      Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. Keep in mind that this will overwrite an existing lifecycle configuration, so if you want to retain any configuration details, they must be included in the new lifecycle configuration. For information about lifecycle configuration, see Managing your storage lifecycle.

      Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle.

      Rules

      You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable. Each rule consists of the following:

      • Filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, or a combination of both.

      • Status whether the rule is in effect.

      • One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions.

      For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.

      Permissions

      By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission.

      You can also explicitly deny permissions. Explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

      • s3:DeleteObject

      • s3:DeleteObjectVersion

      • s3:PutLifecycleConfiguration

      For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.

      The following operations are related to PutBucketLifecycleConfiguration:

      ", + "documentation":"

      Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. Keep in mind that this will overwrite an existing lifecycle configuration, so if you want to retain any configuration details, they must be included in the new lifecycle configuration. For information about lifecycle configuration, see Managing your storage lifecycle.

      Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle.

      Rules

      You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable. Each rule consists of the following:

      • A filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, or a combination of both.

      • A status indicating whether the rule is in effect.

      • One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions.

      For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.

      Permissions

      By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission.

      You can also explicitly deny permissions. An explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

      • s3:DeleteObject

      • s3:DeleteObjectVersion

      • s3:PutLifecycleConfiguration

      For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.

      The following operations are related to PutBucketLifecycleConfiguration:

      ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":true @@ -872,7 +872,7 @@ }, "input":{"shape":"PutBucketLoggingRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlogging.html", - "documentation":"

      Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging parameters. All logs are saved to buckets in the same Amazon Web Services Region as the source bucket. To set the logging status of a bucket, you must be the bucket owner.

      The bucket owner is automatically granted FULL_CONTROL to all logs. You use the Grantee request element to grant access to other people. The Permissions request element specifies the kind of access the grantee has to the logs.

      If the target bucket for log delivery uses the bucket owner enforced setting for S3 Object Ownership, you can't use the Grantee request element to grant access to others. Permissions can only be granted using policies. For more information, see Permissions for server access log delivery in the Amazon S3 User Guide.

      Grantee Values

      You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

      • By the person's ID:

        <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

        DisplayName is optional and ignored in the request.

      • By Email address:

        <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>

        The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

      • By URI:

        <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

      To enable logging, you use LoggingEnabled and its children request elements. To disable logging, you use an empty BucketLoggingStatus request element:

      <BucketLoggingStatus xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\" />

      For more information about server access logging, see Server Access Logging in the Amazon S3 User Guide.

      For more information about creating a bucket, see CreateBucket. For more information about returning the logging status of a bucket, see GetBucketLogging.

      The following operations are related to PutBucketLogging:

      ", + "documentation":"

      Set the logging parameters for a bucket and specify permissions for who can view and modify the logging parameters. All logs are saved to buckets in the same Amazon Web Services Region as the source bucket. To set the logging status of a bucket, you must be the bucket owner.

      The bucket owner is automatically granted FULL_CONTROL to all logs. You use the Grantee request element to grant access to other people. The Permissions request element specifies the kind of access the grantee has to the logs.

      If the target bucket for log delivery uses the bucket owner enforced setting for S3 Object Ownership, you can't use the Grantee request element to grant access to others. Permissions can only be granted using policies. For more information, see Permissions for server access log delivery in the Amazon S3 User Guide.

      Grantee Values

      You can specify the person (grantee) to whom you're assigning access rights (by using request elements) in the following ways:

      • By the person's ID:

        <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

        DisplayName is optional and ignored in the request.

      • By Email address:

        <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>

        The grantee is resolved to the CanonicalUser and, in a response to a GetObjectAcl request, appears as the CanonicalUser.

      • By URI:

        <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

      To enable logging, you use LoggingEnabled and its children request elements. To disable logging, you use an empty BucketLoggingStatus request element:

      <BucketLoggingStatus xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\" />

      For more information about server access logging, see Server Access Logging in the Amazon S3 User Guide.

      For more information about creating a bucket, see CreateBucket. For more information about returning the logging status of a bucket, see GetBucketLogging.

      The following operations are related to PutBucketLogging:

      ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":true @@ -885,7 +885,7 @@ "requestUri":"/{Bucket}?metrics" }, "input":{"shape":"PutBucketMetricsConfigurationRequest"}, - "documentation":"

      Sets a metrics configuration (specified by the metrics configuration ID) for the bucket. You can have up to 1,000 metrics configurations per bucket. If you're updating an existing metrics configuration, note that this is a full replacement of the existing metrics configuration. If you don't include the elements you want to keep, they are erased.

      To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

      For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.

      The following operations are related to PutBucketMetricsConfiguration:

      GetBucketLifecycle has the following special error:

      • Error code: TooManyConfigurations

        • Description: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.

        • HTTP Status Code: HTTP 400 Bad Request

      " + "documentation":"

      Sets a metrics configuration (specified by the metrics configuration ID) for the bucket. You can have up to 1,000 metrics configurations per bucket. If you're updating an existing metrics configuration, note that this is a full replacement of the existing metrics configuration. If you don't include the elements you want to keep, they are erased.

      To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

      For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.

      The following operations are related to PutBucketMetricsConfiguration:

      PutBucketMetricsConfiguration has the following special error:

      • Error code: TooManyConfigurations

        • Description: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.

        • HTTP Status Code: HTTP 400 Bad Request

      " }, "PutBucketNotification":{ "name":"PutBucketNotification", @@ -909,7 +909,7 @@ "requestUri":"/{Bucket}?notification" }, "input":{"shape":"PutBucketNotificationConfigurationRequest"}, - "documentation":"

      Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event Notifications.

      Using this API, you can replace an existing notification configuration. The configuration is an XML file that defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to publish an event notification when it detects an event of the specified type.

      By default, your bucket has no event notifications configured. That is, the notification configuration will be an empty NotificationConfiguration.

      <NotificationConfiguration>

      </NotificationConfiguration>

      This action replaces the existing notification configuration with the configuration you include in the request body.

      After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to publish to it by sending a test notification. In the case of Lambda destinations, Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information, see Configuring Notifications for Amazon S3 Events.

      You can disable notifications by adding the empty NotificationConfiguration element.

      For more information about the number of event notification configurations that you can create per bucket, see Amazon S3 service quotas in Amazon Web Services General Reference.

      By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a bucket policy to grant permission to other users to set this configuration with s3:PutBucketNotification permission.

      The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS topic. If the message fails, the entire PUT action will fail, and Amazon S3 will not add the configuration to your bucket.

      If the configuration in the request body includes only one TopicConfiguration specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the x-amz-sns-test-message-id header containing the message ID of the test notification sent to the topic.

      The following action is related to PutBucketNotificationConfiguration:

      " + "documentation":"

      Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event Notifications.

      Using this API, you can replace an existing notification configuration. The configuration is an XML file that defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to publish an event notification when it detects an event of the specified type.

      By default, your bucket has no event notifications configured. That is, the notification configuration will be an empty NotificationConfiguration.

      <NotificationConfiguration>

      </NotificationConfiguration>

      This action replaces the existing notification configuration with the configuration you include in the request body.

      After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to publish to it by sending a test notification. In the case of Lambda destinations, Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information, see Configuring Notifications for Amazon S3 Events.

      You can disable notifications by adding the empty NotificationConfiguration element.

      For more information about the number of event notification configurations that you can create per bucket, see Amazon S3 service quotas in Amazon Web Services General Reference.

      By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a bucket policy to grant permission to other users to set this configuration with the required s3:PutBucketNotification permission.

      The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS topic. If the message fails, the entire PUT action will fail, and Amazon S3 will not add the configuration to your bucket.

      If the configuration in the request body includes only one TopicConfiguration specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the x-amz-sns-test-message-id header containing the message ID of the test notification sent to the topic.

      The following action is related to PutBucketNotificationConfiguration:

      " }, "PutBucketOwnershipControls":{ "name":"PutBucketOwnershipControls", @@ -1013,7 +1013,7 @@ "input":{"shape":"PutObjectRequest"}, "output":{"shape":"PutObjectOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUT.html", - "documentation":"

      Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.

      Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. You cannot use PutObject to only update a single piece of metadata for an existing object. You must put the entire object with updated metadata if you want to update some values.

      Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. To prevent objects from being deleted or overwritten, you can use Amazon S3 Object Lock.

      To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.

      • To successfully complete the PutObject request, you must have the s3:PutObject in your IAM permissions.

      • To successfully change the objects acl of your PutObject request, you must have the s3:PutObjectAcl in your IAM permissions.

      • To successfully set the tag-set with your PutObject request, you must have the s3:PutObjectTagging in your IAM permissions.

      • The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide.

      You have three mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at by rest using server-side encryption with other key options. For more information, see Using Server-Side Encryption.

      When adding a new object, you can use headers to grant ACL-based permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. By default, all objects are private. Only the owner has full access control. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

      If the bucket that you're uploading objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a 400 error with the error code AccessControlListNotSupported. For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide.

      If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner.

      By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide.

      If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects. For more information about versioning, see Adding Objects to Versioning-Enabled Buckets. For information about returning the versioning state of a bucket, see GetBucketVersioning.

      For more information about related Amazon S3 APIs, see the following:

      ", + "documentation":"

      Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.

      Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. You cannot use PutObject to only update a single piece of metadata for an existing object. You must put the entire object with updated metadata if you want to update some values.

      Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. To prevent objects from being deleted or overwritten, you can use Amazon S3 Object Lock.

      To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.

      • To successfully complete the PutObject request, you must have the s3:PutObject in your IAM permissions.

      • To successfully change the objects acl of your PutObject request, you must have the s3:PutObjectAcl in your IAM permissions.

      • To successfully set the tag-set with your PutObject request, you must have the s3:PutObjectTagging in your IAM permissions.

      • The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide.

      You have four mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest by using server-side encryption with other key options. For more information, see Using Server-Side Encryption.

      When adding a new object, you can use headers to grant ACL-based permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. By default, all objects are private. Only the owner has full access control. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

      If the bucket that you're uploading objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a 400 error with the error code AccessControlListNotSupported. For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide.

      If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner.

      By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide.

      If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects. For more information about versioning, see Adding Objects to Versioning-Enabled Buckets. For information about returning the versioning state of a bucket, see GetBucketVersioning.

      For more information about related Amazon S3 APIs, see the following:

      ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":false @@ -1118,7 +1118,7 @@ {"shape":"ObjectAlreadyInActiveTierError"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectRestore.html", - "documentation":"

      Restores an archived copy of an object back into Amazon S3

      This action is not supported by Amazon S3 on Outposts.

      This action performs the following types of requests:

      • select - Perform a select query on an archived object

      • restore an archive - Restore an archived object

      For more information about the S3 structure in the request body, see the following:

      Define the SQL expression for the SELECT type of restoration for your query in the request body's SelectParameters structure. You can use expressions like the following examples.

      • The following expression returns all records from the specified object.

        SELECT * FROM Object

      • Assuming that you are not using any headers for data stored in the object, you can specify columns with positional headers.

        SELECT s._1, s._2 FROM Object s WHERE s._3 > 100

      • If you have headers and you set the fileHeaderInfo in the CSV structure in the request body to USE, you can specify headers in the query. (If you set the fileHeaderInfo field to IGNORE, the first row is skipped for the query.) You cannot mix ordinal positions with header column names.

        SELECT s.Id, s.FirstName, s.SSN FROM S3Object s

      When making a select request, you can also do the following:

      • To expedite your queries, specify the Expedited tier. For more information about tiers, see \"Restoring Archives,\" later in this topic.

      • Specify details about the data serialization format of both the input object that is being queried and the serialization of the CSV-encoded query results.

      The following are additional important facts about the select feature:

      • The output results are new Amazon S3 objects. Unlike archive retrievals, they are stored until explicitly deleted-manually or through a lifecycle configuration.

      • You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't duplicate requests, so avoid issuing duplicate requests.

      • Amazon S3 accepts a select request even if the object has already been restored. A select request doesn’t return error response 409.

      Permissions

      To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.

      Restoring objects

      Objects that you archive to the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first initiate a restore request, and then wait until a temporary copy of the object is available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you must restore the object for the duration (number of days) that you specify. For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier.

      To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.

      When restoring an archived object, you can specify one of the following data access tier options in the Tier element of the request body:

      • Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.

      • Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.

      • Bulk - Bulk retrievals free for objects stored in the S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.

      For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon S3 User Guide.

      You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon S3 User Guide.

      To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon S3 User Guide.

      After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request-there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

      If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon S3 User Guide.

      Responses

      A successful action returns either the 200 OK or 202 Accepted status code.

      • If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response.

      • If the object is previously restored, Amazon S3 returns 200 OK in the response.

      • Special errors:

        • Code: RestoreAlreadyInProgress

        • Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.)

        • HTTP Status Code: 409 Conflict

        • SOAP Fault Code Prefix: Client

        • Code: GlacierExpeditedRetrievalNotAvailable

        • Cause: expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)

        • HTTP Status Code: 503

        • SOAP Fault Code Prefix: N/A

      The following operations are related to RestoreObject:

      ", + "documentation":"

      Restores an archived copy of an object back into Amazon S3

      This action is not supported by Amazon S3 on Outposts.

      This action performs the following types of requests:

      • select - Perform a select query on an archived object

      • restore an archive - Restore an archived object

      For more information about the S3 structure in the request body, see the following:

      Define the SQL expression for the SELECT type of restoration for your query in the request body's SelectParameters structure. You can use expressions like the following examples.

      • The following expression returns all records from the specified object.

        SELECT * FROM Object

      • Assuming that you are not using any headers for data stored in the object, you can specify columns with positional headers.

        SELECT s._1, s._2 FROM Object s WHERE s._3 > 100

      • If you have headers and you set the fileHeaderInfo in the CSV structure in the request body to USE, you can specify headers in the query. (If you set the fileHeaderInfo field to IGNORE, the first row is skipped for the query.) You cannot mix ordinal positions with header column names.

        SELECT s.Id, s.FirstName, s.SSN FROM S3Object s

      When making a select request, you can also do the following:

      • To expedite your queries, specify the Expedited tier. For more information about tiers, see \"Restoring Archives,\" later in this topic.

      • Specify details about the data serialization format of both the input object that is being queried and the serialization of the CSV-encoded query results.

      The following are additional important facts about the select feature:

      • The output results are new Amazon S3 objects. Unlike archive retrievals, they are stored until explicitly deleted-manually or through a lifecycle configuration.

      • You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't duplicate requests, so avoid issuing duplicate requests.

      • Amazon S3 accepts a select request even if the object has already been restored. A select request doesn’t return error response 409.

      Permissions

      To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.

      Restoring objects

      Objects that you archive to the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first initiate a restore request, and then wait until a temporary copy of the object is available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you must restore the object for the duration (number of days) that you specify. For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier.

      To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.

      When restoring an archived object, you can specify one of the following data access tier options in the Tier element of the request body:

      • Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.

      • Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.

      • Bulk - Bulk retrievals are free for objects stored in the S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.

      For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon S3 User Guide.

      You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon S3 User Guide.

      To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon S3 User Guide.

      After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

      If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon S3 User Guide.

      Responses

      A successful action returns either the 200 OK or 202 Accepted status code.

      • If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response.

      • If the object is previously restored, Amazon S3 returns 200 OK in the response.

      • Special errors:

        • Code: RestoreAlreadyInProgress

        • Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.)

        • HTTP Status Code: 409 Conflict

        • SOAP Fault Code Prefix: Client

        • Code: GlacierExpeditedRetrievalNotAvailable

        • Cause: Expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)

        • HTTP Status Code: 503

        • SOAP Fault Code Prefix: N/A

      The following operations are related to RestoreObject:

      ", "alias":"PostObjectRestore", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", @@ -1137,7 +1137,7 @@ "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} }, "output":{"shape":"SelectObjectContentOutput"}, - "documentation":"

      This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.

      This action is not supported by Amazon S3 on Outposts.

      For more information about Amazon S3 Select, see Selecting Content from Objects and SELECT Command in the Amazon S3 User Guide.

      Permissions

      You must have s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon S3 User Guide.

      Object Data Formats

      You can use Amazon S3 Select to query objects that have the following format properties:

      • CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.

      • UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.

      • GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects.

      • Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption.

        For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.

        For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide.

      Working with the Response Body

      Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response.

      GetObject Support

      The SelectObjectContent action does not support the following GetObject functionality. For more information, see GetObject.

      • Range: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return.

      • GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. For more information about storage classes, see Storage Classes in the Amazon S3 User Guide.

      Special Errors

      For a list of special errors for this operation, see List of SELECT Object Content Error Codes

      The following operations are related to SelectObjectContent:

      " + "documentation":"

      This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.

      This action is not supported by Amazon S3 on Outposts.

      For more information about Amazon S3 Select, see Selecting Content from Objects and SELECT Command in the Amazon S3 User Guide.

      Permissions

      You must have s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon S3 User Guide.

      Object Data Formats

      You can use Amazon S3 Select to query objects that have the following format properties:

      • CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.

      • UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.

      • GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects.

      • Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption.

        For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.

        For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide.

      Working with the Response Body

      Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response.

      GetObject Support

      The SelectObjectContent action does not support the following GetObject functionality. For more information, see GetObject.

      • Range: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return.

      • The GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes, or the ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class: You cannot query objects in the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS or DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class. For more information about storage classes, see Using Amazon S3 storage classes in the Amazon S3 User Guide.

      Special Errors

      For a list of special errors for this operation, see List of SELECT Object Content Error Codes

      The following operations are related to SelectObjectContent:

      " }, "UploadPart":{ "name":"UploadPart", @@ -1782,7 +1782,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

      ", + "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

      ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -1794,13 +1794,13 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

      If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key that was used for the object.

      ", + "documentation":"

      If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.

      ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

      Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).

      ", + "documentation":"

      Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).

      ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -2010,7 +2010,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

      ", + "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).

      ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -2028,7 +2028,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

      If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key that was used for the object.

      ", + "documentation":"

      If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.

      ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -2040,7 +2040,7 @@ }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

      Indicates whether the copied object uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).

      ", + "documentation":"

      Indicates whether the copied object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).

      ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -2195,7 +2195,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

      ", + "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).

      ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -2231,7 +2231,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

      Specifies the Amazon Web Services KMS key ID to use for object encryption. All GET and PUT requests for an object protected by Amazon Web Services KMS will fail if not made via SSL or using SigV4. For information about configuring using any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

      ", + "documentation":"

      Specifies the KMS key ID to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

      ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -2243,7 +2243,7 @@ }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

      Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

      Specifying this header with a COPY action doesn’t affect bucket-level settings for S3 Bucket Key.

      ", + "documentation":"

      Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

      Specifying this header with a COPY action doesn’t affect bucket-level settings for S3 Bucket Key.

      ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -2502,7 +2502,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

      ", + "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

      ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -2520,7 +2520,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

      If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key that was used for the object.

      ", + "documentation":"

      If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.

      ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -2532,7 +2532,7 @@ }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

      Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).

      ", + "documentation":"

      Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).

      ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -2643,7 +2643,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

      ", + "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

      ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -2679,7 +2679,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

      Specifies the ID of the symmetric encryption customer managed key to use for object encryption. All GET and PUT requests for an object protected by Amazon Web Services KMS will fail if not made via SSL or using SigV4. For information about configuring using any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

      ", + "documentation":"

      Specifies the ID of the symmetric encryption customer managed key to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

      ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -2691,7 +2691,7 @@ }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

      Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

      Specifying this header with an object action doesn’t affect bucket-level settings for S3 Bucket Key.

      ", + "documentation":"

      Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

      Specifying this header with an object action doesn’t affect bucket-level settings for S3 Bucket Key.

      ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -2769,7 +2769,7 @@ "members":{ "Objects":{ "shape":"ObjectIdentifierList", - "documentation":"

      The objects to delete.

      ", + "documentation":"

      The objects to delete.

      ", "locationName":"Object" }, "Quiet":{ @@ -3397,7 +3397,7 @@ }, "KMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

      If the encryption type is aws:kms, this optional value specifies the ID of the symmetric encryption customer managed key to use for encryption of job results. Amazon S3 only supports symmetric encryption KMS keys. For more information, see Asymmetric keys in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer Guide.

      " + "documentation":"

      If the encryption type is aws:kms, this optional value specifies the ID of the symmetric encryption customer managed key to use for encryption of job results. Amazon S3 only supports symmetric encryption KMS keys. For more information, see Asymmetric keys in KMS in the Amazon Web Services Key Management Service Developer Guide.

      " }, "KMSContext":{ "shape":"KMSContext", @@ -3591,6 +3591,11 @@ "Status":{ "shape":"BucketAccelerateStatus", "documentation":"

      The accelerate configuration of the bucket.

      " + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" } } }, @@ -3610,6 +3615,11 @@ "documentation":"

      The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

      ", "location":"header", "locationName":"x-amz-expected-bucket-owner" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" } } }, @@ -4648,7 +4658,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

      ", + "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).

      ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -4672,13 +4682,13 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

      If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key that was used for the object.

      ", + "documentation":"

      If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.

      ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

      Indicates whether the object uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).

      ", + "documentation":"

      Indicates whether the object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).

      ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -5270,7 +5280,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

      ", + "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).

      ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -5294,13 +5304,13 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

      If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key that was used for the object.

      ", + "documentation":"

      If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.

      ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

      Indicates whether the object uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).

      ", + "documentation":"

      Indicates whether the object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).

      ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -6196,6 +6206,11 @@ "EncodingType":{ "shape":"EncodingType", "documentation":"

      Encoding type used by Amazon S3 to encode object keys in the response.

      If you specify encoding-type request parameter, Amazon S3 includes this element in the response, and returns encoded key name values in the following response elements:

      Delimiter, KeyMarker, Prefix, NextKeyMarker, Key.

      " + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" } } }, @@ -6250,6 +6265,11 @@ "documentation":"

      The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

      ", "location":"header", "locationName":"x-amz-expected-bucket-owner" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" } } }, @@ -6309,6 +6329,11 @@ "EncodingType":{ "shape":"EncodingType", "documentation":"

      Encoding type used by Amazon S3 to encode object key names in the XML response.

      If you specify encoding-type request parameter, Amazon S3 includes this element in the response, and returns encoded key name values in the following response elements:

      KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter.

      " + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" } } }, @@ -6363,6 +6388,11 @@ "documentation":"

      The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

      ", "location":"header", "locationName":"x-amz-expected-bucket-owner" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" } } }, @@ -6408,6 +6438,11 @@ "EncodingType":{ "shape":"EncodingType", "documentation":"

      Encoding type used by Amazon S3 to encode object keys in the response.

      " + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" } } }, @@ -6515,6 +6550,11 @@ "StartAfter":{ "shape":"StartAfter", "documentation":"

      If StartAfter was sent with the request, it is included in the response.

      " + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" } } }, @@ -8637,7 +8677,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

      ", + "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).

      ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -8661,7 +8701,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

      If x-amz-server-side-encryption is has a valid value of aws:kms, this header specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key that was used for the object.

      ", + "documentation":"

      If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, this header specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.

      ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -8673,7 +8713,7 @@ }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

      Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).

      ", + "documentation":"

      Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).

      ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -8825,7 +8865,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

      ", + "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).

      ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -8861,7 +8901,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

      If x-amz-server-side-encryption has a valid value of aws:kms, this header specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key that was used for the object. If you specify x-amz-server-side-encryption:aws:kms, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key to protect the data. If the KMS key does not exist in the same account issuing the command, you must use the full ARN and not just the ID.

      ", + "documentation":"

      If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, this header specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data. If the KMS key does not exist in the same account that's issuing the command, you must use the full ARN and not just the ID.

      ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -8873,7 +8913,7 @@ }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

      Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

      Specifying this header with a PUT action doesn’t affect bucket-level settings for S3 Bucket Key.

      ", + "documentation":"

      Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

      Specifying this header with a PUT action doesn’t affect bucket-level settings for S3 Bucket Key.

      ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -9655,7 +9695,7 @@ "members":{ "KeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

      Specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key to use for encrypting inventory reports.

      " + "documentation":"

      Specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key to use for encrypting inventory reports.

      " } }, "documentation":"

      Specifies the use of SSE-KMS to encrypt delivered inventory reports.

      ", @@ -9834,7 +9874,8 @@ "type":"string", "enum":[ "AES256", - "aws:kms" + "aws:kms", + "aws:kms:dsse" ] }, "ServerSideEncryptionByDefault":{ @@ -10206,7 +10247,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

      ", + "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

      ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -10224,13 +10265,13 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

      If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key that was used for the object.

      ", + "documentation":"

      If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.

      ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

      Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).

      ", + "documentation":"

      Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).

      ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -10373,7 +10414,7 @@ "members":{ "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

      ", + "documentation":"

      The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

      ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -10421,13 +10462,13 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

      If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key was used for the object.

      ", + "documentation":"

      If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.

      ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

      Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).

      ", + "documentation":"

      Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).

      ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/CopyObjectHelperTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/CopyObjectHelperTest.java index 30b779910c47..d3593570a6e6 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/CopyObjectHelperTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/CopyObjectHelperTest.java @@ -221,6 +221,41 @@ void multiPartCopy_completeMultipartFailed_shouldFailAndAbort() { assertThat(actualRequest.uploadId()).isEqualTo(MULTIPART_ID); } + @Test + void multiPartCopy_contentSizeExceeds10000Parts_shouldAdjustPartSize() { + long contentLength = 1024L * 10_000 * 2; // twice too many parts with configures part size + + stubSuccessfulHeadObjectCall(contentLength); + stubSuccessfulCreateMulipartCall(); + stubSuccessfulUploadPartCopyCalls(); + stubSuccessfulCompleteMultipartCall(); + + CopyObjectRequest copyObjectRequest = copyObjectRequest(); + + CompletableFuture future = copyHelper.copyObject(copyObjectRequest); + + CopyObjectResponse actualResponse = future.join(); + assertThat(actualResponse.copyObjectResult()).isNotNull(); + + ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(UploadPartCopyRequest.class); + verify(s3AsyncClient, times(10_000)).uploadPartCopy(argumentCaptor.capture()); + List actualUploadPartCopyRequests = argumentCaptor.getAllValues(); + assertThat(actualUploadPartCopyRequests).allSatisfy(d -> { + assertThat(d.sourceBucket()).isEqualTo(SOURCE_BUCKET); + assertThat(d.sourceKey()).isEqualTo(SOURCE_KEY); + assertThat(d.destinationBucket()).isEqualTo(DESTINATION_BUCKET); + assertThat(d.destinationKey()).isEqualTo(DESTINATION_KEY); + }); + + long expectedPartSize = 2048L; + for (int i = 0; i < actualUploadPartCopyRequests.size(); i++) { + int rangeStart = (int) expectedPartSize * 
i; + int rangeEnd = (int) (rangeStart + (expectedPartSize - 1)); + assertThat(actualUploadPartCopyRequests.get(i).copySourceRange()).isEqualTo( + String.format("bytes=%d-%d", rangeStart, rangeEnd)); + } + } + @Test void copy_cancelResponseFuture_shouldPropagate() { CopyObjectRequest copyObjectRequest = copyObjectRequest(); diff --git a/services/s3control/pom.xml b/services/s3control/pom.xml index 9dc386bd1213..be9eb25d0bf7 100644 --- a/services/s3control/pom.xml +++ b/services/s3control/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT s3control AWS Java SDK :: Services :: Amazon S3 Control diff --git a/services/s3control/src/main/resources/codegen-resources/customization.config b/services/s3control/src/main/resources/codegen-resources/customization.config index 9248a2ded08c..2e66096fb5f1 100644 --- a/services/s3control/src/main/resources/codegen-resources/customization.config +++ b/services/s3control/src/main/resources/codegen-resources/customization.config @@ -12,47 +12,9 @@ "skipEndpointTests": { "Vanilla outposts with ARN region + access point ARN@us-west-2": "SDK defaults to useArnRegion = false", "govcloud with fips + arn region@us-gov-west-1": "SDK defaults to useArnRegion = false", - "outpost access points do not support dualstack@us-west-2": "Does not work for client tests because operationInputs needed", - "outpost access points do not support dualstack@cn-north-1": "Does not work for client tests because operationInputs needed", - "outpost access points do not support dualstack@af-south-1": "Does not work for client tests because operationInputs needed", - "invalid ARN: must be include outpost ID@us-west-2": "Does not work for client tests because operationInputs needed", - "invalid ARN: must specify access point@us-west-2": "Does not work for client tests because operationInputs needed", - "invalid ARN@us-west-2": "Does not work for client tests because operationInputs needed", - "when set, AccountId drives 
AP construction@us-west-2": "Does not work for client tests because operationInputs needed", - "access point name with a bucket arn@us-west-2": "Does not work for client tests because operationInputs needed", - "bucket arn with access point name@us-west-2": "Does not work for client tests because operationInputs needed", - "create bucket with outposts@us-west-2": "Does not work for client tests because operationInputs needed", - "dualstack cannot be used with outposts when an endpoint URL is set@us-west-2.": "Does not work for client tests because operationInputs needed", - "Dual-stack cannot be used with outposts@us-west-2": "Does not work for client tests because operationInputs needed", "bucket arn with UseArnRegion = true (arn region supercedes client configured region)@us-west-2": "Test is broken, uses incorrect Param name for UseArnRegion", - "Outposts do not support dualstack@us-west-2": "Does not work for client tests because operationInputs needed", - "Invalid ARN: missing outpost id and bucket@us-west-2": "Does not work for client tests because operationInputs needed", - "Invalid ARN: missing bucket@us-west-2": "Does not work for client tests because operationInputs needed", - "Invalid ARN: missing outpost and bucket ids@us-west-2": "Does not work for client tests because operationInputs needed", - "Invalid ARN: missing bucket id@us-west-2": "Does not work for client tests because operationInputs needed", - "account id inserted into hostname@us-west-2": "Does not work for client tests because operationInputs needed", - "account id prefix with dualstack@us-east-1": "Does not work for client tests because operationInputs needed", - "account id prefix with fips@us-east-1": "Does not work for client tests because operationInputs needed", - "custom account id prefix with fips@us-east-1": "Does not work for client tests because operationInputs needed", - "standard url @ us-east-1": "Does not work for client tests because operationInputs needed (so an operation 
that doesn't required AccountId is used)", - "fips url @ us-east-1": "Does not work for client tests because operationInputs needed (so an operation that doesn't required AccountId is used)", - "dualstack url @ us-east-1": "Does not work for client tests because operationInputs needed (so an operation that doesn't required AccountId is used)", - "fips,dualstack url @ us-east-1": "Does not work for client tests because operationInputs needed (so an operation that doesn't required AccountId is used)", - "standard url @ cn-north-1": "Does not work for client tests because operationInputs needed (so an operation that doesn't required AccountId is used)", - "custom endpoint, fips and dualstack": "Does not work for client tests because operationInputs needed (so an operation that doesn't required AccountId is used)", - "custom endpoint, fips": "Does not work for client tests because operationInputs needed (so an operation that doesn't required AccountId is used)", - "custom endpoint, dualstack": "Does not work for client tests because operationInputs needed (so an operation that doesn't required AccountId is used)", - "region not set": "SDK client builder does its own validation", - "invalid partition": "SDK client builder does its own validation", - "outpost accesspoint ARN with missing accountId": "Does not work for client tests because operationInputs needed", - "bucket ARN with missing accountId": "Does not work for client tests because operationInputs needed", - "access point name with an accesspoint arn@us-west-2": "Does not work for client tests because operationInputs needed", - "bucket ARN with mismatched accountId": "Does not work for client tests because operationInputs needed", - "OutpostId with invalid region": "Does not work for client tests because operationInputs needed", - "OutpostId with RequireAccountId unset": "Does not work for client tests because operationInputs needed", "Accesspoint ARN with region mismatch and UseArnRegion unset": "SDK defaults 
to useArnRegion = false", - "Bucket ARN with region mismatch and UseArnRegion unset": "SDK defaults to useArnRegion = false", - "Accesspoint ARN with region mismatch, UseArnRegion=false and custom endpoint": "Does not work for client tests because operationInputs needed (so an operation that doesn't required AccountId is used)" + "Bucket ARN with region mismatch and UseArnRegion unset": "SDK defaults to useArnRegion = false" }, "interceptors": [ "software.amazon.awssdk.services.s3control.internal.interceptors.ConfigureSignerInterceptor", diff --git a/services/s3outposts/pom.xml b/services/s3outposts/pom.xml index 13fa6776e418..5e9e506803f8 100644 --- a/services/s3outposts/pom.xml +++ b/services/s3outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT s3outposts AWS Java SDK :: Services :: S3 Outposts diff --git a/services/sagemaker/pom.xml b/services/sagemaker/pom.xml index df99d957dfca..a4069c37f286 100644 --- a/services/sagemaker/pom.xml +++ b/services/sagemaker/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 sagemaker diff --git a/services/sagemaker/src/main/resources/codegen-resources/service-2.json b/services/sagemaker/src/main/resources/codegen-resources/service-2.json index a9b5428028f0..6e60773c5c90 100644 --- a/services/sagemaker/src/main/resources/codegen-resources/service-2.json +++ b/services/sagemaker/src/main/resources/codegen-resources/service-2.json @@ -137,7 +137,7 @@ {"shape":"ResourceInUse"}, {"shape":"ResourceLimitExceeded"} ], - "documentation":"

      Creates an Autopilot job.

      Find the best-performing model after you run an Autopilot job by calling DescribeAutoMLJob.

      For information about how to use Autopilot, see Automate Model Development with Amazon SageMaker Autopilot.

      " + "documentation":"

      Creates an Autopilot job also referred to as Autopilot experiment or AutoML job.

      We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility.

      CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as non-tabular problem types such as image or text classification.

      Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

      You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob.

      " }, "CreateAutoMLJobV2":{ "name":"CreateAutoMLJobV2", @@ -151,7 +151,7 @@ {"shape":"ResourceInUse"}, {"shape":"ResourceLimitExceeded"} ], - "documentation":"

      Creates an Amazon SageMaker AutoML job that uses non-tabular data such as images or text for Computer Vision or Natural Language Processing problems.

      Find the resulting model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.

      To create an AutoMLJob using tabular data, see CreateAutoMLJob.

      This API action is callable through SageMaker Canvas only. Calling it directly from the CLI or an SDK results in an error.

      " + "documentation":"

      Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2.

      CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and DescribeAutoMLJob which offer backward compatibility.

      CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as non-tabular problem types such as image or text classification.

      Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

      For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig.

      You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.

      " }, "CreateCodeRepository":{ "name":"CreateCodeRepository", @@ -1406,7 +1406,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

      Returns information about an Amazon SageMaker AutoML job.

      " + "documentation":"

      Returns information about an AutoML job created by calling CreateAutoMLJob.

      AutoML jobs created by calling CreateAutoMLJobV2 cannot be described by DescribeAutoMLJob.

      " }, "DescribeAutoMLJobV2":{ "name":"DescribeAutoMLJobV2", @@ -1419,7 +1419,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

      Returns information about an Amazon SageMaker AutoML V2 job.

      This API action is callable through SageMaker Canvas only. Calling it directly from the CLI or an SDK results in an error.

      " + "documentation":"

      Returns information about an AutoML job created by calling CreateAutoMLJobV2 or CreateAutoMLJob.

      " }, "DescribeCodeRepository":{ "name":"DescribeCodeRepository", @@ -1657,7 +1657,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

      Gets a description of a hyperparameter tuning job.

      " + "documentation":"

      Returns a description of a hyperparameter tuning job, depending on the fields selected. These fields can include the name, Amazon Resource Name (ARN), job status of your tuning job and more.

      " }, "DescribeImage":{ "name":"DescribeImage", @@ -4787,7 +4787,7 @@ }, "InferenceContainerDefinitions":{ "shape":"AutoMLInferenceContainerDefinitions", - "documentation":"

      The mapping of all supported processing unit (CPU, GPU, etc...) to inference container definitions for the candidate. This field is populated for the V2 API only (for example, for jobs created by calling CreateAutoMLJobV2).

      " + "documentation":"

      The mapping of all supported processing unit (CPU, GPU, etc...) to inference container definitions for the candidate. This field is populated for the AutoML jobs V2 (for example, for jobs created by calling CreateAutoMLJobV2) related to image or text classification problem types only.

      " } }, "documentation":"

      Information about a candidate produced by an AutoML training job, including its status, steps, and other properties.

      " @@ -4920,7 +4920,7 @@ "documentation":"

      The validation fraction (optional) is a float that specifies the portion of the training dataset to be used for validation. The default value is 0.2, and values must be greater than 0 and less than 1. We recommend setting this value to be less than 0.5.

      " } }, - "documentation":"

      This structure specifies how to split the data into train and validation datasets.

      If you are using the V1 API (for example CreateAutoMLJob) or the V2 API for Natural Language Processing problems (for example CreateAutoMLJobV2 with a TextClassificationJobConfig problem type), the validation and training datasets must contain the same headers. Also, for V1 API jobs, the validation dataset must be less than 2 GB in size.

      " + "documentation":"

      This structure specifies how to split the data into train and validation datasets.

      The validation and training datasets must contain the same headers. For jobs created by calling CreateAutoMLJob, the validation dataset must be less than 2 GB in size.

      " }, "AutoMLFailureReason":{ "type":"string", @@ -4974,29 +4974,29 @@ }, "ContentType":{ "shape":"ContentType", - "documentation":"

      The content type of the data from the input source. The following are the allowed content types for different problems:

      • ImageClassification: image/png, image/jpeg, image/*

      • TextClassification: text/csv;header=present

      " + "documentation":"

      The content type of the data from the input source. The following are the allowed content types for different problems:

      • For Tabular problem types: text/csv;header=present or x-application/vnd.amazon+parquet. The default value is text/csv;header=present.

      • For ImageClassification: image/png, image/jpeg, or image/*. The default value is image/*.

      • For TextClassification: text/csv;header=present or x-application/vnd.amazon+parquet. The default value is text/csv;header=present.

      " }, "CompressionType":{ "shape":"CompressionType", - "documentation":"

      The allowed compression types depend on the input format. We allow the compression type Gzip for S3Prefix inputs only. For all other inputs, the compression type should be None. If no compression type is provided, we default to None.

      " + "documentation":"

      The allowed compression types depend on the input format and problem type. We allow the compression type Gzip for S3Prefix inputs on tabular data only. For all other inputs, the compression type should be None. If no compression type is provided, we default to None.

      " }, "DataSource":{ "shape":"AutoMLDataSource", - "documentation":"

      The data source for an AutoML channel.

      " + "documentation":"

      The data source for an AutoML channel (Required).

      " } }, - "documentation":"

      A channel is a named input source that training algorithms can consume. This channel is used for the non tabular training data of an AutoML job using the V2 API. For tabular training data, see AutoMLChannel. For more information, see Channel.

      " + "documentation":"

      A channel is a named input source that training algorithms can consume. This channel is used for AutoML jobs V2 (jobs created by calling CreateAutoMLJobV2).

      " }, "AutoMLJobCompletionCriteria":{ "type":"structure", "members":{ "MaxCandidates":{ "shape":"MaxCandidates", - "documentation":"

      The maximum number of times a training job is allowed to run.

      For V2 jobs (jobs created by calling CreateAutoMLJobV2), the supported value is 1.

      " + "documentation":"

      The maximum number of times a training job is allowed to run.

      For job V2s (jobs created by calling CreateAutoMLJobV2), the supported value is 1.

      " }, "MaxRuntimePerTrainingJobInSeconds":{ "shape":"MaxRuntimePerTrainingJobInSeconds", - "documentation":"

      The maximum time, in seconds, that each training job executed inside hyperparameter tuning is allowed to run as part of a hyperparameter tuning job. For more information, see the StoppingCondition used by the CreateHyperParameterTuningJob action.

      For V2 jobs (jobs created by calling CreateAutoMLJobV2), this field controls the runtime of the job candidate.

      " + "documentation":"

      The maximum time, in seconds, that each training job executed inside hyperparameter tuning is allowed to run as part of a hyperparameter tuning job. For more information, see the StoppingCondition used by the CreateHyperParameterTuningJob action.

      For job V2s (jobs created by calling CreateAutoMLJobV2), this field controls the runtime of the job candidate.

      " }, "MaxAutoMLJobRuntimeInSeconds":{ "shape":"MaxAutoMLJobRuntimeInSeconds", @@ -5026,7 +5026,7 @@ }, "Mode":{ "shape":"AutoMLMode", - "documentation":"

      The method that Autopilot uses to train the data. You can either specify the mode manually or let Autopilot choose for you based on the dataset size by selecting AUTO. In AUTO mode, Autopilot chooses ENSEMBLING for datasets smaller than 100 MB, and HYPERPARAMETER_TUNING for larger ones.

      The ENSEMBLING mode uses a multi-stack ensemble model to predict classification and regression tasks directly from your dataset. This machine learning mode combines several base models to produce an optimal predictive model. It then uses a stacking ensemble method to combine predictions from contributing members. A multi-stack ensemble model can provide better performance over a single model by combining the predictive capabilities of multiple models. See Autopilot algorithm support for a list of algorithms supported by ENSEMBLING mode.

      The HYPERPARAMETER_TUNING (HPO) mode uses the best hyperparameters to train the best version of a model. HPO automatically selects an algorithm for the type of problem you want to solve. Then HPO finds the best hyperparameters according to your objective metric. See Autopilot algorithm support for a list of algorithms supported by HYPERPARAMETER_TUNING mode.

      " + "documentation":"

      The method that Autopilot uses to train the data. You can either specify the mode manually or let Autopilot choose for you based on the dataset size by selecting AUTO. In AUTO mode, Autopilot chooses ENSEMBLING for datasets smaller than 100 MB, and HYPERPARAMETER_TUNING for larger ones.

      The ENSEMBLING mode uses a multi-stack ensemble model to predict classification and regression tasks directly from your dataset. This machine learning mode combines several base models to produce an optimal predictive model. It then uses a stacking ensemble method to combine predictions from contributing members. A multi-stack ensemble model can provide better performance over a single model by combining the predictive capabilities of multiple models. See Autopilot algorithm support for a list of algorithms supported by ENSEMBLING mode.

      The HYPERPARAMETER_TUNING (HPO) mode uses the best hyperparameters to train the best version of a model. HPO automatically selects an algorithm for the type of problem you want to solve. Then HPO finds the best hyperparameters according to your objective metric. See Autopilot algorithm support for a list of algorithms supported by HYPERPARAMETER_TUNING mode.

      " } }, "documentation":"

      A collection of settings used for an AutoML job.

      " @@ -5049,10 +5049,10 @@ "members":{ "MetricName":{ "shape":"AutoMLMetricEnum", - "documentation":"

      The name of the objective metric used to measure the predictive quality of a machine learning system. During training, the model's parameters are updated iteratively to optimize its performance based on the feedback provided by the objective metric when evaluating the model on the validation dataset.

      For the list of all available metrics supported by Autopilot, see Autopilot metrics.

      If you do not specify a metric explicitly, the default behavior is to automatically use:

      • MSE: for regression.

      • F1: for binary classification

      • Accuracy: for multiclass classification.

      " + "documentation":"

      The name of the objective metric used to measure the predictive quality of a machine learning system. During training, the model's parameters are updated iteratively to optimize its performance based on the feedback provided by the objective metric when evaluating the model on the validation dataset.

      For the list of all available metrics supported by Autopilot, see Autopilot metrics.

      If you do not specify a metric explicitly, the default behavior is to automatically use:

      • For tabular problem types:

        • Regression: MSE.

        • Binary classification: F1.

        • Multiclass classification: Accuracy.

      • For image or text classification problem types: Accuracy.

      " } }, - "documentation":"

      Specifies a metric to minimize or maximize as the objective of a job. V2 API jobs (for example jobs created by calling CreateAutoMLJobV2), support Accuracy only.

      " + "documentation":"

      Specifies a metric to minimize or maximize as the objective of a job.

      " }, "AutoMLJobObjectiveType":{ "type":"string", @@ -5250,14 +5250,37 @@ "members":{ "ImageClassificationJobConfig":{ "shape":"ImageClassificationJobConfig", - "documentation":"

      Settings used to configure an AutoML job using the V2 API for the image classification problem type.

      " + "documentation":"

      Settings used to configure an AutoML job V2 for the image classification problem type.

      " }, "TextClassificationJobConfig":{ "shape":"TextClassificationJobConfig", - "documentation":"

      Settings used to configure an AutoML job using the V2 API for the text classification problem type.

      " + "documentation":"

      Settings used to configure an AutoML job V2 for the text classification problem type.

      " + }, + "TabularJobConfig":{ + "shape":"TabularJobConfig", + "documentation":"

      Settings used to configure an AutoML job V2 for a tabular problem type (regression, classification).

      " } }, - "documentation":"

      A collection of settings specific to the problem type used to configure an AutoML job using the V2 API. There must be one and only one config of the following type.

      ", + "documentation":"

      A collection of settings specific to the problem type used to configure an AutoML job V2. There must be one and only one config of the following type.

      ", + "union":true + }, + "AutoMLProblemTypeConfigName":{ + "type":"string", + "enum":[ + "ImageClassification", + "TextClassification", + "Tabular" + ] + }, + "AutoMLProblemTypeResolvedAttributes":{ + "type":"structure", + "members":{ + "TabularResolvedAttributes":{ + "shape":"TabularResolvedAttributes", + "documentation":"

      Defines the resolved attributes for the TABULAR problem type.

      " + } + }, + "documentation":"

      The resolved attributes specific to the problem type of an AutoML job V2.

      ", "union":true }, "AutoMLProcessingUnit":{ @@ -5267,6 +5290,18 @@ "GPU" ] }, + "AutoMLResolvedAttributes":{ + "type":"structure", + "members":{ + "AutoMLJobObjective":{"shape":"AutoMLJobObjective"}, + "CompletionCriteria":{"shape":"AutoMLJobCompletionCriteria"}, + "AutoMLProblemTypeResolvedAttributes":{ + "shape":"AutoMLProblemTypeResolvedAttributes", + "documentation":"

      Defines the resolved attributes specific to a problem type.

      " + } + }, + "documentation":"

      The resolved attributes used to configure an AutoML job V2.

      " + }, "AutoMLS3DataSource":{ "type":"structure", "required":[ @@ -5326,6 +5361,30 @@ "Descending" ] }, + "AutoParameter":{ + "type":"structure", + "required":[ + "Name", + "ValueHint" + ], + "members":{ + "Name":{ + "shape":"ParameterKey", + "documentation":"

      The name of the hyperparameter to optimize using Autotune.

      " + }, + "ValueHint":{ + "shape":"ParameterValue", + "documentation":"

      An example value of the hyperparameter to optimize using Autotune.

      " + } + }, + "documentation":"

      The name and an example value of the hyperparameter that you want to use in Autotune. If Automatic model tuning (AMT) determines that your hyperparameter is eligible for Autotune, an optimal hyperparameter range is selected for you.

      " + }, + "AutoParameters":{ + "type":"list", + "member":{"shape":"AutoParameter"}, + "max":100, + "min":0 + }, "AutoRollbackConfig":{ "type":"structure", "members":{ @@ -5336,6 +5395,21 @@ }, "documentation":"

      Automatic rollback configuration for handling endpoint deployment failures and recovery.

      " }, + "Autotune":{ + "type":"structure", + "required":["Mode"], + "members":{ + "Mode":{ + "shape":"AutotuneMode", + "documentation":"

      Set Mode to Enabled if you want to use Autotune.

      " + } + }, + "documentation":"

      A flag to indicate if you want to use Autotune to automatically find optimal values for the following fields:

      • ParameterRanges: The names and ranges of parameters that a hyperparameter tuning job can optimize.

      • ResourceLimits: The maximum resources that can be used for a training job. These resources include the maximum number of training jobs, the maximum runtime of a tuning job, and the maximum number of training jobs to run at the same time.

      • TrainingJobEarlyStoppingType: A flag that specifies whether or not to use early stopping for training jobs launched by a hyperparameter tuning job.

      • RetryStrategy: The number of times to retry a training job.

      • Strategy: Specifies how hyperparameter tuning chooses the combinations of hyperparameter values to use for the training jobs that it launches.

      • ConvergenceDetected: A flag to indicate that Automatic model tuning (AMT) has detected model convergence.

      " + }, + "AutotuneMode":{ + "type":"string", + "enum":["Enabled"] + }, "AwsManagedHumanLoopRequestSource":{ "type":"string", "enum":[ @@ -5640,6 +5714,16 @@ "type":"string", "min":1 }, + "CandidateGenerationConfig":{ + "type":"structure", + "members":{ + "AlgorithmsConfig":{ + "shape":"AutoMLAlgorithmsConfig", + "documentation":"

      Stores the configuration information for the selection of algorithms used to train model candidates on tabular data.

      The list of available algorithms to choose from depends on the training mode set in TabularJobConfig.Mode.

      • AlgorithmsConfig should not be set in AUTO training mode.

      • When AlgorithmsConfig is provided, one AutoMLAlgorithms attribute must be set and one only.

        If the list of algorithms provided as values for AutoMLAlgorithms is empty, CandidateGenerationConfig uses the full set of algorithms for the given training mode.

      • When AlgorithmsConfig is not provided, CandidateGenerationConfig uses the full set of algorithms for the given training mode.

      For the list of all algorithms per problem type and training mode, see AutoMLAlgorithmConfig.

      For more information on each algorithm, see the Algorithm support section in Autopilot developer guide.

      " + } + }, + "documentation":"

      Stores the configuration information for how model candidates are generated using an AutoML job V2.

      " + }, "CandidateName":{ "type":"string", "max":64, @@ -6693,6 +6777,10 @@ "MultiModelConfig":{ "shape":"MultiModelConfig", "documentation":"

      Specifies additional configuration for multi-model endpoints.

      " + }, + "ModelDataSource":{ + "shape":"ModelDataSource", + "documentation":"

      Specifies the location of ML model data to deploy.

      Currently you cannot use ModelDataSource in conjunction with SageMaker batch transform, SageMaker serverless endpoints, SageMaker multi-model endpoints, and SageMaker Marketplace.

      " } }, "documentation":"

      Describes the container, as part of model definition.

      " @@ -7115,7 +7203,7 @@ }, "AutoMLJobObjective":{ "shape":"AutoMLJobObjective", - "documentation":"

      Defines the objective metric used to measure the predictive quality of an AutoML job. You provide an AutoMLJobObjective$MetricName and Autopilot infers whether to minimize or maximize it. For CreateAutoMLJobV2, only Accuracy is supported.

      " + "documentation":"

      Specifies a metric to minimize or maximize as the objective of a job. If not specified, the default objective metric depends on the problem type. See AutoMLJobObjective for the default values.

      " }, "AutoMLJobConfig":{ "shape":"AutoMLJobConfig", @@ -7165,7 +7253,7 @@ }, "AutoMLJobInputDataConfig":{ "shape":"AutoMLJobInputDataConfig", - "documentation":"

      An array of channel objects describing the input data and their location. Each channel is a named input source. Similar to InputDataConfig supported by CreateAutoMLJob. The supported formats depend on the problem type:

      • ImageClassification: S3Prefix, ManifestFile, AugmentedManifestFile

      • TextClassification: S3Prefix

      " + "documentation":"

      An array of channel objects describing the input data and their location. Each channel is a named input source. Similar to the InputDataConfig attribute in the CreateAutoMLJob input parameters. The supported formats depend on the problem type:

      • For Tabular problem types: S3Prefix, ManifestFile.

      • For ImageClassification: S3Prefix, ManifestFile, AugmentedManifestFile.

      • For TextClassification: S3Prefix.

      " }, "OutputDataConfig":{ "shape":"AutoMLOutputDataConfig", @@ -7189,7 +7277,7 @@ }, "AutoMLJobObjective":{ "shape":"AutoMLJobObjective", - "documentation":"

      Specifies a metric to minimize or maximize as the objective of a job. For CreateAutoMLJobV2, only Accuracy is supported.

      " + "documentation":"

      Specifies a metric to minimize or maximize as the objective of a job. If not specified, the default objective metric depends on the problem type. For the list of default values per problem type, see AutoMLJobObjective.

      For tabular problem types, you must either provide both the AutoMLJobObjective and indicate the type of supervised learning problem in AutoMLProblemTypeConfig (TabularJobConfig.ProblemType), or none at all.

      " }, "ModelDeployConfig":{ "shape":"ModelDeployConfig", @@ -7197,7 +7285,7 @@ }, "DataSplitConfig":{ "shape":"AutoMLDataSplitConfig", - "documentation":"

      This structure specifies how to split the data into train and validation datasets.

      If you are using the V1 API (for example CreateAutoMLJob) or the V2 API for Natural Language Processing problems (for example CreateAutoMLJobV2 with a TextClassificationJobConfig problem type), the validation and training datasets must contain the same headers. Also, for V1 API jobs, the validation dataset must be less than 2 GB in size.

      " + "documentation":"

      This structure specifies how to split the data into train and validation datasets.

      The validation and training datasets must contain the same headers. For jobs created by calling CreateAutoMLJob, the validation dataset must be less than 2 GB in size.

      " } } }, @@ -7920,6 +8008,10 @@ "Tags":{ "shape":"TagList", "documentation":"

      An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

      Tags that you specify for the tuning job are also added to all training jobs that the tuning job launches.

      " + }, + "Autotune":{ + "shape":"Autotune", + "documentation":"

      Configures SageMaker Automatic model tuning (AMT) to automatically find optimal parameters for the following fields:

      • ParameterRanges: The names and ranges of parameters that a hyperparameter tuning job can optimize.

      • ResourceLimits: The maximum resources that can be used for a training job. These resources include the maximum number of training jobs, the maximum runtime of a tuning job, and the maximum number of training jobs to run at the same time.

      • TrainingJobEarlyStoppingType: A flag that specifies whether or not to use early stopping for training jobs launched by a hyperparameter tuning job.

      • RetryStrategy: The number of times to retry a training job.

      • Strategy: Specifies how hyperparameter tuning chooses the combinations of hyperparameter values to use for the training jobs that it launches.

      • ConvergenceDetected: A flag to indicate that Automatic model tuning (AMT) has detected model convergence.

      " } } }, @@ -9080,7 +9172,7 @@ }, "InputDataConfig":{ "shape":"InputDataConfig", - "documentation":"

      An array of Channel objects. Each channel is a named input source. InputDataConfig describes the input data and its location.

      Algorithms can accept input data from one or more channels. For example, an algorithm might have two channels of input data, training_data and validation_data. The configuration for each channel provides the S3, EFS, or FSx location where the input data is stored. It also provides information about the stored data: the MIME type, compression method, and whether the data is wrapped in RecordIO format.

      Depending on the input mode that the algorithm supports, SageMaker either copies input data files from an S3 bucket to a local directory in the Docker container, or makes it available as input streams. For example, if you specify an EFS location, input data files are available as input streams. They do not need to be downloaded.

      " + "documentation":"

      An array of Channel objects. Each channel is a named input source. InputDataConfig describes the input data and its location.

      Algorithms can accept input data from one or more channels. For example, an algorithm might have two channels of input data, training_data and validation_data. The configuration for each channel provides the S3, EFS, or FSx location where the input data is stored. It also provides information about the stored data: the MIME type, compression method, and whether the data is wrapped in RecordIO format.

      Depending on the input mode that the algorithm supports, SageMaker either copies input data files from an S3 bucket to a local directory in the Docker container, or makes it available as input streams. For example, if you specify an EFS location, input data files are available as input streams. They do not need to be downloaded.

      Your input must be in the same Amazon Web Services region as your training job.

      " }, "OutputDataConfig":{ "shape":"OutputDataConfig", @@ -10561,6 +10653,21 @@ }, "documentation":"

      The deployment configuration for an endpoint, which contains the desired deployment strategy and rollback configurations.

      " }, + "DeploymentRecommendation":{ + "type":"structure", + "required":["RecommendationStatus"], + "members":{ + "RecommendationStatus":{ + "shape":"RecommendationStatus", + "documentation":"

      Status of the deployment recommendation. The status NOT_APPLICABLE means that SageMaker is unable to provide a default recommendation for the model using the information provided. If the deployment status is IN_PROGRESS, retry your API call after a few seconds to get a COMPLETED deployment recommendation.

      " + }, + "RealTimeInferenceRecommendations":{ + "shape":"RealTimeInferenceRecommendations", + "documentation":"

      A list of RealTimeInferenceRecommendation items.

      " + } + }, + "documentation":"

      A set of recommended deployment configurations for the model. To get more advanced recommendations, see CreateInferenceRecommendationsJob to create an inference recommendation job.

      " + }, "DeploymentStage":{ "type":"structure", "required":[ @@ -11031,7 +11138,7 @@ }, "ResolvedAttributes":{ "shape":"ResolvedAttributes", - "documentation":"

      Contains ProblemType, AutoMLJobObjective, and CompletionCriteria. If you do not provide these values, they are auto-inferred. If you do provide them, the values used are the ones you provide.

      " + "documentation":"

      Contains ProblemType, AutoMLJobObjective, and CompletionCriteria. If you do not provide these values, they are inferred.

      " }, "ModelDeployConfig":{ "shape":"ModelDeployConfig", @@ -11049,7 +11156,7 @@ "members":{ "AutoMLJobName":{ "shape":"AutoMLJobName", - "documentation":"

      Requests information about an AutoML V2 job using its unique name.

      " + "documentation":"

      Requests information about an AutoML job V2 using its unique name.

      " } } }, @@ -11069,11 +11176,11 @@ "members":{ "AutoMLJobName":{ "shape":"AutoMLJobName", - "documentation":"

      Returns the name of the AutoML V2 job.

      " + "documentation":"

      Returns the name of the AutoML job V2.

      " }, "AutoMLJobArn":{ "shape":"AutoMLJobArn", - "documentation":"

      Returns the Amazon Resource Name (ARN) of the AutoML V2 job.

      " + "documentation":"

      Returns the Amazon Resource Name (ARN) of the AutoML job V2.

      " }, "AutoMLJobInputDataConfig":{ "shape":"AutoMLJobInputDataConfig", @@ -11093,15 +11200,15 @@ }, "AutoMLProblemTypeConfig":{ "shape":"AutoMLProblemTypeConfig", - "documentation":"

      Returns the configuration settings of the problem type set for the AutoML V2 job.

      " + "documentation":"

      Returns the configuration settings of the problem type set for the AutoML job V2.

      " }, "CreationTime":{ "shape":"Timestamp", - "documentation":"

      Returns the creation time of the AutoML V2 job.

      " + "documentation":"

      Returns the creation time of the AutoML job V2.

      " }, "EndTime":{ "shape":"Timestamp", - "documentation":"

      Returns the end time of the AutoML V2 job.

      " + "documentation":"

      Returns the end time of the AutoML job V2.

      " }, "LastModifiedTime":{ "shape":"Timestamp", @@ -11109,11 +11216,11 @@ }, "FailureReason":{ "shape":"AutoMLFailureReason", - "documentation":"

      Returns the reason for the failure of the AutoML V2 job, when applicable.

      " + "documentation":"

      Returns the reason for the failure of the AutoML job V2, when applicable.

      " }, "PartialFailureReasons":{ "shape":"AutoMLPartialFailureReasons", - "documentation":"

      Returns a list of reasons for partial failures within an AutoML V2 job.

      " + "documentation":"

      Returns a list of reasons for partial failures within an AutoML job V2.

      " }, "BestCandidate":{ "shape":"AutoMLCandidate", @@ -11121,11 +11228,11 @@ }, "AutoMLJobStatus":{ "shape":"AutoMLJobStatus", - "documentation":"

      Returns the status of the AutoML V2 job.

      " + "documentation":"

      Returns the status of the AutoML job V2.

      " }, "AutoMLJobSecondaryStatus":{ "shape":"AutoMLJobSecondaryStatus", - "documentation":"

      Returns the secondary status of the AutoML V2 job.

      " + "documentation":"

      Returns the secondary status of the AutoML job V2.

      " }, "ModelDeployConfig":{ "shape":"ModelDeployConfig", @@ -11142,6 +11249,15 @@ "SecurityConfig":{ "shape":"AutoMLSecurityConfig", "documentation":"

      Returns the security configuration for traffic encryption or Amazon VPC settings.

      " + }, + "AutoMLJobArtifacts":{"shape":"AutoMLJobArtifacts"}, + "ResolvedAttributes":{ + "shape":"AutoMLResolvedAttributes", + "documentation":"

      Returns the resolved attributes used by the AutoML job V2.

      " + }, + "AutoMLProblemTypeConfigName":{ + "shape":"AutoMLProblemTypeConfigName", + "documentation":"

      Returns the name of the problem type configuration set for the AutoML job V2.

      " } } }, @@ -12413,7 +12529,7 @@ "members":{ "HyperParameterTuningJobName":{ "shape":"HyperParameterTuningJobName", - "documentation":"

      The name of the tuning job.

      " + "documentation":"

      The name of the hyperparameter tuning job.

      " }, "HyperParameterTuningJobArn":{ "shape":"HyperParameterTuningJobArn", @@ -12475,7 +12591,11 @@ "shape":"HyperParameterTuningJobCompletionDetails", "documentation":"

      Tuning job completion information returned as the response from a hyperparameter tuning job. This information tells if your tuning job has or has not converged. It also includes the number of training jobs that have not improved model performance as evaluated against the objective function.

      " }, - "ConsumedResources":{"shape":"HyperParameterTuningJobConsumedResources"} + "ConsumedResources":{"shape":"HyperParameterTuningJobConsumedResources"}, + "Autotune":{ + "shape":"Autotune", + "documentation":"

      A flag to indicate if autotune is enabled for the hyperparameter tuning job.

      " + } } }, "DescribeImageRequest":{ @@ -13230,6 +13350,10 @@ "EnableNetworkIsolation":{ "shape":"Boolean", "documentation":"

      If True, no inbound or outbound network calls can be made to or from the model container.

      " + }, + "DeploymentRecommendation":{ + "shape":"DeploymentRecommendation", + "documentation":"

      A set of recommended deployment configurations for the model.

      " } } }, @@ -13728,6 +13852,10 @@ "ParallelismConfiguration":{ "shape":"ParallelismConfiguration", "documentation":"

      The parallelism configuration applied to the pipeline.

      " + }, + "SelectiveExecutionConfig":{ + "shape":"SelectiveExecutionConfig", + "documentation":"

      The selective execution configuration applied to the pipeline run.

      " } } }, @@ -13737,7 +13865,7 @@ "members":{ "PipelineName":{ "shape":"PipelineNameOrArn", - "documentation":"

      The name of the pipeline to describe.

      " + "documentation":"

      The name or Amazon Resource Name (ARN) of the pipeline to describe.

      " } } }, @@ -17677,7 +17805,7 @@ }, "InstanceCount":{ "shape":"TrainingInstanceCount", - "documentation":"

      The number of instances of the type specified by InstanceType. Choose an instance count larger than 1 for distributed training algorithms. See SageMaker distributed training jobs for more information.

      " + "documentation":"

      The number of instances of the type specified by InstanceType. Choose an instance count larger than 1 for distributed training algorithms. See Step 2: Launch a SageMaker Distributed Training Job Using the SageMaker Python SDK for more information.

      " }, "VolumeSizeInGB":{ "shape":"VolumeSizeInGB", @@ -17987,7 +18115,7 @@ "members":{ "InstanceType":{ "shape":"TrainingInstanceType", - "documentation":"

      The instance type used to run hyperparameter optimization tuning jobs. See descriptions of instance types for more information.

      " + "documentation":"

      The instance type used to run hyperparameter optimization tuning jobs. See descriptions of instance types for more information.

      " }, "InstanceCount":{ "shape":"TrainingInstanceCount", @@ -18133,7 +18261,7 @@ "documentation":"

      How long a job is allowed to run, or how many candidates a job is allowed to generate.

      " } }, - "documentation":"

      Stores the configuration information for the image classification problem of an AutoML job using the V2 API.

      " + "documentation":"

      Stores the configuration information for the image classification problem of an AutoML job V2.

      " }, "ImageConfig":{ "type":"structure", @@ -18578,6 +18706,14 @@ "RecommendationId":{ "shape":"String", "documentation":"

      The recommendation ID which uniquely identifies each recommendation.

      " + }, + "InvocationEndTime":{ + "shape":"InvocationEndTime", + "documentation":"

      A timestamp that shows when the benchmark completed.

      " + }, + "InvocationStartTime":{ + "shape":"InvocationStartTime", + "documentation":"

      A timestamp that shows when the benchmark started.

      " } }, "documentation":"

      A list of recommendations made by Amazon SageMaker Inference Recommender.

      " @@ -18640,6 +18776,18 @@ "FailureReason":{ "shape":"FailureReason", "documentation":"

      If the job fails, provides information why the job failed.

      " + }, + "ModelName":{ + "shape":"ModelName", + "documentation":"

      The name of the created model.

      " + }, + "SamplePayloadUrl":{ + "shape":"S3Uri", + "documentation":"

      The Amazon Simple Storage Service (Amazon S3) path where the sample payload is stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix).

      " + }, + "ModelPackageVersionArn":{ + "shape":"ModelPackageArn", + "documentation":"

      The Amazon Resource Name (ARN) of a versioned model package.

      " } }, "documentation":"

      A structure that contains a list of recommendation jobs.

      " @@ -18890,7 +19038,13 @@ "ml.g5.16xlarge", "ml.g5.12xlarge", "ml.g5.24xlarge", - "ml.g5.48xlarge" + "ml.g5.48xlarge", + "ml.inf1.xlarge", + "ml.inf1.2xlarge", + "ml.inf1.6xlarge", + "ml.inf1.24xlarge", + "ml.p4d.24xlarge", + "ml.p4de.24xlarge" ] }, "Integer":{"type":"integer"}, @@ -18946,6 +19100,8 @@ "min":0 }, "IntegerValue":{"type":"integer"}, + "InvocationEndTime":{"type":"timestamp"}, + "InvocationStartTime":{"type":"timestamp"}, "InvocationsMaxRetries":{ "type":"integer", "max":3, @@ -19674,7 +19830,7 @@ "members":{ "MaxResults":{ "shape":"MaxResults", - "documentation":"

      The maximum number of AppImageConfigs to return in the response. The default value is 10.

      " + "documentation":"

      The total number of items to return in the response. If the total number of items available is more than the value specified, a NextToken is provided in the response. To resume pagination, provide the NextToken value as part of a subsequent call. The default value is 10.

      " }, "NextToken":{ "shape":"NextToken", @@ -19732,7 +19888,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

      Returns a list up to a specified limit.

      " + "documentation":"

      The total number of items to return in the response. If the total number of items available is more than the value specified, a NextToken is provided in the response. To resume pagination, provide the NextToken value as part of a subsequent call. The default value is 10.

      " }, "SortOrder":{ "shape":"SortOrder", @@ -20327,7 +20483,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

      Returns a list up to a specified limit.

      " + "documentation":"

      The total number of items to return in the response. If the total number of items available is more than the value specified, a NextToken is provided in the response. To resume pagination, provide the NextToken value as part of a subsequent call. The default value is 10.

      " } } }, @@ -21271,6 +21427,14 @@ "MaxResults":{ "shape":"MaxResults", "documentation":"

      The maximum number of recommendations to return in the response.

      " + }, + "ModelNameEquals":{ + "shape":"ModelName", + "documentation":"

      A filter that returns only jobs that were created for this model.

      " + }, + "ModelPackageVersionArnEquals":{ + "shape":"ModelPackageArn", + "documentation":"

      A filter that returns only jobs that were created for this versioned model package.

      " } } }, @@ -22359,7 +22523,7 @@ "members":{ "PipelineName":{ "shape":"PipelineNameOrArn", - "documentation":"

      The name of the pipeline.

      " + "documentation":"

      The name or Amazon Resource Name (ARN) of the pipeline.

      " }, "CreatedAfter":{ "shape":"Timestamp", @@ -22593,7 +22757,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

      Returns a list up to a specified limit.

      " + "documentation":"

      The total number of items to return in the response. If the total number of items available is more than the value specified, a NextToken is provided in the response. To resume pagination, provide the NextToken value as part of a subsequent call. The default value is 10.

      " }, "SortOrder":{ "shape":"SortOrder", @@ -22675,7 +22839,7 @@ "members":{ "MaxResults":{ "shape":"MaxResults", - "documentation":"

      The maximum number of Studio Lifecycle Configurations to return in the response. The default value is 10.

      " + "documentation":"

      The total number of items to return in the response. If the total number of items available is more than the value specified, a NextToken is provided in the response. To resume pagination, provide the NextToken value as part of a subsequent call. The default value is 10.

      " }, "NextToken":{ "shape":"NextToken", @@ -22720,7 +22884,7 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

      A token for getting the next set of actions, if there are any.

      " + "documentation":"

      If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results.

      " }, "StudioLifecycleConfigs":{ "shape":"StudioLifecycleConfigsList", @@ -23080,7 +23244,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

      Returns a list up to a specified limit.

      " + "documentation":"

      The total number of items to return in the response. If the total number of items available is more than the value specified, a NextToken is provided in the response. To resume pagination, provide the NextToken value as part of a subsequent call. The default value is 10.

      " }, "SortOrder":{ "shape":"SortOrder", @@ -23504,6 +23668,10 @@ "Tags":{ "shape":"TagList", "documentation":"

      A list of key-value pairs associated with the model. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

      " + }, + "DeploymentRecommendation":{ + "shape":"DeploymentRecommendation", + "documentation":"

      A set of recommended deployment configurations for the model.

      " } }, "documentation":"

      The properties of a model as returned by the Search API.

      " @@ -23890,6 +24058,13 @@ }, "documentation":"

      Configures the timeout and maximum number of retries for processing a transform job invocation.

      " }, + "ModelCompressionType":{ + "type":"string", + "enum":[ + "None", + "Gzip" + ] + }, "ModelConfiguration":{ "type":"structure", "members":{ @@ -24088,6 +24263,17 @@ }, "documentation":"

      Data quality constraints and statistics for a model.

      " }, + "ModelDataSource":{ + "type":"structure", + "required":["S3DataSource"], + "members":{ + "S3DataSource":{ + "shape":"S3ModelDataSource", + "documentation":"

      Specifies the S3 location of ML model data to deploy.

      " + } + }, + "documentation":"

      Specifies the location of ML model data to deploy. If specified, you must specify one and only one of the available data sources.

      " + }, "ModelDeployConfig":{ "type":"structure", "members":{ @@ -26276,6 +26462,13 @@ "Descending" ] }, + "OutputCompressionType":{ + "type":"string", + "enum":[ + "GZIP", + "NONE" + ] + }, "OutputConfig":{ "type":"structure", "required":["S3OutputLocation"], @@ -26286,7 +26479,7 @@ }, "TargetDevice":{ "shape":"TargetDevice", - "documentation":"

      Identifies the target device or the machine learning instance that you want to run your model on after the compilation has completed. Alternatively, you can specify OS, architecture, and accelerator using TargetPlatform fields. It can be used instead of TargetPlatform.

      " + "documentation":"

      Identifies the target device or the machine learning instance that you want to run your model on after the compilation has completed. Alternatively, you can specify OS, architecture, and accelerator using TargetPlatform fields. It can be used instead of TargetPlatform.

      Currently ml_trn1 is available only in US East (N. Virginia) Region, and ml_inf2 is available only in US East (Ohio) Region.

      " }, "TargetPlatform":{ "shape":"TargetPlatform", @@ -26314,6 +26507,10 @@ "S3OutputPath":{ "shape":"S3Uri", "documentation":"

      Identifies the S3 path where you want SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix.

      " + }, + "CompressionType":{ + "shape":"OutputCompressionType", + "documentation":"

      The model output compression type. Select None to output an uncompressed model, recommended for large model outputs. Defaults to gzip.

      " } }, "documentation":"

      Provides information about how to store model training results (model artifacts).

      " @@ -26424,6 +26621,10 @@ "CategoricalParameterRanges":{ "shape":"CategoricalParameterRanges", "documentation":"

      The array of CategoricalParameterRange objects that specify ranges of categorical hyperparameters that a hyperparameter tuning job searches.

      " + }, + "AutoParameters":{ + "shape":"AutoParameters", + "documentation":"

      A list containing hyperparameter names and example values to be used by Autotune to determine optimal ranges for your tuning job.

      " } }, "documentation":"

      Specifies ranges of integer, continuous, and categorical hyperparameters that a hyperparameter tuning job searches. The hyperparameter tuning job launches training jobs with hyperparameter values within these ranges to find the combination of values that result in the training job with the best performance as measured by the objective metric of the hyperparameter tuning job.

      The maximum number of items specified for Array Members refers to the maximum number of hyperparameters for each range and also the maximum for the hyperparameter tuning job itself. That is, the sum of the number of hyperparameters for all the ranges can't exceed the maximum number specified.

      " @@ -26724,6 +26925,10 @@ "PipelineParameters":{ "shape":"ParameterList", "documentation":"

      Contains a list of pipeline parameters. This list can be empty.

      " + }, + "SelectiveExecutionConfig":{ + "shape":"SelectiveExecutionConfig", + "documentation":"

      The selective execution configuration applied to the pipeline run.

      " } }, "documentation":"

      An execution of a pipeline.

      " @@ -26802,6 +27007,10 @@ "Metadata":{ "shape":"PipelineExecutionStepMetadata", "documentation":"

      Metadata to run the pipeline step.

      " + }, + "SelectiveExecutionResult":{ + "shape":"SelectiveExecutionResult", + "documentation":"

      The ARN from an execution of the current pipeline from which results are reused for this step.

      " } }, "documentation":"

      An execution of a step in a pipeline.

      " @@ -28424,7 +28633,7 @@ "documentation":"

      The level of permissions that the user has within the RStudioServerPro app. This value defaults to `User`. The `Admin` value allows the user access to the RStudio Administrative Dashboard.

      " } }, - "documentation":"

      A collection of settings that configure user interaction with the RStudioServerPro app. RStudioServerProAppSettings cannot be updated. The RStudioServerPro app must be deleted and a new one created to make any changes.

      " + "documentation":"

      A collection of settings that configure user interaction with the RStudioServerPro app.

      " }, "RStudioServerProDomainSettings":{ "type":"structure", @@ -28495,6 +28704,34 @@ }, "documentation":"

      The infrastructure configuration for deploying the model to a real-time inference endpoint.

      " }, + "RealTimeInferenceRecommendation":{ + "type":"structure", + "required":[ + "RecommendationId", + "InstanceType" + ], + "members":{ + "RecommendationId":{ + "shape":"String", + "documentation":"

      The recommendation ID which uniquely identifies each recommendation.

      " + }, + "InstanceType":{ + "shape":"ProductionVariantInstanceType", + "documentation":"

      The recommended instance type for Real-Time Inference.

      " + }, + "Environment":{ + "shape":"EnvironmentMap", + "documentation":"

      The recommended environment variables to set in the model container for Real-Time Inference.

      " + } + }, + "documentation":"

      The recommended configuration to use for Real-Time Inference.

      " + }, + "RealTimeInferenceRecommendations":{ + "type":"list", + "member":{"shape":"RealTimeInferenceRecommendation"}, + "max":3, + "min":0 + }, "RealtimeInferenceInstanceTypes":{ "type":"list", "member":{"shape":"ProductionVariantInstanceType"} @@ -28580,7 +28817,15 @@ "shape":"RecommendationFailureReason", "documentation":"

      The reason why a benchmark failed.

      " }, - "EndpointMetrics":{"shape":"InferenceMetrics"} + "EndpointMetrics":{"shape":"InferenceMetrics"}, + "InvocationEndTime":{ + "shape":"InvocationEndTime", + "documentation":"

      A timestamp that shows when the benchmark completed.

      " + }, + "InvocationStartTime":{ + "shape":"InvocationStartTime", + "documentation":"

      A timestamp that shows when the benchmark started.

      " + } }, "documentation":"

      The details for a specific benchmark from an Inference Recommender job.

      " }, @@ -28794,6 +29039,15 @@ }, "documentation":"

      The metrics of recommendations.

      " }, + "RecommendationStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "COMPLETED", + "FAILED", + "NOT_APPLICABLE" + ] + }, "RecommendationStepType":{ "type":"string", "enum":["BENCHMARK"] @@ -29314,7 +29568,7 @@ }, "S3Uri":{ "shape":"S3Uri", - "documentation":"

      Depending on the value specified for the S3DataType, identifies either a key name prefix or a manifest. For example:

      • A key name prefix might look like this: s3://bucketname/exampleprefix

      • A manifest might look like this: s3://bucketname/example.manifest

        A manifest is an S3 object which is a JSON file consisting of an array of elements. The first element is a prefix which is followed by one or more suffixes. SageMaker appends the suffix elements to the prefix to get a full set of S3Uri. Note that the prefix must be a valid non-empty S3Uri that precludes users from specifying a manifest whose individual S3Uri is sourced from different S3 buckets.

        The following code example shows a valid manifest format:

        [ {\"prefix\": \"s3://customer_bucket/some/prefix/\"},

        \"relative/path/to/custdata-1\",

        \"relative/path/custdata-2\",

        ...

        \"relative/path/custdata-N\"

        ]

        This JSON is equivalent to the following S3Uri list:

        s3://customer_bucket/some/prefix/relative/path/to/custdata-1

        s3://customer_bucket/some/prefix/relative/path/custdata-2

        ...

        s3://customer_bucket/some/prefix/relative/path/custdata-N

        The complete set of S3Uri in this manifest is the input data for the channel for this data source. The object that each S3Uri points to must be readable by the IAM role that SageMaker uses to perform tasks on your behalf.

      " + "documentation":"

      Depending on the value specified for the S3DataType, identifies either a key name prefix or a manifest. For example:

      • A key name prefix might look like this: s3://bucketname/exampleprefix

      • A manifest might look like this: s3://bucketname/example.manifest

        A manifest is an S3 object which is a JSON file consisting of an array of elements. The first element is a prefix which is followed by one or more suffixes. SageMaker appends the suffix elements to the prefix to get a full set of S3Uri. Note that the prefix must be a valid non-empty S3Uri that precludes users from specifying a manifest whose individual S3Uri is sourced from different S3 buckets.

        The following code example shows a valid manifest format:

        [ {\"prefix\": \"s3://customer_bucket/some/prefix/\"},

        \"relative/path/to/custdata-1\",

        \"relative/path/custdata-2\",

        ...

        \"relative/path/custdata-N\"

        ]

        This JSON is equivalent to the following S3Uri list:

        s3://customer_bucket/some/prefix/relative/path/to/custdata-1

        s3://customer_bucket/some/prefix/relative/path/custdata-2

        ...

        s3://customer_bucket/some/prefix/relative/path/custdata-N

        The complete set of S3Uri in this manifest is the input data for the channel for this data source. The object that each S3Uri points to must be readable by the IAM role that SageMaker uses to perform tasks on your behalf.

      Your input bucket must be located in the same Amazon Web Services region as your training job.

      " }, "S3DataDistributionType":{ "shape":"S3DataDistribution", @@ -29329,7 +29583,7 @@ "documentation":"

      A list of names of instance groups that get data from the S3 data source.

      " } }, - "documentation":"

      Describes the S3 data source.

      " + "documentation":"

      Describes the S3 data source.

      Your input bucket must be in the same Amazon Web Services region as your training job.

      " }, "S3DataType":{ "type":"string", @@ -29339,6 +29593,41 @@ "AugmentedManifestFile" ] }, + "S3ModelDataSource":{ + "type":"structure", + "required":[ + "S3Uri", + "S3DataType", + "CompressionType" + ], + "members":{ + "S3Uri":{ + "shape":"S3ModelUri", + "documentation":"

      Specifies the S3 path of ML model data to deploy.

      " + }, + "S3DataType":{ + "shape":"S3ModelDataType", + "documentation":"

      Specifies the type of ML model data to deploy.

      If you choose S3Prefix, S3Uri identifies a key name prefix. SageMaker uses all objects that match the specified key name prefix as part of the ML model data to deploy. A valid key name prefix identified by S3Uri always ends with a forward slash (/).

      If you choose S3Object, S3Uri identifies an object that is the ML model data to deploy.

      " + }, + "CompressionType":{ + "shape":"ModelCompressionType", + "documentation":"

      Specifies how the ML model data is prepared.

      If you choose Gzip and choose S3Object as the value of S3DataType, S3Uri identifies an object that is a gzip-compressed TAR archive. SageMaker will attempt to decompress and untar the object during model deployment.

      If you choose None and choose S3Object as the value of S3DataType, S3Uri identifies an object that represents an uncompressed ML model to deploy.

      If you choose None and choose S3Prefix as the value of S3DataType, S3Uri identifies a key name prefix, under which all objects represents the uncompressed ML model to deploy.

      If you choose None, then SageMaker will follow rules below when creating model data files under /opt/ml/model directory for use by your inference code:

      • If you choose S3Object as the value of S3DataType, then SageMaker will split the key of the S3 object referenced by S3Uri by slash (/), and use the last part as the filename of the file holding the content of the S3 object.

      • If you choose S3Prefix as the value of S3DataType, then for each S3 object under the key name prefix referenced by S3Uri, SageMaker will trim its key by the prefix, and use the remainder as the path (relative to /opt/ml/model) of the file holding the content of the S3 object. SageMaker will split the remainder by slash (/), using intermediate parts as directory names and the last part as filename of the file holding the content of the S3 object.

      • Do not use any of the following as file names or directory names:

        • An empty or blank string

        • A string which contains null bytes

        • A string longer than 255 bytes

        • A single dot (.)

        • A double dot (..)

      • Ambiguous file names will result in model deployment failure. For example, if your uncompressed ML model consists of two S3 objects s3://mybucket/model/weights and s3://mybucket/model/weights/part1 and you specify s3://mybucket/model/ as the value of S3Uri and S3Prefix as the value of S3DataType, then it will result in name clash between /opt/ml/model/weights (a regular file) and /opt/ml/model/weights/ (a directory).

      • Do not organize the model artifacts in S3 console using folders. When you create a folder in S3 console, S3 creates a 0-byte object with a key set to the folder name you provide. The key of the 0-byte object ends with a slash (/) which violates SageMaker restrictions on model artifact file names, leading to model deployment failure.

      " + } + }, + "documentation":"

      Specifies the S3 location of ML model data to deploy.

      " + }, + "S3ModelDataType":{ + "type":"string", + "enum":[ + "S3Prefix", + "S3Object" + ] + }, + "S3ModelUri":{ + "type":"string", + "max":1024, + "pattern":"^(https|s3)://([^/]+)/?(.*)$" + }, "S3OutputPath":{ "type":"string", "max":1024, @@ -29617,6 +29906,51 @@ "max":5 }, "Seed":{"type":"long"}, + "SelectedStep":{ + "type":"structure", + "required":["StepName"], + "members":{ + "StepName":{ + "shape":"String256", + "documentation":"

      The name of the pipeline step.

      " + } + }, + "documentation":"

      A step selected to run in selective execution mode.

      " + }, + "SelectedStepList":{ + "type":"list", + "member":{"shape":"SelectedStep"}, + "max":50, + "min":1 + }, + "SelectiveExecutionConfig":{ + "type":"structure", + "required":[ + "SourcePipelineExecutionArn", + "SelectedSteps" + ], + "members":{ + "SourcePipelineExecutionArn":{ + "shape":"PipelineExecutionArn", + "documentation":"

      The ARN from a reference execution of the current pipeline. Used to copy input collaterals needed for the selected steps to run. The execution status of the pipeline can be either Failed or Success.

      " + }, + "SelectedSteps":{ + "shape":"SelectedStepList", + "documentation":"

      A list of pipeline steps to run. All step(s) in all path(s) between two selected steps should be included.

      " + } + }, + "documentation":"

      The selective execution configuration applied to the pipeline run.

      " + }, + "SelectiveExecutionResult":{ + "type":"structure", + "members":{ + "SourcePipelineExecutionArn":{ + "shape":"PipelineExecutionArn", + "documentation":"

      The ARN from an execution of the current pipeline.

      " + } + }, + "documentation":"

      The ARN from an execution of the current pipeline.

      " + }, "SendPipelineExecutionStepFailureRequest":{ "type":"structure", "required":["CallbackToken"], @@ -29929,7 +30263,7 @@ "members":{ "ModelDataUrl":{ "shape":"Url", - "documentation":"

      The Amazon S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix).

      The model artifacts must be in an S3 bucket that is in the same region as the algorithm.

      " + "documentation":"

      The Amazon S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix).

      The model artifacts must be in an S3 bucket that is in the same Amazon Web Services region as the algorithm.

      " }, "AlgorithmName":{ "shape":"ArnOrName", @@ -30134,7 +30468,7 @@ "members":{ "PipelineName":{ "shape":"PipelineNameOrArn", - "documentation":"

      The name of the pipeline.

      " + "documentation":"

      The name or Amazon Resource Name (ARN) of the pipeline.

      " }, "PipelineExecutionDisplayName":{ "shape":"PipelineExecutionName", @@ -30156,6 +30490,10 @@ "ParallelismConfiguration":{ "shape":"ParallelismConfiguration", "documentation":"

      This configuration, if specified, overrides the parallelism configuration of the parent pipeline for this specific run.

      " + }, + "SelectiveExecutionConfig":{ + "shape":"SelectiveExecutionConfig", + "documentation":"

      The selective execution configuration applied to the pipeline run.

      " } } }, @@ -30580,6 +30918,52 @@ "min":1, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" }, + "TabularJobConfig":{ + "type":"structure", + "required":["TargetAttributeName"], + "members":{ + "CandidateGenerationConfig":{ + "shape":"CandidateGenerationConfig", + "documentation":"

      The configuration information of how model candidates are generated.

      " + }, + "CompletionCriteria":{"shape":"AutoMLJobCompletionCriteria"}, + "FeatureSpecificationS3Uri":{ + "shape":"S3Uri", + "documentation":"

      A URL to the Amazon S3 data source containing selected features from the input data source to run an Autopilot job V2. You can input FeatureAttributeNames (optional) in JSON format as shown below:

      { \"FeatureAttributeNames\":[\"col1\", \"col2\", ...] }.

      You can also specify the data type of the feature (optional) in the format shown below:

      { \"FeatureDataTypes\":{\"col1\":\"numeric\", \"col2\":\"categorical\" ... } }

      These column keys may not include the target column.

      In ensembling mode, Autopilot only supports the following data types: numeric, categorical, text, and datetime. In HPO mode, Autopilot can support numeric, categorical, text, datetime, and sequence.

      If only FeatureDataTypes is provided, the column keys (col1, col2,..) should be a subset of the column names in the input data.

      If both FeatureDataTypes and FeatureAttributeNames are provided, then the column keys should be a subset of the column names provided in FeatureAttributeNames.

      The key name FeatureAttributeNames is fixed. The values listed in [\"col1\", \"col2\", ...] are case sensitive and should be a list of strings containing unique values that are a subset of the column names in the input data. The list of columns provided must not include the target column.

      " + }, + "Mode":{ + "shape":"AutoMLMode", + "documentation":"

      The method that Autopilot uses to train the data. You can either specify the mode manually or let Autopilot choose for you based on the dataset size by selecting AUTO. In AUTO mode, Autopilot chooses ENSEMBLING for datasets smaller than 100 MB, and HYPERPARAMETER_TUNING for larger ones.

      The ENSEMBLING mode uses a multi-stack ensemble model to predict classification and regression tasks directly from your dataset. This machine learning mode combines several base models to produce an optimal predictive model. It then uses a stacking ensemble method to combine predictions from contributing members. A multi-stack ensemble model can provide better performance over a single model by combining the predictive capabilities of multiple models. See Autopilot algorithm support for a list of algorithms supported by ENSEMBLING mode.

      The HYPERPARAMETER_TUNING (HPO) mode uses the best hyperparameters to train the best version of a model. HPO automatically selects an algorithm for the type of problem you want to solve. Then HPO finds the best hyperparameters according to your objective metric. See Autopilot algorithm support for a list of algorithms supported by HYPERPARAMETER_TUNING mode.

      " + }, + "GenerateCandidateDefinitionsOnly":{ + "shape":"GenerateCandidateDefinitionsOnly", + "documentation":"

      Generates possible candidates without training the models. A model candidate is a combination of data preprocessors, algorithms, and algorithm parameter settings.

      " + }, + "ProblemType":{ + "shape":"ProblemType", + "documentation":"

      The type of supervised learning problem available for the model candidates of the AutoML job V2. For more information, see Amazon SageMaker Autopilot problem types.

      You must either specify the type of supervised learning problem in ProblemType and provide the AutoMLJobObjective metric, or none at all.

      " + }, + "TargetAttributeName":{ + "shape":"TargetAttributeName", + "documentation":"

      The name of the target variable in supervised learning, usually represented by 'y'.

      " + }, + "SampleWeightAttributeName":{ + "shape":"SampleWeightAttributeName", + "documentation":"

      If specified, this column name indicates which column of the dataset should be treated as sample weights for use by the objective metric during the training, evaluation, and the selection of the best model. This column is not considered as a predictive feature. For more information on Autopilot metrics, see Metrics and validation.

      Sample weights should be numeric, non-negative, with larger values indicating which rows are more important than others. Data points that have invalid or no weight value are excluded.

      Support for sample weights is available in Ensembling mode only.

      " + } + }, + "documentation":"

      The collection of settings used by an AutoML job V2 for the TABULAR problem type.

      " + }, + "TabularResolvedAttributes":{ + "type":"structure", + "members":{ + "ProblemType":{ + "shape":"ProblemType", + "documentation":"

      The type of supervised learning problem available for the model candidates of the AutoML job V2 (Binary Classification, Multiclass Classification, Regression). For more information, see Amazon SageMaker Autopilot problem types.

      " + } + }, + "documentation":"

      The resolved attributes specific to the TABULAR problem type.

      " + }, "Tag":{ "type":"structure", "required":[ @@ -30638,6 +31022,8 @@ "ml_p3", "ml_g4dn", "ml_inf1", + "ml_inf2", + "ml_trn1", "ml_eia2", "jetson_tx1", "jetson_tx2", @@ -30819,14 +31205,14 @@ }, "ContentColumn":{ "shape":"ContentColumn", - "documentation":"

      The name of the column used to provide the sentences to be classified. It should not be the same as the target column.

      " + "documentation":"

      The name of the column used to provide the sentences to be classified. It should not be the same as the target column (Required).

      " }, "TargetLabelColumn":{ "shape":"TargetLabelColumn", - "documentation":"

      The name of the column used to provide the class labels. It should not be same as the content column.

      " + "documentation":"

      The name of the column used to provide the class labels. It should not be same as the content column (Required).

      " } }, - "documentation":"

      Stores the configuration information for the text classification problem of an AutoML job using the V2 API.

      " + "documentation":"

      Stores the configuration information for the text classification problem of an AutoML job V2.

      " }, "ThingName":{ "type":"string", @@ -31021,7 +31407,8 @@ "ml.g5.24xlarge", "ml.g5.48xlarge", "ml.trn1.2xlarge", - "ml.trn1.32xlarge" + "ml.trn1.32xlarge", + "ml.trn1n.32xlarge" ] }, "TrainingInstanceTypes":{ @@ -31081,7 +31468,7 @@ }, "InputDataConfig":{ "shape":"InputDataConfig", - "documentation":"

      An array of Channel objects that describes each data input channel.

      " + "documentation":"

      An array of Channel objects that describes each data input channel.

      Your input must be in the same Amazon Web Services region as your training job.

      " }, "OutputDataConfig":{ "shape":"OutputDataConfig", diff --git a/services/sagemakera2iruntime/pom.xml b/services/sagemakera2iruntime/pom.xml index ac1bee5fc620..31681ee5c26c 100644 --- a/services/sagemakera2iruntime/pom.xml +++ b/services/sagemakera2iruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT sagemakera2iruntime AWS Java SDK :: Services :: SageMaker A2I Runtime diff --git a/services/sagemakeredge/pom.xml b/services/sagemakeredge/pom.xml index f9745528c6e8..b503a9a03bc9 100644 --- a/services/sagemakeredge/pom.xml +++ b/services/sagemakeredge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT sagemakeredge AWS Java SDK :: Services :: Sagemaker Edge diff --git a/services/sagemakerfeaturestoreruntime/pom.xml b/services/sagemakerfeaturestoreruntime/pom.xml index 4081c194659a..0e727da7aaed 100644 --- a/services/sagemakerfeaturestoreruntime/pom.xml +++ b/services/sagemakerfeaturestoreruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT sagemakerfeaturestoreruntime AWS Java SDK :: Services :: Sage Maker Feature Store Runtime diff --git a/services/sagemakergeospatial/pom.xml b/services/sagemakergeospatial/pom.xml index 7f4380825bde..421b40a4fb33 100644 --- a/services/sagemakergeospatial/pom.xml +++ b/services/sagemakergeospatial/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT sagemakergeospatial AWS Java SDK :: Services :: Sage Maker Geospatial diff --git a/services/sagemakergeospatial/src/main/resources/codegen-resources/endpoint-tests.json b/services/sagemakergeospatial/src/main/resources/codegen-resources/endpoint-tests.json index 913aa5816bf4..98c7b87747f7 100644 --- a/services/sagemakergeospatial/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/sagemakergeospatial/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } 
}, "params": { - "UseFIPS": true, "Region": "us-gov-east-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -21,9 +21,9 @@ } }, "params": { - "UseFIPS": true, "Region": "us-gov-east-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -34,9 +34,9 @@ } }, "params": { - "UseFIPS": false, "Region": "us-gov-east-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -47,9 +47,9 @@ } }, "params": { - "UseFIPS": false, "Region": "us-gov-east-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseFIPS": true, "Region": "cn-north-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseFIPS": true, "Region": "cn-north-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseFIPS": false, "Region": "cn-north-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -99,9 +99,9 @@ } }, "params": { - "UseFIPS": false, "Region": "cn-north-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -110,9 +110,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "Region": "us-iso-east-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -123,9 +123,9 @@ } }, "params": { - "UseFIPS": true, "Region": "us-iso-east-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -134,9 +134,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "Region": "us-iso-east-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -147,9 +147,9 @@ } }, "params": { - "UseFIPS": false, "Region": "us-iso-east-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, 
{ @@ -160,9 +160,9 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -173,9 +173,9 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -186,9 +186,9 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -199,9 +199,9 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -210,9 +210,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -223,9 +223,9 @@ } }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -234,9 +234,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -247,9 +247,9 @@ } }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -260,9 +260,9 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", "UseDualStack": false, + "UseFIPS": false, "Endpoint": "https://example.com" } }, @@ -272,9 +272,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", "UseDualStack": false, + "UseFIPS": true, "Endpoint": "https://example.com" } }, @@ -284,9 +284,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", "UseDualStack": true, + 
"UseFIPS": false, "Endpoint": "https://example.com" } } diff --git a/services/sagemakergeospatial/src/main/resources/codegen-resources/service-2.json b/services/sagemakergeospatial/src/main/resources/codegen-resources/service-2.json index ed46564e5a89..712ccbe08daf 100644 --- a/services/sagemakergeospatial/src/main/resources/codegen-resources/service-2.json +++ b/services/sagemakergeospatial/src/main/resources/codegen-resources/service-2.json @@ -682,17 +682,6 @@ }, "documentation":"

      The structure representing the EoCloudCover filter.

      " }, - "EojDataSourceConfigInput":{ - "type":"structure", - "members":{ - "S3Data":{ - "shape":"S3DataInput", - "documentation":"

      The input structure for S3Data; representing the Amazon S3 location of the input data objects.

      " - } - }, - "documentation":"

      Union representing different data sources to be used as input for an Earth Observation job.

      ", - "union":true - }, "ExecutionRoleArn":{ "type":"string", "max":2048, @@ -1295,10 +1284,6 @@ "InputConfigInput":{ "type":"structure", "members":{ - "DataSourceConfig":{ - "shape":"EojDataSourceConfigInput", - "documentation":"

      The location of the input data.>

      " - }, "PreviousEarthObservationJobArn":{ "shape":"EarthObservationJobArn", "documentation":"

      The Amazon Resource Name (ARN) of the previous Earth Observation job.

      " @@ -1313,10 +1298,6 @@ "InputConfigOutput":{ "type":"structure", "members":{ - "DataSourceConfig":{ - "shape":"EojDataSourceConfigInput", - "documentation":"

      The location of the input data.

      " - }, "PreviousEarthObservationJobArn":{ "shape":"EarthObservationJobArn", "documentation":"

      The Amazon Resource Name (ARN) of the previous Earth Observation job.

      " @@ -1732,10 +1713,6 @@ }, "documentation":"

      The input structure for Map Matching operation type.

      " }, - "MetadataProvider":{ - "type":"string", - "enum":["PLANET_ORDER"] - }, "MultiPolygonGeometryInput":{ "type":"structure", "required":["Coordinates"], @@ -2150,28 +2127,6 @@ }, "documentation":"

      The input structure for Reverse Geocoding operation type.

      " }, - "S3DataInput":{ - "type":"structure", - "required":[ - "MetadataProvider", - "S3Uri" - ], - "members":{ - "KmsKeyId":{ - "shape":"KmsKey", - "documentation":"

      The Key Management Service key ID for server-side encryption.

      " - }, - "MetadataProvider":{ - "shape":"MetadataProvider", - "documentation":"

      Metadata provider from whom the Amazon S3 data has been acquired.

      " - }, - "S3Uri":{ - "shape":"S3Uri", - "documentation":"

      The URL to the Amazon S3 input.

      " - } - }, - "documentation":"

      Path to Amazon S3 storage location for input data.

      " - }, "S3Uri":{ "type":"string", "pattern":"^s3://([^/]+)/?(.*)$" @@ -2256,6 +2211,7 @@ "StartEarthObservationJobInput":{ "type":"structure", "required":[ + "ExecutionRoleArn", "InputConfig", "JobConfig", "Name" @@ -2308,6 +2264,7 @@ "Arn", "CreationTime", "DurationInSeconds", + "ExecutionRoleArn", "JobConfig", "Name", "Status" diff --git a/services/sagemakermetrics/pom.xml b/services/sagemakermetrics/pom.xml index 7ce77fa6c6fe..6ba9f0c1eb94 100644 --- a/services/sagemakermetrics/pom.xml +++ b/services/sagemakermetrics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT sagemakermetrics AWS Java SDK :: Services :: Sage Maker Metrics diff --git a/services/sagemakerruntime/pom.xml b/services/sagemakerruntime/pom.xml index ea477cbb6aeb..43e13eb75277 100644 --- a/services/sagemakerruntime/pom.xml +++ b/services/sagemakerruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT sagemakerruntime AWS Java SDK :: Services :: SageMaker Runtime diff --git a/services/savingsplans/pom.xml b/services/savingsplans/pom.xml index 2e4dbffbd51b..37bd87ec9b2e 100644 --- a/services/savingsplans/pom.xml +++ b/services/savingsplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT savingsplans AWS Java SDK :: Services :: Savingsplans diff --git a/services/scheduler/pom.xml b/services/scheduler/pom.xml index 590e58347431..dbe845436ae5 100644 --- a/services/scheduler/pom.xml +++ b/services/scheduler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT scheduler AWS Java SDK :: Services :: Scheduler diff --git a/services/schemas/pom.xml b/services/schemas/pom.xml index 9e00aba76953..181f156e5111 100644 --- a/services/schemas/pom.xml +++ b/services/schemas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT schemas AWS Java SDK :: Services :: Schemas diff --git 
a/services/secretsmanager/pom.xml b/services/secretsmanager/pom.xml index a28b86384b19..5714c74aef3a 100644 --- a/services/secretsmanager/pom.xml +++ b/services/secretsmanager/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT secretsmanager AWS Java SDK :: Services :: AWS Secrets Manager diff --git a/services/securityhub/pom.xml b/services/securityhub/pom.xml index 96a4db25d314..201da72ec5f2 100644 --- a/services/securityhub/pom.xml +++ b/services/securityhub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT securityhub AWS Java SDK :: Services :: SecurityHub diff --git a/services/securityhub/src/main/resources/codegen-resources/service-2.json b/services/securityhub/src/main/resources/codegen-resources/service-2.json index 4f32153d6dfe..f2adc9ddd778 100644 --- a/services/securityhub/src/main/resources/codegen-resources/service-2.json +++ b/services/securityhub/src/main/resources/codegen-resources/service-2.json @@ -48,6 +48,23 @@ "deprecated":true, "deprecatedMessage":"This API has been deprecated, use AcceptAdministratorInvitation API instead." }, + "BatchDeleteAutomationRules":{ + "name":"BatchDeleteAutomationRules", + "http":{ + "method":"POST", + "requestUri":"/automationrules/delete" + }, + "input":{"shape":"BatchDeleteAutomationRulesRequest"}, + "output":{"shape":"BatchDeleteAutomationRulesResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidAccessException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Deletes one or more automation rules.

      " + }, "BatchDisableStandards":{ "name":"BatchDisableStandards", "http":{ @@ -80,6 +97,24 @@ ], "documentation":"

      Enables the standards specified by the provided StandardsArn. To obtain the ARN for a standard, use the DescribeStandards operation.

      For more information, see the Security Standards section of the Security Hub User Guide.

      " }, + "BatchGetAutomationRules":{ + "name":"BatchGetAutomationRules", + "http":{ + "method":"POST", + "requestUri":"/automationrules/get" + }, + "input":{"shape":"BatchGetAutomationRulesRequest"}, + "output":{"shape":"BatchGetAutomationRulesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalException"}, + {"shape":"InvalidAccessException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Retrieves a list of details for automation rules based on rule Amazon Resource Names (ARNs).

      " + }, "BatchGetSecurityControls":{ "name":"BatchGetSecurityControls", "http":{ @@ -128,6 +163,23 @@ ], "documentation":"

      Imports security findings generated by a finding provider into Security Hub. This action is requested by the finding provider to import its findings into Security Hub.

      BatchImportFindings must be called by one of the following:

      • The Amazon Web Services account that is associated with a finding if you are using the default product ARN or are a partner sending findings from within a customer's Amazon Web Services account. In these cases, the identifier of the account that you are calling BatchImportFindings from needs to be the same as the AwsAccountId attribute for the finding.

      • An Amazon Web Services account that Security Hub has allow-listed for an official partner integration. In this case, you can call BatchImportFindings from the allow-listed account and send findings from different customer accounts in the same batch.

      The maximum allowed size for a finding is 240 Kb. An error is returned for any finding larger than 240 Kb.

      After a finding is created, BatchImportFindings cannot be used to update the following finding fields and objects, which Security Hub customers use to manage their investigation workflow.

      • Note

      • UserDefinedFields

      • VerificationState

      • Workflow

      Finding providers also should not use BatchImportFindings to update the following attributes.

      • Confidence

      • Criticality

      • RelatedFindings

      • Severity

      • Types

      Instead, finding providers use FindingProviderFields to provide values for these attributes.

      " }, + "BatchUpdateAutomationRules":{ + "name":"BatchUpdateAutomationRules", + "http":{ + "method":"PATCH", + "requestUri":"/automationrules/update" + }, + "input":{"shape":"BatchUpdateAutomationRulesRequest"}, + "output":{"shape":"BatchUpdateAutomationRulesResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidAccessException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

      Updates one or more automation rules based on rule Amazon Resource Names (ARNs) and input parameters.

      " + }, "BatchUpdateFindings":{ "name":"BatchUpdateFindings", "http":{ @@ -177,6 +229,23 @@ ], "documentation":"

      Creates a custom action target in Security Hub.

      You can use custom actions on findings and insights in Security Hub to trigger target actions in Amazon CloudWatch Events.

      " }, + "CreateAutomationRule":{ + "name":"CreateAutomationRule", + "http":{ + "method":"POST", + "requestUri":"/automationrules/create" + }, + "input":{"shape":"CreateAutomationRuleRequest"}, + "output":{"shape":"CreateAutomationRuleResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalException"}, + {"shape":"InvalidAccessException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

      Creates an automation rule based on input parameters.

      " + }, "CreateFindingAggregator":{ "name":"CreateFindingAggregator", "http":{ @@ -763,6 +832,23 @@ ], "documentation":"

      Invites other Amazon Web Services accounts to become member accounts for the Security Hub administrator account that the invitation is sent from.

      This operation is only used to invite accounts that do not belong to an organization. Organization accounts do not receive invitations.

      Before you can use this action to invite a member, you must first use the CreateMembers action to create the member account in Security Hub.

      When the account owner enables Security Hub and accepts the invitation to become a member account, the administrator account can view the findings generated from the member account.

      " }, + "ListAutomationRules":{ + "name":"ListAutomationRules", + "http":{ + "method":"GET", + "requestUri":"/automationrules/list" + }, + "input":{"shape":"ListAutomationRulesRequest"}, + "output":{"shape":"ListAutomationRulesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalException"}, + {"shape":"InvalidAccessException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

      A list of automation rules and their metadata for the calling account.

      " + }, "ListEnabledProductsForImport":{ "name":"ListEnabledProductsForImport", "http":{ @@ -1144,6 +1230,12 @@ }, "documentation":"

      Provides details about one of the following actions that affects or that was taken on a resource:

      • A remote IP address issued an Amazon Web Services API call

      • A DNS request was received

      • A remote IP address attempted to connect to an EC2 instance

      • A remote IP address attempted a port probe on an EC2 instance

      " }, + "ActionList":{ + "type":"list", + "member":{"shape":"AutomationRulesAction"}, + "max":1, + "min":1 + }, "ActionLocalIpDetails":{ "type":"structure", "members":{ @@ -1335,51 +1427,568 @@ "type":"list", "member":{"shape":"AssociationSetDetails"} }, - "AssociationStateDetails":{ + "AssociationStateDetails":{ + "type":"structure", + "members":{ + "State":{ + "shape":"NonEmptyString", + "documentation":"

      The state of the association.

      " + }, + "StatusMessage":{ + "shape":"NonEmptyString", + "documentation":"

      The status message, if applicable.

      " + } + }, + "documentation":"

      Describes the state of an association between a route table and a subnet or gateway.

      " + }, + "AssociationStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "AutoEnableStandards":{ + "type":"string", + "enum":[ + "NONE", + "DEFAULT" + ] + }, + "AutomationRulesAction":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"AutomationRulesActionType", + "documentation":"

      Specifies that the rule action should update the Types finding field. The Types finding field provides one or more finding types in the format of namespace/category/classifier that classify a finding. For more information, see Types taxonomy for ASFF in the Security Hub User Guide.

      " + }, + "FindingFieldsUpdate":{ + "shape":"AutomationRulesFindingFieldsUpdate", + "documentation":"

      Specifies that the automation rule action is an update to a finding field.

      " + } + }, + "documentation":"

      One or more actions to update finding fields if a finding matches the defined criteria of the rule.

      " + }, + "AutomationRulesActionType":{ + "type":"string", + "enum":["FINDING_FIELDS_UPDATE"] + }, + "AutomationRulesArnsList":{ + "type":"list", + "member":{"shape":"NonEmptyString"}, + "max":100, + "min":1 + }, + "AutomationRulesConfig":{ + "type":"structure", + "members":{ + "RuleArn":{ + "shape":"NonEmptyString", + "documentation":"

      The Amazon Resource Name (ARN) of a rule.

      " + }, + "RuleStatus":{ + "shape":"RuleStatus", + "documentation":"

      Whether the rule is active after it is created. If this parameter is equal to ENABLED, Security Hub will apply the rule to findings and finding updates after the rule is created.

      " + }, + "RuleOrder":{ + "shape":"RuleOrderValue", + "documentation":"

      An integer ranging from 1 to 1000 that represents the order in which the rule action is applied to findings. Security Hub applies rules with lower values for this parameter first.

      " + }, + "RuleName":{ + "shape":"NonEmptyString", + "documentation":"

      The name of the rule.

      " + }, + "Description":{ + "shape":"NonEmptyString", + "documentation":"

      A description of the rule.

      " + }, + "IsTerminal":{ + "shape":"Boolean", + "documentation":"

      Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If the value of this field is set to true for a rule, Security Hub applies the rule action to a finding that matches the rule criteria and won't evaluate other rules for the finding.
 The default value of this field is false.

      " + }, + "Criteria":{ + "shape":"AutomationRulesFindingFilters", + "documentation":"

      A set of Amazon Web Services Security Finding Format finding field attributes and corresponding expected values that Security Hub uses to filter findings. If a finding matches the conditions specified in this parameter, Security Hub applies the rule action to the finding.

      " + }, + "Actions":{ + "shape":"ActionList", + "documentation":"

      One or more actions to update finding fields if a finding matches the defined criteria of the rule.

      " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

      A timestamp that indicates when the rule was created.

      Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

      " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

      A timestamp that indicates when the rule was most recently updated.

      Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

      " + }, + "CreatedBy":{ + "shape":"NonEmptyString", + "documentation":"

      The principal that created a rule.

      " + } + }, + "documentation":"

      Defines the configuration of an automation rule.

      " + }, + "AutomationRulesConfigList":{ + "type":"list", + "member":{"shape":"AutomationRulesConfig"} + }, + "AutomationRulesFindingFieldsUpdate":{ + "type":"structure", + "members":{ + "Note":{"shape":"NoteUpdate"}, + "Severity":{"shape":"SeverityUpdate"}, + "VerificationState":{ + "shape":"VerificationState", + "documentation":"

      The rule action will update the VerificationState field of a finding.

      " + }, + "Confidence":{ + "shape":"RatioScale", + "documentation":"

      The rule action will update the Confidence field of a finding.

      " + }, + "Criticality":{ + "shape":"RatioScale", + "documentation":"

      The rule action will update the Criticality field of a finding.

      " + }, + "Types":{ + "shape":"TypeList", + "documentation":"

      The rule action will update the Types field of a finding.

      " + }, + "UserDefinedFields":{ + "shape":"FieldMap", + "documentation":"

      The rule action will update the UserDefinedFields field of a finding.

      " + }, + "Workflow":{"shape":"WorkflowUpdate"}, + "RelatedFindings":{ + "shape":"RelatedFindingList", + "documentation":"

      A list of findings that are related to a finding.

      " + } + }, + "documentation":"

      Identifies the finding fields that the automation rule action will update when a finding matches the defined criteria.

      " + }, + "AutomationRulesFindingFilters":{ + "type":"structure", + "members":{ + "ProductArn":{ + "shape":"StringFilterList", + "documentation":"

      The Amazon Resource Name (ARN) for a third-party product that generated a finding in Security Hub.

      " + }, + "AwsAccountId":{ + "shape":"StringFilterList", + "documentation":"

      The Amazon Web Services account ID in which a finding was generated.

      " + }, + "Id":{ + "shape":"StringFilterList", + "documentation":"

      The product-specific identifier for a finding.

      " + }, + "GeneratorId":{ + "shape":"StringFilterList", + "documentation":"

      The identifier for the solution-specific component that generated a finding.

      " + }, + "Type":{ + "shape":"StringFilterList", + "documentation":"

      One or more finding types in the format of namespace/category/classifier that classify a finding. For a list of namespaces, classifiers, and categories, see Types taxonomy for ASFF in the Security Hub User Guide.

      " + }, + "FirstObservedAt":{ + "shape":"DateFilterList", + "documentation":"

      A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product.

      Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

      " + }, + "LastObservedAt":{ + "shape":"DateFilterList", + "documentation":"

      A timestamp that indicates when the potential security issue captured by a finding was most recently observed by the security findings product.

      Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

      " + }, + "CreatedAt":{ + "shape":"DateFilterList", + "documentation":"

      A timestamp that indicates when this finding record was created.

      Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

      " + }, + "UpdatedAt":{ + "shape":"DateFilterList", + "documentation":"

      A timestamp that indicates when the finding record was most recently updated.

      Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

      " + }, + "Confidence":{ + "shape":"NumberFilterList", + "documentation":"

      The likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0–100 basis using a ratio scale. A value of 0 means 0 percent confidence, and a value of 100 means 100 percent confidence. For example, a data exfiltration detection based on a statistical deviation of network traffic has low confidence because an actual exfiltration hasn't been verified. For more information, see Confidence in the Security Hub User Guide.

      " + }, + "Criticality":{ + "shape":"NumberFilterList", + "documentation":"

      The level of importance that is assigned to the resources that are associated with a finding. Criticality is scored on a 0–100 basis, using a ratio scale that supports only full integers. A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources. For more information, see Criticality in the Security Hub User Guide.

      " + }, + "Title":{ + "shape":"StringFilterList", + "documentation":"

      A finding's title.

      " + }, + "Description":{ + "shape":"StringFilterList", + "documentation":"

      A finding's description.

      " + }, + "SourceUrl":{ + "shape":"StringFilterList", + "documentation":"

      Provides a URL that links to a page about the current finding in the finding product.

      " + }, + "ProductName":{ + "shape":"StringFilterList", + "documentation":"

      Provides the name of the product that generated the finding. For control-based findings, the product name is Security Hub.

      " + }, + "CompanyName":{ + "shape":"StringFilterList", + "documentation":"

      The name of the company for the product that generated the finding. For control-based findings, the company is Amazon Web Services.

      " + }, + "SeverityLabel":{ + "shape":"StringFilterList", + "documentation":"

      The severity value of the finding.

      " + }, + "ResourceType":{ + "shape":"StringFilterList", + "documentation":"

      The type of resource that the finding pertains to.

      " + }, + "ResourceId":{ + "shape":"StringFilterList", + "documentation":"

      The identifier for the given resource type. For Amazon Web Services resources that are identified by Amazon Resource Names (ARNs), this is the ARN. For Amazon Web Services resources that lack ARNs, this is the identifier as defined by the Amazon Web Service that created the resource. For non-Amazon Web Services resources, this is a unique identifier that is associated with the resource.

      " + }, + "ResourcePartition":{ + "shape":"StringFilterList", + "documentation":"

      The partition in which the resource that the finding pertains to is located. A partition is a group of Amazon Web Services Regions. Each Amazon Web Services account is scoped to one partition.

      " + }, + "ResourceRegion":{ + "shape":"StringFilterList", + "documentation":"

      The Amazon Web Services Region where the resource that a finding pertains to is located.

      " + }, + "ResourceTags":{ + "shape":"MapFilterList", + "documentation":"

      A list of Amazon Web Services tags associated with a resource at the time the finding was processed.

      " + }, + "ResourceDetailsOther":{ + "shape":"MapFilterList", + "documentation":"

      Custom fields and values about the resource that a finding pertains to.

      " + }, + "ComplianceStatus":{ + "shape":"StringFilterList", + "documentation":"

      The result of a security check. This field is only used for findings generated from controls.

      " + }, + "ComplianceSecurityControlId":{ + "shape":"StringFilterList", + "documentation":"

      The security control ID for which a finding was generated. Security control IDs are the same across standards.

      " + }, + "ComplianceAssociatedStandardsId":{ + "shape":"StringFilterList", + "documentation":"

      The unique identifier of a standard in which a control is enabled. This field consists of the resource portion of the Amazon Resource Name (ARN) returned for a standard in the DescribeStandards API response.

      " + }, + "VerificationState":{ + "shape":"StringFilterList", + "documentation":"

      Provides the veracity of a finding.

      " + }, + "WorkflowStatus":{ + "shape":"StringFilterList", + "documentation":"

      Provides information about the status of the investigation into a finding.

      " + }, + "RecordState":{ + "shape":"StringFilterList", + "documentation":"

      Provides the current state of a finding.

      " + }, + "RelatedFindingsProductArn":{ + "shape":"StringFilterList", + "documentation":"

      The ARN for the product that generated a related finding.

      " + }, + "RelatedFindingsId":{ + "shape":"StringFilterList", + "documentation":"

      The product-generated identifier for a related finding.

      " + }, + "NoteText":{ + "shape":"StringFilterList", + "documentation":"

      The text of a user-defined note that's added to a finding.

      " + }, + "NoteUpdatedAt":{ + "shape":"DateFilterList", + "documentation":"

      The timestamp of when the note was updated. Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

      " + }, + "NoteUpdatedBy":{ + "shape":"StringFilterList", + "documentation":"

      The principal that created a note.

      " + }, + "UserDefinedFields":{ + "shape":"MapFilterList", + "documentation":"

      A list of user-defined name and value string pairs added to a finding.

      " + } + }, + "documentation":"

      The criteria that determine which findings a rule applies to.

      " + }, + "AutomationRulesMetadata":{ + "type":"structure", + "members":{ + "RuleArn":{ + "shape":"NonEmptyString", + "documentation":"

      The Amazon Resource Name (ARN) for the rule.

      " + }, + "RuleStatus":{ + "shape":"RuleStatus", + "documentation":"

      Whether the rule is active after it is created. If this parameter is equal to ENABLED, Security Hub will apply the rule to findings and finding updates after the rule is created. To change the value of this parameter after creating a rule, use BatchUpdateAutomationRules.

      " + }, + "RuleOrder":{ + "shape":"RuleOrderValue", + "documentation":"

      An integer ranging from 1 to 1000 that represents the order in which the rule action is applied to findings. Security Hub applies rules with lower values for this parameter first.

      " + }, + "RuleName":{ + "shape":"NonEmptyString", + "documentation":"

      The name of the rule.

      " + }, + "Description":{ + "shape":"NonEmptyString", + "documentation":"

      A description of the rule.

      " + }, + "IsTerminal":{ + "shape":"Boolean", + "documentation":"

      Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If the value of this field is set to true for a rule, Security Hub applies the rule action to a finding that matches the rule criteria and won't evaluate other rules for the finding.
 The default value of this field is false.

      " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

      A timestamp that indicates when the rule was created.

      Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

      " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

      A timestamp that indicates when the rule was most recently updated.

      Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

      " + }, + "CreatedBy":{ + "shape":"NonEmptyString", + "documentation":"

      The principal that created a rule.

      " + } + }, + "documentation":"

      Metadata for automation rules in the calling account. The response includes rules with a RuleStatus of ENABLED and DISABLED.

      " + }, + "AutomationRulesMetadataList":{ + "type":"list", + "member":{"shape":"AutomationRulesMetadata"} + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "ZoneName":{ + "shape":"NonEmptyString", + "documentation":"

      The name of the Availability Zone.

      " + }, + "SubnetId":{ + "shape":"NonEmptyString", + "documentation":"

      The ID of the subnet. You can specify one subnet per Availability Zone.

      " + } + }, + "documentation":"

      Information about an Availability Zone.

      " + }, + "AvailabilityZones":{ + "type":"list", + "member":{"shape":"AvailabilityZone"} + }, + "AwsAmazonMqBrokerDetails":{ + "type":"structure", + "members":{ + "AuthenticationStrategy":{ + "shape":"NonEmptyString", + "documentation":"

      The authentication strategy used to secure the broker. The default is SIMPLE.

      " + }, + "AutoMinorVersionUpgrade":{ + "shape":"Boolean", + "documentation":"

      Whether to automatically upgrade to new minor versions for brokers, as new versions are released and supported by Amazon MQ. Automatic upgrades occur during the scheduled maintenance window of the broker or after a manual broker reboot.

      " + }, + "BrokerArn":{ + "shape":"NonEmptyString", + "documentation":"

      The Amazon Resource Name (ARN) of the broker.

      " + }, + "BrokerName":{ + "shape":"NonEmptyString", + "documentation":"

      The broker's name.

      " + }, + "DeploymentMode":{ + "shape":"NonEmptyString", + "documentation":"

      The broker's deployment mode.

      " + }, + "EncryptionOptions":{ + "shape":"AwsAmazonMqBrokerEncryptionOptionsDetails", + "documentation":"

      Encryption options for the broker. Doesn’t apply to RabbitMQ brokers.

      " + }, + "EngineType":{ + "shape":"NonEmptyString", + "documentation":"

      The type of broker engine.

      " + }, + "EngineVersion":{ + "shape":"NonEmptyString", + "documentation":"

      The version of the broker engine.

      " + }, + "HostInstanceType":{ + "shape":"NonEmptyString", + "documentation":"

      The broker's instance type.

      " + }, + "BrokerId":{ + "shape":"NonEmptyString", + "documentation":"

      The unique ID that Amazon MQ generates for the broker.

      " + }, + "LdapServerMetadata":{ + "shape":"AwsAmazonMqBrokerLdapServerMetadataDetails", + "documentation":"

      The metadata of the Lightweight Directory Access Protocol (LDAP) server used to authenticate and authorize connections to the broker. This is an optional failover server.

      " + }, + "Logs":{ + "shape":"AwsAmazonMqBrokerLogsDetails", + "documentation":"

      Turns on Amazon CloudWatch logging for brokers.

      " + }, + "MaintenanceWindowStartTime":{ + "shape":"AwsAmazonMqBrokerMaintenanceWindowStartTimeDetails", + "documentation":"

      The scheduled time period (UTC) during which Amazon MQ begins to apply pending updates or patches to the broker.

      " + }, + "PubliclyAccessible":{ + "shape":"Boolean", + "documentation":"

      Permits connections from applications outside of the VPC that hosts the broker's subnets.

      " + }, + "SecurityGroups":{ + "shape":"StringList", + "documentation":"

      The list of rules (one minimum, 125 maximum) that authorize connections to brokers.

      " + }, + "StorageType":{ + "shape":"NonEmptyString", + "documentation":"

      The broker's storage type.

      " + }, + "SubnetIds":{ + "shape":"StringList", + "documentation":"

      The list of groups that define which subnets and IP ranges the broker can use from different Availability Zones.

      " + }, + "Users":{ + "shape":"AwsAmazonMqBrokerUsersList", + "documentation":"

      The list of all broker usernames for the specified broker. Doesn't apply to RabbitMQ brokers.

      " + } + }, + "documentation":"

      Provides details about an Amazon MQ message broker. A message broker allows software applications and components to communicate using various programming languages, operating systems, and formal messaging protocols.

      " + }, + "AwsAmazonMqBrokerEncryptionOptionsDetails":{ + "type":"structure", + "members":{ + "KmsKeyId":{ + "shape":"NonEmptyString", + "documentation":"

      The KMS key that’s used to encrypt your data at rest. If not provided, Amazon MQ will use a default KMS key to encrypt your data.

      " + }, + "UseAwsOwnedKey":{ + "shape":"Boolean", + "documentation":"

      Specifies that a KMS key should be used for at-rest encryption. Set to true by default if no value is provided (for example, for RabbitMQ brokers).

      " + } + }, + "documentation":"

      Provides details about broker encryption options.

      " + }, + "AwsAmazonMqBrokerLdapServerMetadataDetails":{ + "type":"structure", + "members":{ + "Hosts":{ + "shape":"StringList", + "documentation":"

      Specifies the location of the LDAP server, such as Amazon Web Services Directory Service for Microsoft Active Directory.

      " + }, + "RoleBase":{ + "shape":"NonEmptyString", + "documentation":"

      The distinguished name of the node in the directory information tree (DIT) to search for roles or groups.

      " + }, + "RoleName":{ + "shape":"NonEmptyString", + "documentation":"

      The group name attribute in a role entry whose value is the name of that role.

      " + }, + "RoleSearchMatching":{ + "shape":"NonEmptyString", + "documentation":"

      The LDAP search filter used to find roles within the roleBase.

      " + }, + "RoleSearchSubtree":{ + "shape":"Boolean", + "documentation":"

      The directory search scope for the role. If set to true, the scope is to search the entire subtree.

      " + }, + "ServiceAccountUsername":{ + "shape":"NonEmptyString", + "documentation":"

      A username for the service account, which is an account in your LDAP server that has access to initiate a connection.

      " + }, + "UserBase":{ + "shape":"NonEmptyString", + "documentation":"

      Selects a particular subtree of the directory information tree (DIT) to search for user entries.

      " + }, + "UserRoleName":{ + "shape":"NonEmptyString", + "documentation":"

      The name of the LDAP attribute in the user's directory entry for the user's group membership.

      " + }, + "UserSearchMatching":{ + "shape":"NonEmptyString", + "documentation":"

      The LDAP search filter used to find users within the userBase.

      " + }, + "UserSearchSubtree":{ + "shape":"Boolean", + "documentation":"

      The directory search scope for the user. If set to true, the scope is to search the entire subtree.

      " + } + }, + "documentation":"

      The metadata of the Lightweight Directory Access Protocol (LDAP) server used to authenticate and authorize connections to the broker. This is an optional failover server.

      " + }, + "AwsAmazonMqBrokerLogsDetails":{ + "type":"structure", + "members":{ + "Audit":{ + "shape":"Boolean", + "documentation":"

      Activates audit logging. Every user management action made using JMX or the ActiveMQ Web Console is logged. Doesn't apply to RabbitMQ brokers.

      " + }, + "General":{ + "shape":"Boolean", + "documentation":"

      Activates general logging.

      " + }, + "AuditLogGroup":{ + "shape":"NonEmptyString", + "documentation":"

      The location of the CloudWatch Logs log group where audit logs are sent.

      " + }, + "GeneralLogGroup":{ + "shape":"NonEmptyString", + "documentation":"

      The location of the CloudWatch Logs log group where general logs are sent.

      " + }, + "Pending":{ + "shape":"AwsAmazonMqBrokerLogsPendingDetails", + "documentation":"

      The list of information about logs that are to be turned on for the specified broker.

      " + } + }, + "documentation":"

      Provides information about logs to be activated for the specified broker.

      " + }, + "AwsAmazonMqBrokerLogsPendingDetails":{ + "type":"structure", + "members":{ + "Audit":{ + "shape":"Boolean", + "documentation":"

      Activates audit logging. Every user management action made using JMX or the ActiveMQ Web Console is logged. Doesn't apply to RabbitMQ brokers.

      " + }, + "General":{ + "shape":"Boolean", + "documentation":"

      Activates general logging.

      " + } + }, + "documentation":"

      Provides information about logs to be activated for the specified broker.

      " + }, + "AwsAmazonMqBrokerMaintenanceWindowStartTimeDetails":{ "type":"structure", "members":{ - "State":{ + "DayOfWeek":{ "shape":"NonEmptyString", - "documentation":"

      The state of the association.

      " + "documentation":"

      The day of the week on which the maintenance window falls.

      " }, - "StatusMessage":{ + "TimeOfDay":{ "shape":"NonEmptyString", - "documentation":"

      The status message, if applicable.

      " + "documentation":"

      The time, in 24-hour format, on which the maintenance window falls.

      " + }, + "TimeZone":{ + "shape":"NonEmptyString", + "documentation":"

      The time zone in either the Country/City format or the UTC offset format. UTC is the default format.

      " } }, - "documentation":"

      Describes the state of an association between a route table and a subnet or gateway.

      " - }, - "AssociationStatus":{ - "type":"string", - "enum":[ - "ENABLED", - "DISABLED" - ] - }, - "AutoEnableStandards":{ - "type":"string", - "enum":[ - "NONE", - "DEFAULT" - ] + "documentation":"

      The scheduled time period (UTC) during which Amazon MQ begins to apply pending updates or patches to the broker.

      " }, - "AvailabilityZone":{ + "AwsAmazonMqBrokerUsersDetails":{ "type":"structure", "members":{ - "ZoneName":{ + "PendingChange":{ "shape":"NonEmptyString", - "documentation":"

      The name of the Availability Zone.

      " + "documentation":"

      The type of change pending for the broker user.

      " }, - "SubnetId":{ + "Username":{ "shape":"NonEmptyString", - "documentation":"

      The ID of the subnet. You can specify one subnet per Availability Zone.

      " + "documentation":"

      The username of the broker user.

      " } }, - "documentation":"

      Information about an Availability Zone.

      " + "documentation":"

      Provides details about the broker usernames for the specified broker. Doesn't apply to RabbitMQ brokers.

      " }, - "AvailabilityZones":{ + "AwsAmazonMqBrokerUsersList":{ "type":"list", - "member":{"shape":"AvailabilityZone"} + "member":{"shape":"AwsAmazonMqBrokerUsersDetails"} }, "AwsApiCallAction":{ "type":"structure", @@ -1775,6 +2384,166 @@ }, "documentation":"

      Contains information about a version 2 stage for Amazon API Gateway.

      " }, + "AwsAppSyncGraphQlApiAdditionalAuthenticationProvidersDetails":{ + "type":"structure", + "members":{ + "AuthenticationType":{ + "shape":"NonEmptyString", + "documentation":"

      The type of security configuration for your GraphQL API: API key, Identity and Access Management (IAM), OpenID Connect (OIDC), Amazon Cognito user pools, or Lambda.

      " + }, + "LambdaAuthorizerConfig":{ + "shape":"AwsAppSyncGraphQlApiLambdaAuthorizerConfigDetails", + "documentation":"

      The configuration for Lambda function authorization.

      " + }, + "OpenIdConnectConfig":{ + "shape":"AwsAppSyncGraphQlApiOpenIdConnectConfigDetails", + "documentation":"

      The OpenID Connect configuration.

      " + }, + "UserPoolConfig":{ + "shape":"AwsAppSyncGraphQlApiUserPoolConfigDetails", + "documentation":"

      The Amazon Cognito user pools configuration.

      " + } + }, + "documentation":"

      A list of additional authentication providers for the GraphqlApi API.

      " + }, + "AwsAppSyncGraphQlApiAdditionalAuthenticationProvidersList":{ + "type":"list", + "member":{"shape":"AwsAppSyncGraphQlApiAdditionalAuthenticationProvidersDetails"} + }, + "AwsAppSyncGraphQlApiDetails":{ + "type":"structure", + "members":{ + "ApiId":{ + "shape":"NonEmptyString", + "documentation":"

      The unique identifier for the API.

      " + }, + "Id":{ + "shape":"NonEmptyString", + "documentation":"

      The unique identifier for the API.

      " + }, + "OpenIdConnectConfig":{ + "shape":"AwsAppSyncGraphQlApiOpenIdConnectConfigDetails", + "documentation":"

      Specifies the authorization configuration for using an OpenID Connect compliant service with an AppSync GraphQL API endpoint.

      " + }, + "Name":{ + "shape":"NonEmptyString", + "documentation":"

      The API name.

      " + }, + "LambdaAuthorizerConfig":{ + "shape":"AwsAppSyncGraphQlApiLambdaAuthorizerConfigDetails", + "documentation":"

      Specifies the configuration for Lambda function authorization.

      " + }, + "XrayEnabled":{ + "shape":"Boolean", + "documentation":"

      Indicates whether to use X-Ray tracing for the GraphQL API.

      " + }, + "Arn":{ + "shape":"NonEmptyString", + "documentation":"

      The Amazon Resource Name (ARN) of the API.

      " + }, + "UserPoolConfig":{ + "shape":"AwsAppSyncGraphQlApiUserPoolConfigDetails", + "documentation":"

      The Amazon Cognito user pools configuration.

      " + }, + "AuthenticationType":{ + "shape":"NonEmptyString", + "documentation":"

      The type of security configuration for your GraphQL API: API key, Identity and Access Management (IAM), OpenID Connect (OIDC), Amazon Cognito user pools, or Lambda.

      " + }, + "LogConfig":{ + "shape":"AwsAppSyncGraphQlApiLogConfigDetails", + "documentation":"

      The Amazon CloudWatch Logs configuration.

      " + }, + "AdditionalAuthenticationProviders":{ + "shape":"AwsAppSyncGraphQlApiAdditionalAuthenticationProvidersList", + "documentation":"

      A list of additional authentication providers for the GraphQL API.

      " + }, + "WafWebAclArn":{ + "shape":"NonEmptyString", + "documentation":"

      The Amazon Resource Name (ARN) of the WAF web access control list (web ACL) associated with this GraphQL API, if one exists.

      " + } + }, + "documentation":"

      Provides details about an AppSync GraphQL API, which lets you query multiple databases, microservices, and APIs from a single GraphQL endpoint.

      " + }, + "AwsAppSyncGraphQlApiLambdaAuthorizerConfigDetails":{ + "type":"structure", + "members":{ + "AuthorizerResultTtlInSeconds":{ + "shape":"Integer", + "documentation":"

      The number of seconds a response should be cached for. The default is 5 minutes (300 seconds).

      " + }, + "AuthorizerUri":{ + "shape":"NonEmptyString", + "documentation":"

      The Amazon Resource Name (ARN) of the Lambda function to be called for authorization. This can be a standard Lambda ARN, a version ARN (.../v3), or an alias ARN.

      " + }, + "IdentityValidationExpression":{ + "shape":"NonEmptyString", + "documentation":"

      A regular expression for validation of tokens before the Lambda function is called.

      " + } + }, + "documentation":"

      Specifies the authorization configuration for using a Lambda function with your AppSync GraphQL API endpoint.

      " + }, + "AwsAppSyncGraphQlApiLogConfigDetails":{ + "type":"structure", + "members":{ + "CloudWatchLogsRoleArn":{ + "shape":"NonEmptyString", + "documentation":"

      The Amazon Resource Name (ARN) of the service role that AppSync assumes to publish to CloudWatch Logs in your account.

      " + }, + "ExcludeVerboseContent":{ + "shape":"Boolean", + "documentation":"

      Set to TRUE to exclude sections that contain information such as headers, context, and evaluated mapping templates, regardless of logging level.

      " + }, + "FieldLogLevel":{ + "shape":"NonEmptyString", + "documentation":"

      The field logging level.

      " + } + }, + "documentation":"

      Specifies the logging configuration when writing GraphQL operations and tracing to Amazon CloudWatch for an AppSync GraphQL API.

      " + }, + "AwsAppSyncGraphQlApiOpenIdConnectConfigDetails":{ + "type":"structure", + "members":{ + "AuthTtL":{ + "shape":"Long", + "documentation":"

      The number of milliseconds that a token is valid after being authenticated.

      " + }, + "ClientId":{ + "shape":"NonEmptyString", + "documentation":"

      The client identifier of the relying party at the OpenID identity provider. This identifier is typically obtained when the relying party is registered with the OpenID identity provider. You can specify a regular expression so that AppSync can validate against multiple client identifiers at a time.

      " + }, + "IatTtL":{ + "shape":"Long", + "documentation":"

      The number of milliseconds that a token is valid after it's issued to a user.

      " + }, + "Issuer":{ + "shape":"NonEmptyString", + "documentation":"

      The issuer for the OIDC configuration. The issuer returned by discovery must exactly match the value of iss in the ID token.

      " + } + }, + "documentation":"

      Specifies the authorization configuration for using an OpenID Connect compliant service with your AppSync GraphQL API endpoint.

      " + }, + "AwsAppSyncGraphQlApiUserPoolConfigDetails":{ + "type":"structure", + "members":{ + "AppIdClientRegex":{ + "shape":"NonEmptyString", + "documentation":"

      A regular expression for validating the incoming Amazon Cognito user pools app client ID. If this value isn't set, no filtering is applied.

      " + }, + "AwsRegion":{ + "shape":"NonEmptyString", + "documentation":"

      The Amazon Web Services Region in which the user pool was created.

      " + }, + "DefaultAction":{ + "shape":"NonEmptyString", + "documentation":"

      The action that you want your GraphQL API to take when a request that uses Amazon Cognito user pools authentication doesn't match the Amazon Cognito user pools configuration.

      " + }, + "UserPoolId":{ + "shape":"NonEmptyString", + "documentation":"

      The user pool ID.

      " + } + }, + "documentation":"

      Specifies the authorization configuration for using Amazon Cognito user pools with your AppSync GraphQL API endpoint.

      " + }, "AwsAutoScalingAutoScalingGroupAvailabilityZonesList":{ "type":"list", "member":{"shape":"AwsAutoScalingAutoScalingGroupAvailabilityZonesListDetails"} @@ -8006,75 +8775,265 @@ "documentation":"

      The owner of the security group.

      " } }, - "documentation":"

      Contains information about the security group for the load balancer.

      " + "documentation":"

      Contains information about the security group for the load balancer.

      " + }, + "AwsElbv2LoadBalancerAttribute":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"NonEmptyString", + "documentation":"

      The name of the load balancer attribute.

      " + }, + "Value":{ + "shape":"NonEmptyString", + "documentation":"

      The value of the load balancer attribute.

      " + } + }, + "documentation":"

      A load balancer attribute.

      " + }, + "AwsElbv2LoadBalancerAttributes":{ + "type":"list", + "member":{"shape":"AwsElbv2LoadBalancerAttribute"} + }, + "AwsElbv2LoadBalancerDetails":{ + "type":"structure", + "members":{ + "AvailabilityZones":{ + "shape":"AvailabilityZones", + "documentation":"

      The Availability Zones for the load balancer.

      " + }, + "CanonicalHostedZoneId":{ + "shape":"NonEmptyString", + "documentation":"

      The ID of the Amazon Route 53 hosted zone associated with the load balancer.

      " + }, + "CreatedTime":{ + "shape":"NonEmptyString", + "documentation":"

      Indicates when the load balancer was created.

      Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces, and date and time should be separated by T. For example, 2020-03-22T13:22:13.933Z.

      " + }, + "DNSName":{ + "shape":"NonEmptyString", + "documentation":"

      The public DNS name of the load balancer.

      " + }, + "IpAddressType":{ + "shape":"NonEmptyString", + "documentation":"

      The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses).

      " + }, + "Scheme":{ + "shape":"NonEmptyString", + "documentation":"

      The nodes of an Internet-facing load balancer have public IP addresses.

      " + }, + "SecurityGroups":{ + "shape":"SecurityGroups", + "documentation":"

      The IDs of the security groups for the load balancer.

      " + }, + "State":{ + "shape":"LoadBalancerState", + "documentation":"

      The state of the load balancer.

      " + }, + "Type":{ + "shape":"NonEmptyString", + "documentation":"

      The type of load balancer.

      " + }, + "VpcId":{ + "shape":"NonEmptyString", + "documentation":"

      The ID of the VPC for the load balancer.

      " + }, + "LoadBalancerAttributes":{ + "shape":"AwsElbv2LoadBalancerAttributes", + "documentation":"

      Attributes of the load balancer.

      " + } + }, + "documentation":"

      Information about a load balancer.

      " + }, + "AwsEventSchemasRegistryDetails":{ + "type":"structure", + "members":{ + "Description":{ + "shape":"NonEmptyString", + "documentation":"

      A description of the registry to be created.

      " + }, + "RegistryArn":{ + "shape":"NonEmptyString", + "documentation":"

      The Amazon Resource Name (ARN) of the registry.

      " + }, + "RegistryName":{ + "shape":"NonEmptyString", + "documentation":"

      The name of the schema registry.

      " + } + }, + "documentation":"

      A schema defines the structure of events that are sent to Amazon EventBridge. Schema registries are containers for schemas. They collect and organize schemas so that your schemas are in logical groups.

      " + }, + "AwsGuardDutyDetectorDataSourcesCloudTrailDetails":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"NonEmptyString", + "documentation":"

      Specifies whether CloudTrail is activated as a data source for the detector.

      " + } + }, + "documentation":"

      An object that contains information on the status of CloudTrail as a data source for the detector.

      " + }, + "AwsGuardDutyDetectorDataSourcesDetails":{ + "type":"structure", + "members":{ + "CloudTrail":{ + "shape":"AwsGuardDutyDetectorDataSourcesCloudTrailDetails", + "documentation":"

      An object that contains information on the status of CloudTrail as a data source for the detector.

      " + }, + "DnsLogs":{ + "shape":"AwsGuardDutyDetectorDataSourcesDnsLogsDetails", + "documentation":"

      An object that contains information on the status of DNS logs as a data source for the detector.

      " + }, + "FlowLogs":{ + "shape":"AwsGuardDutyDetectorDataSourcesFlowLogsDetails", + "documentation":"

      An object that contains information on the status of VPC Flow Logs as a data source for the detector.

      " + }, + "Kubernetes":{ + "shape":"AwsGuardDutyDetectorDataSourcesKubernetesDetails", + "documentation":"

      An object that contains information on the status of Kubernetes data sources for the detector.

      " + }, + "MalwareProtection":{ + "shape":"AwsGuardDutyDetectorDataSourcesMalwareProtectionDetails", + "documentation":"

      An object that contains information on the status of Malware Protection as a data source for the detector.

      " + }, + "S3Logs":{ + "shape":"AwsGuardDutyDetectorDataSourcesS3LogsDetails", + "documentation":"

      An object that contains information on the status of S3 Data event logs as a data source for the detector.

      " + } + }, + "documentation":"

      Describes which data sources are activated for the detector.

      " + }, + "AwsGuardDutyDetectorDataSourcesDnsLogsDetails":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"NonEmptyString", + "documentation":"

      Describes whether DNS logs is enabled as a data source for the detector.

      " + } + }, + "documentation":"

      An object that contains information on the status of DNS logs as a data source for the detector.

      " + }, + "AwsGuardDutyDetectorDataSourcesFlowLogsDetails":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"NonEmptyString", + "documentation":"

      Describes whether VPC Flow Logs are activated as a data source for the detector.

      " + } + }, + "documentation":"

      An object that contains information on the status of VPC Flow Logs as a data source for the detector.

      " + }, + "AwsGuardDutyDetectorDataSourcesKubernetesAuditLogsDetails":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"NonEmptyString", + "documentation":"

      Describes whether Kubernetes audit logs are activated as a data source for the detector.

      " + } + }, + "documentation":"

      An object that contains information on the status of Kubernetes audit logs as a data source for the detector.

      " + }, + "AwsGuardDutyDetectorDataSourcesKubernetesDetails":{ + "type":"structure", + "members":{ + "AuditLogs":{ + "shape":"AwsGuardDutyDetectorDataSourcesKubernetesAuditLogsDetails", + "documentation":"

      Describes whether Kubernetes audit logs are activated as a data source for the detector.

      " + } + }, + "documentation":"

      An object that contains information on the status of Kubernetes data sources for the detector.

      " + }, + "AwsGuardDutyDetectorDataSourcesMalwareProtectionDetails":{ + "type":"structure", + "members":{ + "ScanEc2InstanceWithFindings":{ + "shape":"AwsGuardDutyDetectorDataSourcesMalwareProtectionScanEc2InstanceWithFindingsDetails", + "documentation":"

      Describes the configuration of Malware Protection for EC2 instances with findings.

      " + }, + "ServiceRole":{ + "shape":"NonEmptyString", + "documentation":"

      The GuardDuty Malware Protection service role.

      " + } + }, + "documentation":"

      An object that contains information on the status of Malware Protection as a data source for the detector.

      " + }, + "AwsGuardDutyDetectorDataSourcesMalwareProtectionScanEc2InstanceWithFindingsDetails":{ + "type":"structure", + "members":{ + "EbsVolumes":{ + "shape":"AwsGuardDutyDetectorDataSourcesMalwareProtectionScanEc2InstanceWithFindingsEbsVolumesDetails", + "documentation":"

      Describes the configuration of scanning EBS volumes (Malware Protection) as a data source.

      " + } + }, + "documentation":"

      Describes the configuration of Malware Protection for EC2 instances with findings.

      " }, - "AwsElbv2LoadBalancerAttribute":{ + "AwsGuardDutyDetectorDataSourcesMalwareProtectionScanEc2InstanceWithFindingsEbsVolumesDetails":{ "type":"structure", "members":{ - "Key":{ + "Reason":{ "shape":"NonEmptyString", - "documentation":"

      The name of the load balancer attribute.

      " + "documentation":"

      Specifies the reason why scanning EBS volumes (Malware Protection) isn’t activated as a data source.

      " }, - "Value":{ + "Status":{ "shape":"NonEmptyString", - "documentation":"

      The value of the load balancer attribute.

      " + "documentation":"

      Describes whether scanning EBS volumes is activated as a data source for the detector.

      " } }, - "documentation":"

      A load balancer attribute.

      " - }, - "AwsElbv2LoadBalancerAttributes":{ - "type":"list", - "member":{"shape":"AwsElbv2LoadBalancerAttribute"} + "documentation":"

      Describes the configuration of scanning EBS volumes (Malware Protection) as a data source.

      " }, - "AwsElbv2LoadBalancerDetails":{ + "AwsGuardDutyDetectorDataSourcesS3LogsDetails":{ "type":"structure", "members":{ - "AvailabilityZones":{ - "shape":"AvailabilityZones", - "documentation":"

      The Availability Zones for the load balancer.

      " - }, - "CanonicalHostedZoneId":{ + "Status":{ "shape":"NonEmptyString", - "documentation":"

      The ID of the Amazon Route 53 hosted zone associated with the load balancer.

      " + "documentation":"

      A value that describes whether S3 data event logs are automatically enabled for new members of an organization.

      " + } + }, + "documentation":"

      An object that contains information on the status of S3 data event logs as a data source for the detector.

      " + }, + "AwsGuardDutyDetectorDetails":{ + "type":"structure", + "members":{ + "DataSources":{ + "shape":"AwsGuardDutyDetectorDataSourcesDetails", + "documentation":"

      Describes which data sources are activated for the detector.

      " }, - "CreatedTime":{ - "shape":"NonEmptyString", - "documentation":"

      Indicates when the load balancer was created.

      Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces, and date and time should be separated by T. For example, 2020-03-22T13:22:13.933Z.

      " + "Features":{ + "shape":"AwsGuardDutyDetectorFeaturesList", + "documentation":"

      Describes which features are activated for the detector.

      " }, - "DNSName":{ + "FindingPublishingFrequency":{ "shape":"NonEmptyString", - "documentation":"

      The public DNS name of the load balancer.

      " + "documentation":"

      The publishing frequency of the finding.

      " }, - "IpAddressType":{ + "ServiceRole":{ "shape":"NonEmptyString", - "documentation":"

      The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses).

      " + "documentation":"

      The GuardDuty service role.

      " }, - "Scheme":{ + "Status":{ "shape":"NonEmptyString", - "documentation":"

      The nodes of an Internet-facing load balancer have public IP addresses.

      " - }, - "SecurityGroups":{ - "shape":"SecurityGroups", - "documentation":"

      The IDs of the security groups for the load balancer.

      " - }, - "State":{ - "shape":"LoadBalancerState", - "documentation":"

      The state of the load balancer.

      " - }, - "Type":{ + "documentation":"

      The activation status of the detector.

      " + } + }, + "documentation":"

      Provides details about an Amazon GuardDuty detector. A detector is an object that represents the GuardDuty service. A detector is required for GuardDuty to become operational.

      " + }, + "AwsGuardDutyDetectorFeaturesDetails":{ + "type":"structure", + "members":{ + "Name":{ "shape":"NonEmptyString", - "documentation":"

      The type of load balancer.

      " + "documentation":"

      Indicates the name of the feature that is activated for the detector.

      " }, - "VpcId":{ + "Status":{ "shape":"NonEmptyString", - "documentation":"

      The ID of the VPC for the load balancer.

      " - }, - "LoadBalancerAttributes":{ - "shape":"AwsElbv2LoadBalancerAttributes", - "documentation":"

      Attributes of the load balancer.

      " + "documentation":"

      Indicates the status of the feature that is activated for the detector.

      " } }, - "documentation":"

      Information about a load balancer.

      " + "documentation":"

      Describes which features are activated for the detector.

      " + }, + "AwsGuardDutyDetectorFeaturesList":{ + "type":"list", + "member":{"shape":"AwsGuardDutyDetectorFeaturesDetails"} }, "AwsIamAccessKeyDetails":{ "type":"structure", @@ -12351,6 +13310,96 @@ }, "documentation":"

      Provides information about the state of a patch on an instance based on the patch baseline that was used to patch the instance.

      " }, + "AwsStepFunctionStateMachineDetails":{ + "type":"structure", + "members":{ + "Label":{ + "shape":"NonEmptyString", + "documentation":"

      A user-defined or an auto-generated string that identifies a Map state. This parameter is present only if the stateMachineArn specified in input is a qualified state machine ARN.

      " + }, + "LoggingConfiguration":{ + "shape":"AwsStepFunctionStateMachineLoggingConfigurationDetails", + "documentation":"

      Used to set CloudWatch Logs options.

      " + }, + "Name":{ + "shape":"NonEmptyString", + "documentation":"

      The name of the state machine.

      " + }, + "RoleArn":{ + "shape":"NonEmptyString", + "documentation":"

      The Amazon Resource Name (ARN) of the IAM role used when creating this state machine.

      " + }, + "StateMachineArn":{ + "shape":"NonEmptyString", + "documentation":"

      The ARN that identifies the state machine.

      " + }, + "Status":{ + "shape":"NonEmptyString", + "documentation":"

      The current status of the state machine.

      " + }, + "TracingConfiguration":{ + "shape":"AwsStepFunctionStateMachineTracingConfigurationDetails", + "documentation":"

      Specifies whether X-Ray tracing is enabled.

      " + }, + "Type":{ + "shape":"NonEmptyString", + "documentation":"

      The type of the state machine (STANDARD or EXPRESS).

      " + } + }, + "documentation":"

      Provides details about a Step Functions state machine, which is a workflow consisting of a series of event-driven steps.

      " + }, + "AwsStepFunctionStateMachineLoggingConfigurationDestinationsCloudWatchLogsLogGroupDetails":{ + "type":"structure", + "members":{ + "LogGroupArn":{ + "shape":"NonEmptyString", + "documentation":"

      The ARN (ends with :*) of the CloudWatch Logs log group to which you want your logs emitted.

      " + } + }, + "documentation":"

      An object describing a CloudWatch log group. For more information, see Amazon Web Services::Logs::LogGroup in the CloudFormation User Guide.

      " + }, + "AwsStepFunctionStateMachineLoggingConfigurationDestinationsDetails":{ + "type":"structure", + "members":{ + "CloudWatchLogsLogGroup":{ + "shape":"AwsStepFunctionStateMachineLoggingConfigurationDestinationsCloudWatchLogsLogGroupDetails", + "documentation":"

      An object describing a CloudWatch Logs log group. For more information, see Amazon Web Services::Logs::LogGroup in the CloudFormation User Guide.

      " + } + }, + "documentation":"

      An array of objects that describes where your execution history events will be logged.

      " + }, + "AwsStepFunctionStateMachineLoggingConfigurationDestinationsList":{ + "type":"list", + "member":{"shape":"AwsStepFunctionStateMachineLoggingConfigurationDestinationsDetails"} + }, + "AwsStepFunctionStateMachineLoggingConfigurationDetails":{ + "type":"structure", + "members":{ + "Destinations":{ + "shape":"AwsStepFunctionStateMachineLoggingConfigurationDestinationsList", + "documentation":"

      An array of objects that describes where your execution history events will be logged.

      " + }, + "IncludeExecutionData":{ + "shape":"Boolean", + "documentation":"

      Determines whether execution data is included in your log. When set to false, data is excluded.

      " + }, + "Level":{ + "shape":"NonEmptyString", + "documentation":"

      Defines which category of execution history events are logged.

      " + } + }, + "documentation":"

      The LoggingConfiguration data type is used to set CloudWatch Logs options.

      " + }, + "AwsStepFunctionStateMachineTracingConfigurationDetails":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

      When set to true, X-Ray tracing is enabled.

      " + } + }, + "documentation":"

      Specifies whether X-Ray tracing is enabled.

      " + }, "AwsWafRateBasedRuleDetails":{ "type":"structure", "members":{ @@ -13083,6 +14132,29 @@ }, "documentation":"

      Information about the encryption configuration for X-Ray.

      " }, + "BatchDeleteAutomationRulesRequest":{ + "type":"structure", + "required":["AutomationRulesArns"], + "members":{ + "AutomationRulesArns":{ + "shape":"AutomationRulesArnsList", + "documentation":"

      A list of Amazon Resource Names (ARNs) for the rules that are to be deleted.

      " + } + } + }, + "BatchDeleteAutomationRulesResponse":{ + "type":"structure", + "members":{ + "ProcessedAutomationRules":{ + "shape":"AutomationRulesArnsList", + "documentation":"

      A list of properly processed rule ARNs.

      " + }, + "UnprocessedAutomationRules":{ + "shape":"UnprocessedAutomationRulesList", + "documentation":"

      A list of objects containing RuleArn, ErrorCode, and ErrorMessage. This parameter tells you which automation rules the request didn't delete and why.

      " + } + } + }, "BatchDisableStandardsRequest":{ "type":"structure", "required":["StandardsSubscriptionArns"], @@ -13121,6 +14193,29 @@ } } }, + "BatchGetAutomationRulesRequest":{ + "type":"structure", + "required":["AutomationRulesArns"], + "members":{ + "AutomationRulesArns":{ + "shape":"AutomationRulesArnsList", + "documentation":"

      A list of rule ARNs to get details for.

      " + } + } + }, + "BatchGetAutomationRulesResponse":{ + "type":"structure", + "members":{ + "Rules":{ + "shape":"AutomationRulesConfigList", + "documentation":"

      A list of rule details for the provided rule ARNs.

      " + }, + "UnprocessedAutomationRules":{ + "shape":"UnprocessedAutomationRulesList", + "documentation":"

      A list of objects containing RuleArn, ErrorCode, and ErrorMessage. This parameter tells you which automation rules the request didn't retrieve and why.

      " + } + } + }, "BatchGetSecurityControlsRequest":{ "type":"structure", "required":["SecurityControlIds"], @@ -13206,6 +14301,29 @@ } } }, + "BatchUpdateAutomationRulesRequest":{ + "type":"structure", + "required":["UpdateAutomationRulesRequestItems"], + "members":{ + "UpdateAutomationRulesRequestItems":{ + "shape":"UpdateAutomationRulesRequestItemsList", + "documentation":"

      An array of ARNs for the rules that are to be updated. Optionally, you can also include RuleStatus and RuleOrder.

      " + } + } + }, + "BatchUpdateAutomationRulesResponse":{ + "type":"structure", + "members":{ + "ProcessedAutomationRules":{ + "shape":"AutomationRulesArnsList", + "documentation":"

      A list of properly processed rule ARNs.

      " + }, + "UnprocessedAutomationRules":{ + "shape":"UnprocessedAutomationRulesList", + "documentation":"

      A list of objects containing RuleArn, ErrorCode, and ErrorMessage. This parameter tells you which automation rules the request didn't update and why.

      " + } + } + }, "BatchUpdateFindingsRequest":{ "type":"structure", "required":["FindingIdentifiers"], @@ -13562,6 +14680,59 @@ } } }, + "CreateAutomationRuleRequest":{ + "type":"structure", + "required":[ + "RuleOrder", + "RuleName", + "Description", + "Criteria", + "Actions" + ], + "members":{ + "Tags":{ + "shape":"TagMap", + "documentation":"

      User-defined tags that help you label the purpose of a rule.

      " + }, + "RuleStatus":{ + "shape":"RuleStatus", + "documentation":"

      Whether the rule is active after it is created. If this parameter is equal to Enabled, Security Hub will apply the rule to findings and finding updates after the rule is created. To change the value of this parameter after creating a rule, use BatchUpdateAutomationRules.

      " + }, + "RuleOrder":{ + "shape":"RuleOrderValue", + "documentation":"

      An integer ranging from 1 to 1000 that represents the order in which the rule action is applied to findings. Security Hub applies rules with lower values for this parameter first.

      " + }, + "RuleName":{ + "shape":"NonEmptyString", + "documentation":"

      The name of the rule.

      " + }, + "Description":{ + "shape":"NonEmptyString", + "documentation":"

      A description of the rule.

      " + }, + "IsTerminal":{ + "shape":"Boolean", + "documentation":"

      Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If the value of this field is set to true for a rule, Security Hub applies the rule action to a finding that matches the rule criteria and won't evaluate other rules for the finding. The default value of this field is false.

      " + }, + "Criteria":{ + "shape":"AutomationRulesFindingFilters", + "documentation":"

      A set of ASFF finding field attributes and corresponding expected values that Security Hub uses to filter findings. If a finding matches the conditions specified in this parameter, Security Hub applies the rule action to the finding.

      " + }, + "Actions":{ + "shape":"ActionList", + "documentation":"

      One or more actions to update finding fields if a finding matches the conditions specified in Criteria.

      " + } + } + }, + "CreateAutomationRuleResponse":{ + "type":"structure", + "members":{ + "RuleArn":{ + "shape":"NonEmptyString", + "documentation":"

      The Amazon Resource Name (ARN) of the automation rule that you created.

      " + } + } + }, "CreateFindingAggregatorRequest":{ "type":"structure", "required":["RegionLinkingMode"], @@ -14287,7 +15458,7 @@ "FindingIdentifier":{"shape":"AwsSecurityFindingIdentifier"}, "UpdateTime":{ "shape":"Timestamp", - "documentation":"

      An ISO 8601-formatted timestamp that indicates when the security findings provider last updated the finding record. A correctly formatted example is 2020-05-21T20:16:34.724Z. The value cannot contain spaces, and date and time should be separated by T. For more information, see RFC 3339 section 5.6, Internet Date/Time Format.

      " + "documentation":"

      An ISO 8601-formatted timestamp that indicates when Security Hub processed the updated finding record.

      A correctly formatted example is 2020-05-21T20:16:34.724Z. The value cannot contain spaces, and date and time should be separated by T. For more information, see RFC 3339 section 5.6, Internet Date/Time Format.

      " }, "FindingCreated":{ "shape":"Boolean", @@ -15026,6 +16197,36 @@ "error":{"httpStatusCode":429}, "exception":true }, + "ListAutomationRulesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

      A token to specify where to start paginating the response. This is the NextToken from a previously truncated response. On your first call to the ListAutomationRules API, set the value of this parameter to NULL.

      ", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

      The maximum number of rules to return in the response. This currently ranges from 1 to 100.

      ", + "location":"querystring", + "locationName":"MaxResults" + } + } + }, + "ListAutomationRulesResponse":{ + "type":"structure", + "members":{ + "AutomationRulesMetadata":{ + "shape":"AutomationRulesMetadataList", + "documentation":"

      Metadata for rules in the calling account. The response includes rules with a RuleStatus of ENABLED and DISABLED.

      " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

      A pagination token for the response.

      " + } + } + }, "ListEnabledProductsForImportRequest":{ "type":"structure", "members":{ @@ -16404,6 +17605,26 @@ "AwsEc2RouteTable":{ "shape":"AwsEc2RouteTableDetails", "documentation":"

      Provides details about a route table. A route table contains a set of rules, called routes, that determine where to direct network traffic from your subnet or gateway.

      " + }, + "AwsAmazonMqBroker":{ + "shape":"AwsAmazonMqBrokerDetails", + "documentation":"

      Provides details about an Amazon MQ message broker. A message broker allows software applications and components to communicate using various programming languages, operating systems, and formal messaging protocols.

      " + }, + "AwsAppSyncGraphQlApi":{ + "shape":"AwsAppSyncGraphQlApiDetails", + "documentation":"

      Provides details about an AppSync GraphQL API, which lets you query multiple databases, microservices, and APIs from a single GraphQL endpoint.

      " + }, + "AwsEventSchemasRegistry":{ + "shape":"AwsEventSchemasRegistryDetails", + "documentation":"

      A schema defines the structure of events that are sent to Amazon EventBridge. Schema registries are containers for schemas. They collect and organize schemas so that your schemas are in logical groups.

      " + }, + "AwsGuardDutyDetector":{ + "shape":"AwsGuardDutyDetectorDetails", + "documentation":"

      Provides details about an Amazon GuardDuty detector. A detector is an object that represents the GuardDuty service. A detector is required for GuardDuty to become operational.

      " + }, + "AwsStepFunctionStateMachine":{ + "shape":"AwsStepFunctionStateMachineDetails", + "documentation":"

      Provides details about a Step Functions state machine, which is a workflow consisting of a series of event-driven steps.

      " } }, "documentation":"

      Additional details about a resource related to a finding.

      To provide the details, use the object that corresponds to the resource type. For example, if the resource type is AwsEc2Instance, then you use the AwsEc2Instance object to provide the details.

      If the type-specific object does not contain all of the fields you want to populate, then you use the Other object to populate those additional fields.

      You also use the Other object to populate the details when the selected type does not have a corresponding object.

      " @@ -16856,6 +18077,18 @@ }, "documentation":"

      A list of port ranges.

      " }, + "RuleOrderValue":{ + "type":"integer", + "max":1000, + "min":1 + }, + "RuleStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "SecurityControl":{ "type":"structure", "required":[ @@ -17713,6 +18946,28 @@ "type":"list", "member":{"shape":"NonEmptyString"} }, + "UnprocessedAutomationRule":{ + "type":"structure", + "members":{ + "RuleArn":{ + "shape":"NonEmptyString", + "documentation":"

      The Amazon Resource Name (ARN) for the unprocessed automation rule.

      " + }, + "ErrorCode":{ + "shape":"Integer", + "documentation":"

      The error code associated with the unprocessed automation rule.

      " + }, + "ErrorMessage":{ + "shape":"NonEmptyString", + "documentation":"

      An error message describing why a request didn't process a specific rule.

      " + } + }, + "documentation":"

      A list of objects containing RuleArn, ErrorCode, and ErrorMessage. This parameter tells you which automation rules the request didn't process and why.

      " + }, + "UnprocessedAutomationRulesList":{ + "type":"list", + "member":{"shape":"UnprocessedAutomationRule"} + }, "UnprocessedErrorCode":{ "type":"string", "enum":[ @@ -17851,6 +19106,51 @@ "members":{ } }, + "UpdateAutomationRulesRequestItem":{ + "type":"structure", + "required":["RuleArn"], + "members":{ + "RuleArn":{ + "shape":"NonEmptyString", + "documentation":"

      The Amazon Resource Name (ARN) for the rule.

      " + }, + "RuleStatus":{ + "shape":"RuleStatus", + "documentation":"

      Whether the rule is active after it is created. If this parameter is equal to ENABLED, Security Hub will apply the rule to findings and finding updates after the rule is created. To change the value of this parameter after creating a rule, use BatchUpdateAutomationRules.

      " + }, + "RuleOrder":{ + "shape":"RuleOrderValue", + "documentation":"

      An integer ranging from 1 to 1000 that represents the order in which the rule action is applied to findings. Security Hub applies rules with lower values for this parameter first.

      " + }, + "Description":{ + "shape":"NonEmptyString", + "documentation":"

      A description of the rule.

      " + }, + "RuleName":{ + "shape":"NonEmptyString", + "documentation":"

      The name of the rule.

      " + }, + "IsTerminal":{ + "shape":"Boolean", + "documentation":"

      Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If the value of this field is set to true for a rule, Security Hub applies the rule action to a finding that matches the rule criteria and won't evaluate other rules for the finding.
 The default value of this field is false.

      " + }, + "Criteria":{ + "shape":"AutomationRulesFindingFilters", + "documentation":"

      A set of ASFF finding field attributes and corresponding expected values that Security Hub uses to filter findings. If a finding matches the conditions specified in this parameter, Security Hub applies the rule action to the finding.

      " + }, + "Actions":{ + "shape":"ActionList", + "documentation":"

      One or more actions to update finding fields if a finding matches the conditions specified in Criteria.

      " + } + }, + "documentation":"

      Specifies the parameters to update in an existing automation rule.

      " + }, + "UpdateAutomationRulesRequestItemsList":{ + "type":"list", + "member":{"shape":"UpdateAutomationRulesRequestItem"}, + "max":100, + "min":1 + }, "UpdateFindingAggregatorRequest":{ "type":"structure", "required":[ diff --git a/services/securitylake/pom.xml b/services/securitylake/pom.xml index 71454c5621e3..709e00bb78aa 100644 --- a/services/securitylake/pom.xml +++ b/services/securitylake/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT securitylake AWS Java SDK :: Services :: Security Lake diff --git a/services/securitylake/src/main/resources/codegen-resources/endpoint-tests.json b/services/securitylake/src/main/resources/codegen-resources/endpoint-tests.json index 391f37669d90..c25bed0b2a3e 100644 --- a/services/securitylake/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/securitylake/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { - "UseFIPS": true, + "UseDualStack": true, "Region": "us-gov-east-1", - "UseDualStack": true + "UseFIPS": true } }, { @@ -21,9 +21,9 @@ } }, "params": { - "UseFIPS": true, + "UseDualStack": false, "Region": "us-gov-east-1", - "UseDualStack": false + "UseFIPS": true } }, { @@ -34,9 +34,9 @@ } }, "params": { - "UseFIPS": false, + "UseDualStack": true, "Region": "us-gov-east-1", - "UseDualStack": true + "UseFIPS": false } }, { @@ -47,9 +47,9 @@ } }, "params": { - "UseFIPS": false, + "UseDualStack": false, "Region": "us-gov-east-1", - "UseDualStack": false + "UseFIPS": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseFIPS": true, + "UseDualStack": true, "Region": "cn-north-1", - "UseDualStack": true + "UseFIPS": true } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseFIPS": true, + "UseDualStack": false, "Region": "cn-north-1", - "UseDualStack": false + "UseFIPS": true } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseFIPS": false, + "UseDualStack": true, "Region": "cn-north-1", - "UseDualStack": true + "UseFIPS": 
false } }, { @@ -99,9 +99,9 @@ } }, "params": { - "UseFIPS": false, + "UseDualStack": false, "Region": "cn-north-1", - "UseDualStack": false + "UseFIPS": false } }, { @@ -110,9 +110,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, + "UseDualStack": true, "Region": "us-iso-east-1", - "UseDualStack": true + "UseFIPS": true } }, { @@ -123,9 +123,9 @@ } }, "params": { - "UseFIPS": true, + "UseDualStack": false, "Region": "us-iso-east-1", - "UseDualStack": false + "UseFIPS": true } }, { @@ -134,9 +134,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, + "UseDualStack": true, "Region": "us-iso-east-1", - "UseDualStack": true + "UseFIPS": false } }, { @@ -147,9 +147,9 @@ } }, "params": { - "UseFIPS": false, + "UseDualStack": false, "Region": "us-iso-east-1", - "UseDualStack": false + "UseFIPS": false } }, { @@ -160,9 +160,9 @@ } }, "params": { - "UseFIPS": true, + "UseDualStack": true, "Region": "us-east-1", - "UseDualStack": true + "UseFIPS": true } }, { @@ -173,9 +173,9 @@ } }, "params": { - "UseFIPS": true, + "UseDualStack": false, "Region": "us-east-1", - "UseDualStack": false + "UseFIPS": true } }, { @@ -186,9 +186,9 @@ } }, "params": { - "UseFIPS": false, + "UseDualStack": true, "Region": "us-east-1", - "UseDualStack": true + "UseFIPS": false } }, { @@ -199,9 +199,9 @@ } }, "params": { - "UseFIPS": false, + "UseDualStack": false, "Region": "us-east-1", - "UseDualStack": false + "UseFIPS": false } }, { @@ -210,9 +210,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, + "UseDualStack": true, "Region": "us-isob-east-1", - "UseDualStack": true + "UseFIPS": true } }, { @@ -223,9 +223,9 @@ } }, "params": { - "UseFIPS": true, + "UseDualStack": false, "Region": "us-isob-east-1", - "UseDualStack": false + "UseFIPS": true } }, { @@ -234,9 +234,9 @@ 
"error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, + "UseDualStack": true, "Region": "us-isob-east-1", - "UseDualStack": true + "UseFIPS": false } }, { @@ -247,9 +247,9 @@ } }, "params": { - "UseFIPS": false, + "UseDualStack": false, "Region": "us-isob-east-1", - "UseDualStack": false + "UseFIPS": false } }, { @@ -260,9 +260,9 @@ } }, "params": { - "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false, + "Region": "us-east-1", + "UseFIPS": false, "Endpoint": "https://example.com" } }, @@ -272,9 +272,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, - "Region": "us-east-1", "UseDualStack": false, + "Region": "us-east-1", + "UseFIPS": true, "Endpoint": "https://example.com" } }, @@ -284,9 +284,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": true, + "Region": "us-east-1", + "UseFIPS": false, "Endpoint": "https://example.com" } } diff --git a/services/securitylake/src/main/resources/codegen-resources/paginators-1.json b/services/securitylake/src/main/resources/codegen-resources/paginators-1.json index 472071730222..19e482b21127 100644 --- a/services/securitylake/src/main/resources/codegen-resources/paginators-1.json +++ b/services/securitylake/src/main/resources/codegen-resources/paginators-1.json @@ -1,22 +1,22 @@ { "pagination": { - "GetDatalakeStatus": { + "GetDataLakeSources": { "input_token": "nextToken", "output_token": "nextToken", - "limit_key": "maxAccountResults", - "result_key": "accountSourcesList" + "limit_key": "maxResults", + "result_key": "dataLakeSources" }, - "ListDatalakeExceptions": { + "ListDataLakeExceptions": { "input_token": "nextToken", "output_token": "nextToken", - "limit_key": "maxFailures", - "result_key": "nonRetryableFailures" + "limit_key": "maxResults", + "result_key": "exceptions" }, 
"ListLogSources": { "input_token": "nextToken", "output_token": "nextToken", "limit_key": "maxResults", - "result_key": "regionSourceTypesAccountsList" + "result_key": "sources" }, "ListSubscribers": { "input_token": "nextToken", diff --git a/services/securitylake/src/main/resources/codegen-resources/service-2.json b/services/securitylake/src/main/resources/codegen-resources/service-2.json index 5ee4b444af8c..40e2a0c3268e 100644 --- a/services/securitylake/src/main/resources/codegen-resources/service-2.json +++ b/services/securitylake/src/main/resources/codegen-resources/service-2.json @@ -16,111 +16,97 @@ "name":"CreateAwsLogSource", "http":{ "method":"POST", - "requestUri":"/v1/logsources/aws", + "requestUri":"/v1/datalake/logsources/aws", "responseCode":200 }, "input":{"shape":"CreateAwsLogSourceRequest"}, "output":{"shape":"CreateAwsLogSourceResponse"}, "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, - {"shape":"S3Exception"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"AccountNotFoundException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      Adds a natively supported Amazon Web Service as an Amazon Security Lake source. Enables source types for member accounts in required Amazon Web Services Regions, based on the parameters you specify. You can choose any source type in any Region for either accounts that are part of a trusted organization or standalone accounts. At least one of the three dimensions is a mandatory input to this API. However, you can supply any combination of the three dimensions to this API.

      By default, a dimension refers to the entire set. When you don't provide a dimension, Security Lake assumes that the missing dimension refers to the entire set. This is overridden when you supply any one of the inputs. For instance, when you do not specify members, the API enables all Security Lake member accounts for all sources. Similarly, when you do not specify Regions, Security Lake is enabled for all the Regions where Security Lake is available as a service.

      You can use this API only to enable natively supported Amazon Web Services as a source. Use CreateCustomLogSource to enable data collection from a custom source.

      " + "documentation":"

      Adds a natively supported Amazon Web Service as an Amazon Security Lake source. Enables source types for member accounts in required Amazon Web Services Regions, based on the parameters you specify. You can choose any source type in any Region for either accounts that are part of a trusted organization or standalone accounts. Once you add an Amazon Web Service as a source, Security Lake starts collecting logs and events from it.
 
      You can use this API only to enable natively supported Amazon Web Services as a source. Use CreateCustomLogSource to enable data collection from a custom source.

      " }, "CreateCustomLogSource":{ "name":"CreateCustomLogSource", "http":{ "method":"POST", - "requestUri":"/v1/logsources/custom", + "requestUri":"/v1/datalake/logsources/custom", "responseCode":200 }, "input":{"shape":"CreateCustomLogSourceRequest"}, "output":{"shape":"CreateCustomLogSourceResponse"}, "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, - {"shape":"ConflictSourceNamesException"}, {"shape":"AccessDeniedException"}, - {"shape":"BucketNotFoundException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"AccountNotFoundException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      Adds a third-party custom source in Amazon Security Lake, from the Amazon Web Services Region where you want to create a custom source. Security Lake can collect logs and events from third-party custom sources. After creating the appropriate IAM role to invoke the Glue crawler, use this API to add a custom source name in Security Lake. This operation creates a partition in the Amazon S3 bucket for Security Lake as the target location for log files from the custom source in addition to an associated Glue table and a Glue crawler.

      " + "documentation":"

      Adds a third-party custom source in Amazon Security Lake, from the Amazon Web Services Region where you want to create a custom source. Security Lake can collect logs and events from third-party custom sources. After creating the appropriate IAM role to invoke the Glue crawler, use this API to add a custom source name in Security Lake. This operation creates a partition in the Amazon S3 bucket for Security Lake as the target location for log files from the custom source. In addition, this operation also creates an associated Glue table and a Glue crawler.

      ", + "idempotent":true }, - "CreateDatalake":{ - "name":"CreateDatalake", + "CreateDataLake":{ + "name":"CreateDataLake", "http":{ "method":"POST", "requestUri":"/v1/datalake", "responseCode":200 }, - "input":{"shape":"CreateDatalakeRequest"}, - "output":{"shape":"CreateDatalakeResponse"}, + "input":{"shape":"CreateDataLakeRequest"}, + "output":{"shape":"CreateDataLakeResponse"}, "errors":[ - {"shape":"ServiceQuotaExceededException"}, - {"shape":"ConflictException"}, + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, - {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      Initializes an Amazon Security Lake instance with the provided (or default) configuration. You can enable Security Lake in Amazon Web Services Regions with customized settings before enabling log collection in Regions. You can either use the enableAll parameter to specify all Regions or specify the Regions where you want to enable Security Lake. To specify particular Regions, use the Regions parameter and then configure these Regions using the configurations parameter. If you have already enabled Security Lake in a Region when you call this command, the command will update the Region if you provide new configuration parameters. If you have not already enabled Security Lake in the Region when you call this API, it will set up the data lake in the Region with the specified configurations.

      When you enable Security Lake, it starts ingesting security data after the CreateAwsLogSource call. This includes ingesting security data from sources, storing data, and making data accessible to subscribers. Security Lake also enables all the existing settings and resources that it stores or maintains for your Amazon Web Services account in the current Region, including security log and event data. For more information, see the Amazon Security Lake User Guide.

      " + "documentation":"

      Initializes an Amazon Security Lake instance with the provided (or default) configuration. You can enable Security Lake in Amazon Web Services Regions with customized settings before enabling log collection in Regions. By default, the CreateDataLake operation enables Security Lake in all Regions. To specify particular Regions, configure these Regions using the configurations parameter. If you have already enabled Security Lake in a Region when you call this command, the command will update the Region if you provide new configuration parameters. If you have not already enabled Security Lake in the Region when you call this API, it will set up the data lake in the Region with the specified configurations.

      When you enable Security Lake, it starts ingesting security data after the CreateAwsLogSource call. This includes ingesting security data from sources, storing data, and making data accessible to subscribers. Security Lake also enables all the existing settings and resources that it stores or maintains for your Amazon Web Services account in the current Region, including security log and event data. For more information, see the Amazon Security Lake User Guide.

      " }, - "CreateDatalakeAutoEnable":{ - "name":"CreateDatalakeAutoEnable", + "CreateDataLakeExceptionSubscription":{ + "name":"CreateDataLakeExceptionSubscription", "http":{ "method":"POST", - "requestUri":"/v1/datalake/autoenable", + "requestUri":"/v1/datalake/exceptions/subscription", "responseCode":200 }, - "input":{"shape":"CreateDatalakeAutoEnableRequest"}, - "output":{"shape":"CreateDatalakeAutoEnableResponse"}, + "input":{"shape":"CreateDataLakeExceptionSubscriptionRequest"}, + "output":{"shape":"CreateDataLakeExceptionSubscriptionResponse"}, "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"AccountNotFoundException"} - ], - "documentation":"

      Automatically enables Amazon Security Lake for new member accounts in your organization. Security Lake is not automatically enabled for any existing member accounts in your organization.

      " - }, - "CreateDatalakeDelegatedAdmin":{ - "name":"CreateDatalakeDelegatedAdmin", - "http":{ - "method":"POST", - "requestUri":"/v1/datalake/delegate", - "responseCode":200 - }, - "input":{"shape":"CreateDatalakeDelegatedAdminRequest"}, - "output":{"shape":"CreateDatalakeDelegatedAdminResponse"}, - "errors":[ - {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, - {"shape":"ThrottlingException"}, - {"shape":"AccessDeniedException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      Designates the Amazon Security Lake delegated administrator account for the organization. This API can only be called by the organization management account. The organization management account cannot be the delegated administrator account.

      " + "documentation":"

      Creates the specified notification subscription in Amazon Security Lake for the organization you specify.

      " }, - "CreateDatalakeExceptionsSubscription":{ - "name":"CreateDatalakeExceptionsSubscription", + "CreateDataLakeOrganizationConfiguration":{ + "name":"CreateDataLakeOrganizationConfiguration", "http":{ "method":"POST", - "requestUri":"/v1/datalake/exceptions/subscription", + "requestUri":"/v1/datalake/organization/configuration", "responseCode":200 }, - "input":{"shape":"CreateDatalakeExceptionsSubscriptionRequest"}, - "output":{"shape":"CreateDatalakeExceptionsSubscriptionResponse"}, + "input":{"shape":"CreateDataLakeOrganizationConfigurationRequest"}, + "output":{"shape":"CreateDataLakeOrganizationConfigurationResponse"}, "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"AccountNotFoundException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      Creates the specified notification subscription in Amazon Security Lake for the organization you specify.

      " + "documentation":"

      Automatically enables Amazon Security Lake for new member accounts in your organization. Security Lake is not automatically enabled for any existing member accounts in your organization.

      " }, "CreateSubscriber":{ "name":"CreateSubscriber", @@ -132,34 +118,31 @@ "input":{"shape":"CreateSubscriberRequest"}, "output":{"shape":"CreateSubscriberResponse"}, "errors":[ - {"shape":"ConflictSubscriptionException"}, + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"BucketNotFoundException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"AccountNotFoundException"}, - {"shape":"InvalidInputException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], "documentation":"

      Creates a subscription permission for accounts that are already enabled in Amazon Security Lake. You can create a subscriber with access to data in the current Amazon Web Services Region.

      " }, - "CreateSubscriptionNotificationConfiguration":{ - "name":"CreateSubscriptionNotificationConfiguration", + "CreateSubscriberNotification":{ + "name":"CreateSubscriberNotification", "http":{ "method":"POST", - "requestUri":"/subscription-notifications/{subscriptionId}", + "requestUri":"/v1/subscribers/{subscriberId}/notification", "responseCode":200 }, - "input":{"shape":"CreateSubscriptionNotificationConfigurationRequest"}, - "output":{"shape":"CreateSubscriptionNotificationConfigurationResponse"}, + "input":{"shape":"CreateSubscriberNotificationRequest"}, + "output":{"shape":"CreateSubscriberNotificationResponse"}, "errors":[ - {"shape":"ConcurrentModificationException"}, + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"AccountNotFoundException"}, - {"shape":"InvalidInputException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], "documentation":"

      Notifies the subscriber when new data is written to the data lake for the sources that the subscriber consumes in Security Lake. You can create only one subscriber notification per subscriber.

      " }, @@ -167,293 +150,292 @@ "name":"DeleteAwsLogSource", "http":{ "method":"POST", - "requestUri":"/v1/logsources/aws/delete", + "requestUri":"/v1/datalake/logsources/aws/delete", "responseCode":200 }, "input":{"shape":"DeleteAwsLogSourceRequest"}, "output":{"shape":"DeleteAwsLogSourceResponse"}, "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"AccountNotFoundException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      Removes a natively supported Amazon Web Service as an Amazon Security Lake source. When you remove the source, Security Lake stops collecting data from that source, and subscribers can no longer consume new data from the source. Subscribers can still consume data that Security Lake collected from the source before disablement.

      You can choose any source type in any Amazon Web Services Region for either accounts that are part of a trusted organization or standalone accounts. At least one of the three dimensions is a mandatory input to this API. However, you can supply any combination of the three dimensions to this API.

      By default, a dimension refers to the entire set. This is overridden when you supply any one of the inputs. For instance, when you do not specify members, the API disables all Security Lake member accounts for sources. Similarly, when you do not specify Regions, Security Lake is disabled for all the Regions where Security Lake is available as a service.

      When you don't provide a dimension, Security Lake assumes that the missing dimension refers to the entire set. For example, if you don't provide specific accounts, the API applies to the entire set of accounts in your organization.

      " + "documentation":"

      Removes a natively supported Amazon Web Service as an Amazon Security Lake source. You can remove a source for one or more Regions. When you remove the source, Security Lake stops collecting data from that source in the specified Regions and accounts, and subscribers can no longer consume new data from the source. However, subscribers can still consume data that Security Lake collected from the source before removal.

      You can choose any source type in any Amazon Web Services Region for either accounts that are part of a trusted organization or standalone accounts.

      " }, "DeleteCustomLogSource":{ "name":"DeleteCustomLogSource", "http":{ "method":"DELETE", - "requestUri":"/v1/logsources/custom", + "requestUri":"/v1/datalake/logsources/custom/{sourceName}", "responseCode":200 }, "input":{"shape":"DeleteCustomLogSourceRequest"}, "output":{"shape":"DeleteCustomLogSourceResponse"}, "errors":[ - {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, - {"shape":"ConflictSourceNamesException"}, - {"shape":"AccessDeniedException"}, - {"shape":"BucketNotFoundException"}, + {"shape":"BadRequestException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"AccountNotFoundException"} - ], - "documentation":"

      Removes a custom log source from Amazon Security Lake.

      ", - "idempotent":true - }, - "DeleteDatalake":{ - "name":"DeleteDatalake", - "http":{ - "method":"DELETE", - "requestUri":"/v1/datalake", - "responseCode":200 - }, - "input":{"shape":"DeleteDatalakeRequest"}, - "output":{"shape":"DeleteDatalakeResponse"}, - "errors":[ - {"shape":"ServiceQuotaExceededException"}, - {"shape":"ConflictException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, - {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      When you delete Amazon Security Lake from your account, Security Lake is disabled in all Amazon Web Services Regions. Also, this API automatically takes steps to remove the account from Security Lake.

      This operation disables security data collection from sources, deletes data stored, and stops making data accessible to subscribers. Security Lake also deletes all the existing settings and resources that it stores or maintains for your Amazon Web Services account in the current Region, including security log and event data. The DeleteDatalake operation does not delete the Amazon S3 bucket, which is owned by your Amazon Web Services account. For more information, see the Amazon Security Lake User Guide.

      ", + "documentation":"

      Removes a custom log source from Amazon Security Lake, to stop sending data from the custom source to Security Lake.

      ", "idempotent":true }, - "DeleteDatalakeAutoEnable":{ - "name":"DeleteDatalakeAutoEnable", + "DeleteDataLake":{ + "name":"DeleteDataLake", "http":{ "method":"POST", - "requestUri":"/v1/datalake/autoenable/delete", + "requestUri":"/v1/datalake/delete", "responseCode":200 }, - "input":{"shape":"DeleteDatalakeAutoEnableRequest"}, - "output":{"shape":"DeleteDatalakeAutoEnableResponse"}, + "input":{"shape":"DeleteDataLakeRequest"}, + "output":{"shape":"DeleteDataLakeResponse"}, "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"AccountNotFoundException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      DeleteDatalakeAutoEnable removes automatic enablement of configuration settings for new member accounts (but keeps settings for the delegated administrator) from Amazon Security Lake. You must run this API using credentials of the delegated administrator. When you run this API, new member accounts that are added after the organization enables Security Lake won't contribute to the data lake.

      " + "documentation":"

      When you disable Amazon Security Lake from your account, Security Lake is disabled in all Amazon Web Services Regions and it stops collecting data from your sources. Also, this API automatically takes steps to remove the account from Security Lake. However, Security Lake retains all of your existing settings and the resources that it created in your Amazon Web Services account in the current Amazon Web Services Region.

      The DeleteDataLake operation does not delete the data that is stored in your Amazon S3 bucket, which is owned by your Amazon Web Services account. For more information, see the Amazon Security Lake User Guide.

      ", + "idempotent":true }, - "DeleteDatalakeDelegatedAdmin":{ - "name":"DeleteDatalakeDelegatedAdmin", + "DeleteDataLakeExceptionSubscription":{ + "name":"DeleteDataLakeExceptionSubscription", "http":{ "method":"DELETE", - "requestUri":"/v1/datalake/delegate/{account}", + "requestUri":"/v1/datalake/exceptions/subscription", "responseCode":200 }, - "input":{"shape":"DeleteDatalakeDelegatedAdminRequest"}, - "output":{"shape":"DeleteDatalakeDelegatedAdminResponse"}, + "input":{"shape":"DeleteDataLakeExceptionSubscriptionRequest"}, + "output":{"shape":"DeleteDataLakeExceptionSubscriptionResponse"}, "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, - {"shape":"ThrottlingException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      Deletes the Amazon Security Lake delegated administrator account for the organization. This API can only be called by the organization management account. The organization management account cannot be the delegated administrator account.

      " + "documentation":"

      Deletes the specified notification subscription in Amazon Security Lake for the organization you specify.

      ", + "idempotent":true }, - "DeleteDatalakeExceptionsSubscription":{ - "name":"DeleteDatalakeExceptionsSubscription", + "DeleteDataLakeOrganizationConfiguration":{ + "name":"DeleteDataLakeOrganizationConfiguration", "http":{ - "method":"DELETE", - "requestUri":"/v1/datalake/exceptions/subscription", + "method":"POST", + "requestUri":"/v1/datalake/organization/configuration/delete", "responseCode":200 }, - "input":{"shape":"DeleteDatalakeExceptionsSubscriptionRequest"}, - "output":{"shape":"DeleteDatalakeExceptionsSubscriptionResponse"}, + "input":{"shape":"DeleteDataLakeOrganizationConfigurationRequest"}, + "output":{"shape":"DeleteDataLakeOrganizationConfigurationResponse"}, "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"AccountNotFoundException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      Deletes the specified notification subscription in Amazon Security Lake for the organization you specify.

      " + "documentation":"

      Removes the automatic enablement of configuration settings for new member accounts (but retains the settings for the delegated administrator) from Amazon Security Lake. You must run this API using the credentials of the delegated administrator. When you run this API, new member accounts that are added after the organization enables Security Lake won't contribute to the data lake.

      " }, "DeleteSubscriber":{ "name":"DeleteSubscriber", "http":{ "method":"DELETE", - "requestUri":"/v1/subscribers", + "requestUri":"/v1/subscribers/{subscriberId}", "responseCode":200 }, "input":{"shape":"DeleteSubscriberRequest"}, "output":{"shape":"DeleteSubscriberResponse"}, "errors":[ - {"shape":"ConcurrentModificationException"}, + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"BucketNotFoundException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"AccountNotFoundException"}, - {"shape":"InvalidInputException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      Deletes the subscription permission for accounts that are already enabled in Amazon Security Lake. You can delete a subscriber and remove access to data in the current Amazon Web Services Region.

      ", + "documentation":"

      Deletes the subscription permission and all notification settings for accounts that are already enabled in Amazon Security Lake. When you run DeleteSubscriber, the subscriber will no longer consume data from Security Lake and the subscriber is removed. This operation deletes the subscriber and removes access to data in the current Amazon Web Services Region.

      ", "idempotent":true }, - "DeleteSubscriptionNotificationConfiguration":{ - "name":"DeleteSubscriptionNotificationConfiguration", + "DeleteSubscriberNotification":{ + "name":"DeleteSubscriberNotification", "http":{ "method":"DELETE", - "requestUri":"/subscription-notifications/{subscriptionId}", + "requestUri":"/v1/subscribers/{subscriberId}/notification", "responseCode":200 }, - "input":{"shape":"DeleteSubscriptionNotificationConfigurationRequest"}, - "output":{"shape":"DeleteSubscriptionNotificationConfigurationResponse"}, + "input":{"shape":"DeleteSubscriberNotificationRequest"}, + "output":{"shape":"DeleteSubscriberNotificationResponse"}, "errors":[ - {"shape":"ConcurrentModificationException"}, + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"AccountNotFoundException"}, - {"shape":"InvalidInputException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], "documentation":"

      Deletes the specified notification subscription in Amazon Security Lake for the organization you specify.

      ", "idempotent":true }, - "GetDatalake":{ - "name":"GetDatalake", + "DeregisterDataLakeDelegatedAdministrator":{ + "name":"DeregisterDataLakeDelegatedAdministrator", "http":{ - "method":"GET", - "requestUri":"/v1/datalake", + "method":"DELETE", + "requestUri":"/v1/datalake/delegate", "responseCode":200 }, - "input":{"shape":"GetDatalakeRequest"}, - "output":{"shape":"GetDatalakeResponse"}, + "input":{"shape":"DeregisterDataLakeDelegatedAdministratorRequest"}, + "output":{"shape":"DeregisterDataLakeDelegatedAdministratorResponse"}, "errors":[ - {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"}, + {"shape":"BadRequestException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"AccountNotFoundException"} - ], - "documentation":"

      Retrieves the Amazon Security Lake configuration object for the specified Amazon Web Services account ID. You can use the GetDatalake API to know whether Security Lake is enabled for the current Region. This API does not take input parameters.

      " - }, - "GetDatalakeAutoEnable":{ - "name":"GetDatalakeAutoEnable", - "http":{ - "method":"GET", - "requestUri":"/v1/datalake/autoenable", - "responseCode":200 - }, - "input":{"shape":"GetDatalakeAutoEnableRequest"}, - "output":{"shape":"GetDatalakeAutoEnableResponse"}, - "errors":[ {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"AccountNotFoundException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      Retrieves the configuration that will be automatically set up for accounts added to the organization after the organization has onboarded to Amazon Security Lake. This API does not take input parameters.

      " + "documentation":"

      Deletes the Amazon Security Lake delegated administrator account for the organization. This API can only be called by the organization management account. The organization management account cannot be the delegated administrator account.

      ", + "idempotent":true }, - "GetDatalakeExceptionsExpiry":{ - "name":"GetDatalakeExceptionsExpiry", + "GetDataLakeExceptionSubscription":{ + "name":"GetDataLakeExceptionSubscription", "http":{ "method":"GET", - "requestUri":"/v1/datalake/exceptions/expiry", + "requestUri":"/v1/datalake/exceptions/subscription", "responseCode":200 }, - "input":{"shape":"GetDatalakeExceptionsExpiryRequest"}, - "output":{"shape":"GetDatalakeExceptionsExpiryResponse"}, + "input":{"shape":"GetDataLakeExceptionSubscriptionRequest"}, + "output":{"shape":"GetDataLakeExceptionSubscriptionResponse"}, "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"AccountNotFoundException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      Retrieves the expiration period and time-to-live (TTL) for which the exception message will remain. Exceptions are stored by default, for 2 weeks from when a record was created in Amazon Security Lake. This API does not take input parameters.

      " + "documentation":"

      Retrieves the details of exception notifications for the account in Amazon Security Lake.

      " }, - "GetDatalakeExceptionsSubscription":{ - "name":"GetDatalakeExceptionsSubscription", + "GetDataLakeOrganizationConfiguration":{ + "name":"GetDataLakeOrganizationConfiguration", "http":{ "method":"GET", - "requestUri":"/v1/datalake/exceptions/subscription", + "requestUri":"/v1/datalake/organization/configuration", "responseCode":200 }, - "input":{"shape":"GetDatalakeExceptionsSubscriptionRequest"}, - "output":{"shape":"GetDatalakeExceptionsSubscriptionResponse"}, + "input":{"shape":"GetDataLakeOrganizationConfigurationRequest"}, + "output":{"shape":"GetDataLakeOrganizationConfigurationResponse"}, "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"AccountNotFoundException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      Retrieves the details of exception notifications for the account in Amazon Security Lake.

      " + "documentation":"

      Retrieves the configuration that will be automatically set up for accounts added to the organization after the organization has onboarded to Amazon Security Lake. This API does not take input parameters.

      " }, - "GetDatalakeStatus":{ - "name":"GetDatalakeStatus", + "GetDataLakeSources":{ + "name":"GetDataLakeSources", "http":{ "method":"POST", - "requestUri":"/v1/datalake/status", + "requestUri":"/v1/datalake/sources", "responseCode":200 }, - "input":{"shape":"GetDatalakeStatusRequest"}, - "output":{"shape":"GetDatalakeStatusResponse"}, + "input":{"shape":"GetDataLakeSourcesRequest"}, + "output":{"shape":"GetDataLakeSourcesResponse"}, "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"AccountNotFoundException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      Retrieves a snapshot of the current Region, including whether Amazon Security Lake is enabled for those accounts and which sources Security Lake is collecting data from.

      " + "documentation":"

      Retrieves a snapshot of the current Region, including whether Amazon Security Lake is enabled for those accounts and which sources Security Lake is collecting data from.

      " }, "GetSubscriber":{ "name":"GetSubscriber", "http":{ "method":"GET", - "requestUri":"/v1/subscribers/{id}", + "requestUri":"/v1/subscribers/{subscriberId}", "responseCode":200 }, "input":{"shape":"GetSubscriberRequest"}, "output":{"shape":"GetSubscriberResponse"}, "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"AccountNotFoundException"}, - {"shape":"InvalidInputException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], "documentation":"

      Retrieves the subscription information for the specified subscription ID. You can get information about a specific subscriber.

      " }, - "ListDatalakeExceptions":{ - "name":"ListDatalakeExceptions", + "ListDataLakeExceptions":{ + "name":"ListDataLakeExceptions", "http":{ "method":"POST", "requestUri":"/v1/datalake/exceptions", "responseCode":200 }, - "input":{"shape":"ListDatalakeExceptionsRequest"}, - "output":{"shape":"ListDatalakeExceptionsResponse"}, + "input":{"shape":"ListDataLakeExceptionsRequest"}, + "output":{"shape":"ListDataLakeExceptionsResponse"}, "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"AccountNotFoundException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], "documentation":"

      Lists the Amazon Security Lake exceptions that you can use to find the source of problems and fix them.

      " }, + "ListDataLakes":{ + "name":"ListDataLakes", + "http":{ + "method":"GET", + "requestUri":"/v1/datalakes", + "responseCode":200 + }, + "input":{"shape":"ListDataLakesRequest"}, + "output":{"shape":"ListDataLakesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      Retrieves the Amazon Security Lake configuration object for the specified Amazon Web Services account ID. You can use the ListDataLakes API to know whether Security Lake is enabled for any region.

      " + }, "ListLogSources":{ "name":"ListLogSources", "http":{ "method":"POST", - "requestUri":"/v1/logsources/list", + "requestUri":"/v1/datalake/logsources/list", "responseCode":200 }, "input":{"shape":"ListLogSourcesRequest"}, "output":{"shape":"ListLogSourcesResponse"}, "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"AccountNotFoundException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      Retrieves the log sources in the current Amazon Web Services Region.

      " + "documentation":"

      Retrieves the log sources in the current Amazon Web Services Region.

      " }, "ListSubscribers":{ "name":"ListSubscribers", @@ -465,114 +447,118 @@ "input":{"shape":"ListSubscribersRequest"}, "output":{"shape":"ListSubscribersResponse"}, "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"AccountNotFoundException"}, - {"shape":"InvalidInputException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], "documentation":"

      List all subscribers for the specific Amazon Security Lake account ID. You can retrieve a list of subscriptions associated with a specific organization or Amazon Web Services account.

      " }, - "UpdateDatalake":{ - "name":"UpdateDatalake", + "RegisterDataLakeDelegatedAdministrator":{ + "name":"RegisterDataLakeDelegatedAdministrator", "http":{ - "method":"PUT", - "requestUri":"/v1/datalake", + "method":"POST", + "requestUri":"/v1/datalake/delegate", "responseCode":200 }, - "input":{"shape":"UpdateDatalakeRequest"}, - "output":{"shape":"UpdateDatalakeResponse"}, + "input":{"shape":"RegisterDataLakeDelegatedAdministratorRequest"}, + "output":{"shape":"RegisterDataLakeDelegatedAdministratorResponse"}, "errors":[ - {"shape":"EventBridgeException"}, + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      Specifies where to store your security data and for how long. You can add a rollup Region to consolidate data from multiple Amazon Web Services Regions.

      ", - "idempotent":true + "documentation":"

      Designates the Amazon Security Lake delegated administrator account for the organization. This API can only be called by the organization management account. The organization management account cannot be the delegated administrator account.

      " }, - "UpdateDatalakeExceptionsExpiry":{ - "name":"UpdateDatalakeExceptionsExpiry", + "UpdateDataLake":{ + "name":"UpdateDataLake", "http":{ "method":"PUT", - "requestUri":"/v1/datalake/exceptions/expiry", + "requestUri":"/v1/datalake", "responseCode":200 }, - "input":{"shape":"UpdateDatalakeExceptionsExpiryRequest"}, - "output":{"shape":"UpdateDatalakeExceptionsExpiryResponse"}, + "input":{"shape":"UpdateDataLakeRequest"}, + "output":{"shape":"UpdateDataLakeResponse"}, "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"AccountNotFoundException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      Update the expiration period for the exception message to your preferred time, and control the time-to-live (TTL) for the exception message to remain. Exceptions are stored by default for 2 weeks from when a record was created in Amazon Security Lake.

      " + "documentation":"

      Specifies where to store your security data and for how long. You can add a rollup Region to consolidate data from multiple Amazon Web Services Regions.

      ", + "idempotent":true }, - "UpdateDatalakeExceptionsSubscription":{ - "name":"UpdateDatalakeExceptionsSubscription", + "UpdateDataLakeExceptionSubscription":{ + "name":"UpdateDataLakeExceptionSubscription", "http":{ "method":"PUT", "requestUri":"/v1/datalake/exceptions/subscription", "responseCode":200 }, - "input":{"shape":"UpdateDatalakeExceptionsSubscriptionRequest"}, - "output":{"shape":"UpdateDatalakeExceptionsSubscriptionResponse"}, + "input":{"shape":"UpdateDataLakeExceptionSubscriptionRequest"}, + "output":{"shape":"UpdateDataLakeExceptionSubscriptionResponse"}, "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"AccountNotFoundException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      Updates the specified notification subscription in Amazon Security Lake for the organization you specify.

      " + "documentation":"

      Updates the specified notification subscription in Amazon Security Lake for the organization you specify.

      ", + "idempotent":true }, "UpdateSubscriber":{ "name":"UpdateSubscriber", "http":{ "method":"PUT", - "requestUri":"/v1/subscribers/{id}", + "requestUri":"/v1/subscribers/{subscriberId}", "responseCode":200 }, "input":{"shape":"UpdateSubscriberRequest"}, "output":{"shape":"UpdateSubscriberResponse"}, "errors":[ - {"shape":"ConflictSubscriptionException"}, - {"shape":"ConcurrentModificationException"}, + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"AccountNotFoundException"}, - {"shape":"InvalidInputException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      Updates an existing subscription for the given Amazon Security Lake account ID. You can update a subscriber by changing the sources that the subscriber consumes data from.

      ", + "documentation":"

      Updates an existing subscription for the given Amazon Security Lake account ID. You can update a subscriber by changing the sources that the subscriber consumes data from.

      ", "idempotent":true }, - "UpdateSubscriptionNotificationConfiguration":{ - "name":"UpdateSubscriptionNotificationConfiguration", + "UpdateSubscriberNotification":{ + "name":"UpdateSubscriberNotification", "http":{ "method":"PUT", - "requestUri":"/subscription-notifications/{subscriptionId}", + "requestUri":"/v1/subscribers/{subscriberId}/notification", "responseCode":200 }, - "input":{"shape":"UpdateSubscriptionNotificationConfigurationRequest"}, - "output":{"shape":"UpdateSubscriptionNotificationConfigurationResponse"}, + "input":{"shape":"UpdateSubscriberNotificationRequest"}, + "output":{"shape":"UpdateSubscriberNotificationResponse"}, "errors":[ - {"shape":"ConcurrentModificationException"}, + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"AccountNotFoundException"}, - {"shape":"InvalidInputException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

      Updates an existing notification method for the subscription (SQS or HTTPs endpoint) or switches the notification subscription endpoint for a subscriber.

      " + "documentation":"

      Updates an existing notification method for the subscription (SQS or HTTPS endpoint) or switches the notification subscription endpoint for a subscriber.

      ", + "idempotent":true } }, "shapes":{ "AccessDeniedException":{ "type":"structure", - "required":["message"], "members":{ "errorCode":{ "shape":"String", @@ -600,144 +586,130 @@ }, "AccountList":{ "type":"list", - "member":{"shape":"String"} + "member":{"shape":"AwsAccountId"} + }, + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1, + "pattern":"^arn:aws:securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$" }, - "AccountNotFoundException":{ + "AwsAccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"^[0-9]{12}$" + }, + "AwsIdentity":{ "type":"structure", - "required":["message"], + "required":[ + "externalId", + "principal" + ], "members":{ - "message":{"shape":"String"} - }, - "documentation":"

      Amazon Security Lake cannot find an Amazon Web Services account with the accountID that you specified, or the account whose credentials you used to make this request isn't a member of an organization.

      ", - "error":{ - "httpStatusCode":403, - "senderFault":true + "externalId":{ + "shape":"ExternalId", + "documentation":"

      The external ID used to establish the trust relationship with the AWS identity.

      " + }, + "principal":{ + "shape":"AwsPrincipal", + "documentation":"

      The AWS identity principal.

      " + } }, - "exception":true + "documentation":"

      The AWS identity.

      " }, - "AccountSources":{ + "AwsLogSourceConfiguration":{ "type":"structure", "required":[ - "account", - "sourceType" + "regions", + "sourceName" ], "members":{ - "account":{ - "shape":"String", - "documentation":"

      The ID of the Security Lake account for which logs are collected.

      " + "accounts":{ + "shape":"AccountList", + "documentation":"

      Specify the Amazon Web Services account information where you want to enable Security Lake.

      " }, - "eventClass":{ - "shape":"OcsfEventClass", - "documentation":"

      Initializes a new instance of the Event class.

      " + "regions":{ + "shape":"RegionList", + "documentation":"

      Specify the Regions where you want to enable Security Lake.

      " }, - "logsStatus":{ - "shape":"LogsStatusList", - "documentation":"

      The log status for the Security Lake account.

      " + "sourceName":{ + "shape":"AwsLogSourceName", + "documentation":"

      The name for an Amazon Web Services source. This must be a Regionally unique value.

      " }, - "sourceType":{ - "shape":"String", - "documentation":"

      The supported Amazon Web Services from which logs and events are collected. Amazon Security Lake supports log and event collection for natively supported Amazon Web Services.

      " + "sourceVersion":{ + "shape":"AwsLogSourceVersion", + "documentation":"

      The version for an Amazon Web Services source. This must be a Regionally unique value.

      " } }, - "documentation":"

      Amazon Security Lake collects logs and events from supported Amazon Web Services and custom sources. For the list of supported Amazon Web Services, see the Amazon Security Lake User Guide.

      " + "documentation":"

      The Security Lake logs source configuration file describes the information needed to generate Security Lake logs.

      " }, - "AccountSourcesList":{ + "AwsLogSourceConfigurationList":{ "type":"list", - "member":{"shape":"AccountSources"} + "member":{"shape":"AwsLogSourceConfiguration"}, + "max":50, + "min":0 }, - "AllDimensionsMap":{ - "type":"map", - "key":{"shape":"String"}, - "value":{"shape":"TwoDimensionsMap"} + "AwsLogSourceName":{ + "type":"string", + "enum":[ + "ROUTE53", + "VPC_FLOW", + "SH_FINDINGS", + "CLOUD_TRAIL_MGMT", + "LAMBDA_EXECUTION", + "S3_DATA" + ] }, - "AutoEnableNewRegionConfiguration":{ + "AwsLogSourceResource":{ "type":"structure", - "required":[ - "region", - "sources" - ], "members":{ - "region":{ - "shape":"Region", - "documentation":"

      The Amazon Web Services Regions where Security Lake is automatically enabled.

      " + "sourceName":{ + "shape":"AwsLogSourceName", + "documentation":"

      The name for an Amazon Web Services source. This must be a Regionally unique value.

      " }, - "sources":{ - "shape":"AwsSourceTypeList", - "documentation":"

      The Amazon Web Services sources that are automatically enabled in Security Lake.

      " + "sourceVersion":{ + "shape":"AwsLogSourceVersion", + "documentation":"

      The version for an Amazon Web Services source. This must be a Regionally unique value.

      " } }, - "documentation":"

      Automatically enable new organization accounts as member accounts from an Amazon Security Lake administrator account.

      " + "documentation":"

      Amazon Security Lake can collect logs and events from natively-supported Amazon Web Services services.

      " }, - "AutoEnableNewRegionConfigurationList":{ + "AwsLogSourceResourceList":{ "type":"list", - "member":{"shape":"AutoEnableNewRegionConfiguration"} + "member":{"shape":"AwsLogSourceResource"} }, - "AwsAccountId":{ + "AwsLogSourceVersion":{ "type":"string", - "max":12, - "min":12, - "pattern":"^\\d+$" + "pattern":"^(latest|[0-9]\\.[0-9])$" }, - "AwsLogSourceType":{ + "AwsPrincipal":{ "type":"string", - "enum":[ - "ROUTE53", - "VPC_FLOW", - "CLOUD_TRAIL", - "SH_FINDINGS" - ] + "pattern":"^([0-9]{12}|[a-z0-9\\.\\-]*\\.(amazonaws|amazon)\\.com)$" }, - "AwsSourceTypeList":{ - "type":"list", - "member":{"shape":"AwsLogSourceType"} - }, - "Boolean":{ - "type":"boolean", - "box":true - }, - "BucketNotFoundException":{ + "BadRequestException":{ "type":"structure", - "required":["message"], "members":{ "message":{"shape":"String"} }, - "documentation":"

      Amazon Security Lake generally returns 404 errors if the requested object is missing from the bucket.

      ", + "documentation":"

      The request is malformed or contains an error such as an invalid parameter value or a missing required parameter.

      ", "error":{ - "httpStatusCode":409, + "httpStatusCode":400, "senderFault":true }, "exception":true }, - "ConcurrentModificationException":{ - "type":"structure", - "required":["message"], - "members":{ - "message":{"shape":"String"} - }, - "documentation":"

      More than one process tried to modify a resource at the same time.

      ", - "error":{ - "httpStatusCode":409, - "senderFault":true - }, - "exception":true, - "retryable":{"throttling":false} - }, "ConflictException":{ "type":"structure", - "required":[ - "message", - "resourceId", - "resourceType" - ], "members":{ "message":{"shape":"String"}, - "resourceId":{ + "resourceName":{ "shape":"String", - "documentation":"

      A conflict occurred when prompting for the Resource ID.

      " + "documentation":"

      The resource name.

      " }, "resourceType":{ "shape":"String", - "documentation":"

      The resource type.

      " + "documentation":"

      The resource type.

      " } }, "documentation":"

      Occurs when a conflict with a previous successful write is detected. This generally occurs when the previous write did not have time to propagate to the host serving the current request. A retry (with appropriate backoff logic) is the recommended response to this exception.

      ", @@ -747,51 +719,13 @@ }, "exception":true }, - "ConflictSourceNamesException":{ - "type":"structure", - "required":["message"], - "members":{ - "message":{"shape":"String"} - }, - "documentation":"

      There was a conflict when you attempted to modify a Security Lake source name.

      ", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "ConflictSubscriptionException":{ - "type":"structure", - "required":["message"], - "members":{ - "message":{"shape":"String"} - }, - "documentation":"

      A conflicting subscription exception operation is in progress.

      ", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, "CreateAwsLogSourceRequest":{ "type":"structure", - "required":["inputOrder"], + "required":["sources"], "members":{ - "enableAllDimensions":{ - "shape":"AllDimensionsMap", - "documentation":"

      Enables data collection from specific Amazon Web Services sources in all specific accounts and specific Regions.

      " - }, - "enableSingleDimension":{ - "shape":"InputSet", - "documentation":"

      Enables data collection from all Amazon Web Services sources in specific accounts or Regions.

      " - }, - "enableTwoDimensions":{ - "shape":"TwoDimensionsMap", - "documentation":"

      Enables data collection from specific Amazon Web Services sources in specific accounts or Regions.

      " - }, - "inputOrder":{ - "shape":"DimensionSet", - "documentation":"

      Specifies the input order to enable dimensions in Security Lake, namely Region, source type, and member account.

      " + "sources":{ + "shape":"AwsLogSourceConfigurationList", + "documentation":"

      Specify the natively-supported Amazon Web Services service to add as a source in Security Lake.

      " } } }, @@ -801,156 +735,145 @@ "failed":{ "shape":"AccountList", "documentation":"

      Lists all accounts in which enabling a natively supported Amazon Web Service as a Security Lake source failed. The failure occurred as these accounts are not part of an organization.

      " - }, - "processing":{ - "shape":"AccountList", - "documentation":"

      Lists the accounts that are in the process of enabling a natively supported Amazon Web Service as a Security Lake source.

      " } } }, "CreateCustomLogSourceRequest":{ "type":"structure", - "required":[ - "customSourceName", - "eventClass", - "glueInvocationRoleArn", - "logProviderAccountId" - ], + "required":["sourceName"], "members":{ - "customSourceName":{ - "shape":"CustomSourceType", - "documentation":"

      The name for a third-party custom source. This must be a Regionally unique value.

      " + "configuration":{ + "shape":"CustomLogSourceConfiguration", + "documentation":"

      The configuration for the third-party custom source.

      " }, - "eventClass":{ - "shape":"OcsfEventClass", - "documentation":"

      The Open Cybersecurity Schema Framework (OCSF) event class which describes the type of data that the custom source will send to Security Lake.

      " + "eventClasses":{ + "shape":"OcsfEventClassList", + "documentation":"

      The Open Cybersecurity Schema Framework (OCSF) event classes which describes the type of data that the custom source will send to Security Lake. The supported event classes are:

      • ACCESS_ACTIVITY

      • FILE_ACTIVITY

      • KERNEL_ACTIVITY

      • KERNEL_EXTENSION

      • MEMORY_ACTIVITY

      • MODULE_ACTIVITY

      • PROCESS_ACTIVITY

      • REGISTRY_KEY_ACTIVITY

      • REGISTRY_VALUE_ACTIVITY

      • RESOURCE_ACTIVITY

      • SCHEDULED_JOB_ACTIVITY

      • SECURITY_FINDING

      • ACCOUNT_CHANGE

      • AUTHENTICATION

      • AUTHORIZATION

      • ENTITY_MANAGEMENT_AUDIT

      • DHCP_ACTIVITY

      • NETWORK_ACTIVITY

      • DNS_ACTIVITY

      • FTP_ACTIVITY

      • HTTP_ACTIVITY

      • RDP_ACTIVITY

      • SMB_ACTIVITY

      • SSH_ACTIVITY

      • CONFIG_STATE

      • INVENTORY_INFO

      • EMAIL_ACTIVITY

      • API_ACTIVITY

      • CLOUD_API

      " }, - "glueInvocationRoleArn":{ - "shape":"RoleArn", - "documentation":"

      The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role to be used by the Glue crawler. The recommended IAM policies are:

      • The managed policy AWSGlueServiceRole

      • A custom policy granting access to your Amazon S3 Data Lake

      " + "sourceName":{ + "shape":"CustomLogSourceName", + "documentation":"

      Specify the name for a third-party custom source. This must be a Regionally unique value.

      " }, - "logProviderAccountId":{ - "shape":"AwsAccountId", - "documentation":"

      The Amazon Web Services account ID of the custom source that will write logs and events into the Amazon S3 Data Lake.

      " + "sourceVersion":{ + "shape":"CustomLogSourceVersion", + "documentation":"

      Specify the source version for the third-party custom source, to limit log collection to a specific version of custom data source.

      " } } }, "CreateCustomLogSourceResponse":{ "type":"structure", - "required":[ - "customDataLocation", - "glueCrawlerName", - "glueDatabaseName", - "glueTableName", - "logProviderAccessRoleArn" - ], "members":{ - "customDataLocation":{ - "shape":"String", - "documentation":"

      The location of the partition in the Amazon S3 bucket for Security Lake.

      " - }, - "glueCrawlerName":{ - "shape":"String", - "documentation":"

      The name of the Glue crawler.

      " - }, - "glueDatabaseName":{ - "shape":"String", - "documentation":"

      The Glue database where results are written, such as: arn:aws:daylight:us-east-1::database/sometable/*.

      " - }, - "glueTableName":{ - "shape":"String", - "documentation":"

      The table name of the Glue crawler.

      " - }, - "logProviderAccessRoleArn":{ - "shape":"String", - "documentation":"

      The ARN of the IAM role to be used by the entity putting logs into your custom source partition. Security Lake will apply the correct access policies to this role, but you must first manually create the trust policy for this role. The IAM role name must start with the text 'Security Lake'. The IAM role must trust the logProviderAccountId to assume the role.

      " + "source":{ + "shape":"CustomLogSourceResource", + "documentation":"

      The created third-party custom source.

      " } } }, - "CreateDatalakeAutoEnableRequest":{ + "CreateDataLakeExceptionSubscriptionRequest":{ "type":"structure", - "required":["configurationForNewAccounts"], + "required":[ + "notificationEndpoint", + "subscriptionProtocol" + ], "members":{ - "configurationForNewAccounts":{ - "shape":"AutoEnableNewRegionConfigurationList", - "documentation":"

      Enable Security Lake with the specified configuration settings to begin collecting security data for new accounts in your organization.

      " + "exceptionTimeToLive":{ + "shape":"CreateDataLakeExceptionSubscriptionRequestExceptionTimeToLiveLong", + "documentation":"

      The expiration period and time-to-live (TTL).

      " + }, + "notificationEndpoint":{ + "shape":"SafeString", + "documentation":"

      The Amazon Web Services account where you want to receive exception notifications.

      " + }, + "subscriptionProtocol":{ + "shape":"SubscriptionProtocol", + "documentation":"

      The subscription protocol to which exception notifications are posted.

      " } } }, - "CreateDatalakeAutoEnableResponse":{ + "CreateDataLakeExceptionSubscriptionRequestExceptionTimeToLiveLong":{ + "type":"long", + "box":true, + "min":1 + }, + "CreateDataLakeExceptionSubscriptionResponse":{ "type":"structure", "members":{ } }, - "CreateDatalakeDelegatedAdminRequest":{ + "CreateDataLakeOrganizationConfigurationRequest":{ "type":"structure", - "required":["account"], + "required":["autoEnableNewAccount"], "members":{ - "account":{ - "shape":"SafeString", - "documentation":"

      The Amazon Web Services account ID of the Security Lake delegated administrator.

      " + "autoEnableNewAccount":{ + "shape":"DataLakeAutoEnableNewAccountConfigurationList", + "documentation":"

      Enable Security Lake with the specified configuration settings, to begin collecting security data for new accounts in your organization.

      " } } }, - "CreateDatalakeDelegatedAdminResponse":{ + "CreateDataLakeOrganizationConfigurationResponse":{ "type":"structure", "members":{ } }, - "CreateDatalakeExceptionsSubscriptionRequest":{ + "CreateDataLakeRequest":{ "type":"structure", "required":[ - "notificationEndpoint", - "subscriptionProtocol" + "configurations", + "metaStoreManagerRoleArn" ], "members":{ - "notificationEndpoint":{ - "shape":"SafeString", - "documentation":"

      The Amazon Web Services account where you want to receive exception notifications.

      " + "configurations":{ + "shape":"DataLakeConfigurationList", + "documentation":"

      Specify the Region or Regions that will contribute data to the rollup region.

      " }, - "subscriptionProtocol":{ - "shape":"SubscriptionProtocolType", - "documentation":"

      The subscription protocol to which exception notifications are posted.

      " + "metaStoreManagerRoleArn":{ + "shape":"RoleArn", + "documentation":"

      The Amazon Resource Name (ARN) used to create and update the Glue table. This table contains partitions generated by the ingestion and normalization of Amazon Web Services log sources and custom sources.

      " } } }, - "CreateDatalakeExceptionsSubscriptionResponse":{ + "CreateDataLakeResponse":{ "type":"structure", "members":{ + "dataLakes":{ + "shape":"DataLakeResourceList", + "documentation":"

      The created Security Lake configuration object.

      " + } } }, - "CreateDatalakeRequest":{ + "CreateSubscriberNotificationRequest":{ "type":"structure", + "required":[ + "configuration", + "subscriberId" + ], "members":{ - "configurations":{ - "shape":"LakeConfigurationRequestMap", - "documentation":"

      Specify the Region or Regions that will contribute data to the rollup region.

      " - }, - "enableAll":{ - "shape":"Boolean", - "documentation":"

      Enable Security Lake in all Regions.

      " - }, - "metaStoreManagerRoleArn":{ - "shape":"RoleArn", - "documentation":"

      The Amazon Resource Name (ARN) used to create and update the Glue table. This table contains partitions generated by the ingestion and normalization of Amazon Web Services log sources and custom sources.

      " + "configuration":{ + "shape":"NotificationConfiguration", + "documentation":"

      Specify the configuration using which you want to create the subscriber notification.

      " }, - "regions":{ - "shape":"RegionSet", - "documentation":"

      Enable Security Lake in the specified Regions. To enable Security Lake in specific Amazon Web Services Regions, such as us-east-1 or ap-northeast-3, provide the Region codes. For a list of Region codes, see Amazon Security Lake endpoints in the Amazon Web Services General Reference.

      " + "subscriberId":{ + "shape":"UUID", + "documentation":"

      The subscriber ID for the notification subscription.

      ", + "location":"uri", + "locationName":"subscriberId" } } }, - "CreateDatalakeResponse":{ + "CreateSubscriberNotificationResponse":{ "type":"structure", "members":{ + "subscriberEndpoint":{ + "shape":"SafeString", + "documentation":"

      The subscriber endpoint to which exception messages are posted.

      " + } } }, "CreateSubscriberRequest":{ "type":"structure", "required":[ - "accountId", - "externalId", - "sourceTypes", + "sources", + "subscriberIdentity", "subscriberName" ], "members":{ @@ -958,21 +881,17 @@ "shape":"AccessTypeList", "documentation":"

      The Amazon S3 or Lake Formation access type.

      " }, - "accountId":{ - "shape":"AwsAccountId", - "documentation":"

      The Amazon Web Services account ID used to access your data.

      " - }, - "externalId":{ - "shape":"SafeString", - "documentation":"

      The external ID of the subscriber. This lets the user that is assuming the role assert the circumstances in which they are operating. It also provides a way for the account owner to permit the role to be assumed only under specific circumstances.

      " - }, - "sourceTypes":{ - "shape":"SourceTypeList", + "sources":{ + "shape":"LogSourceResourceList", "documentation":"

      The supported Amazon Web Services from which logs and events are collected. Security Lake supports log and event collection for natively supported Amazon Web Services.

      " }, "subscriberDescription":{ "shape":"DescriptionString", - "documentation":"

      The description for your subscriber account in Security Lake.

      " + "documentation":"

      The description for your subscriber account in Security Lake.

      " + }, + "subscriberIdentity":{ + "shape":"AwsIdentity", + "documentation":"

      The AWS identity used to access your data.

      " }, "subscriberName":{ "shape":"CreateSubscriberRequestSubscriberNameString", @@ -987,416 +906,606 @@ }, "CreateSubscriberResponse":{ "type":"structure", - "required":["subscriptionId"], "members":{ - "resourceShareArn":{ - "shape":"ResourceShareArn", - "documentation":"

      The Amazon Resource Name (ARN) which uniquely defines the AWS RAM resource share. Before accepting the RAM resource share invitation, you can view details related to the RAM resource share.

      " + "subscriber":{ + "shape":"SubscriberResource", + "documentation":"

      Retrieve information about the subscriber created using the CreateSubscriber API.

      " + } + } + }, + "CustomLogSourceAttributes":{ + "type":"structure", + "members":{ + "crawlerArn":{ + "shape":"AmazonResourceName", + "documentation":"

      The ARN of the Glue crawler.

      " }, - "resourceShareName":{ - "shape":"ResourceShareName", - "documentation":"

      The name of the resource share.

      " + "databaseArn":{ + "shape":"AmazonResourceName", + "documentation":"

      The ARN of the Glue database where results are written, such as: arn:aws:daylight:us-east-1::database/sometable/*.

      " + }, + "tableArn":{ + "shape":"AmazonResourceName", + "documentation":"

      The ARN of the Glue table.

      " + } + }, + "documentation":"

      The attributes of a third-party custom source.

      " + }, + "CustomLogSourceConfiguration":{ + "type":"structure", + "required":[ + "crawlerConfiguration", + "providerIdentity" + ], + "members":{ + "crawlerConfiguration":{ + "shape":"CustomLogSourceCrawlerConfiguration", + "documentation":"

      The configuration for the Glue Crawler for the third-party custom source.

      " }, + "providerIdentity":{ + "shape":"AwsIdentity", + "documentation":"

      The identity of the log provider for the third-party custom source.

      " + } + }, + "documentation":"

      The configuration for the third-party custom source.

      " + }, + "CustomLogSourceCrawlerConfiguration":{ + "type":"structure", + "required":["roleArn"], + "members":{ "roleArn":{ "shape":"RoleArn", - "documentation":"

      The Amazon Resource Name (ARN) created by you to provide to the subscriber. For more information about ARNs and how to use them in policies, see Amazon Security Lake User Guide.

      " + "documentation":"

      The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role to be used by the Glue crawler. The recommended IAM policies are:

      • The managed policy AWSGlueServiceRole

      • A custom policy granting access to your Amazon S3 Data Lake

      " + } + }, + "documentation":"

      The configuration for the Glue Crawler for the third-party custom source.

      " + }, + "CustomLogSourceName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[\\\\\\w\\-_:/.]*$" + }, + "CustomLogSourceProvider":{ + "type":"structure", + "members":{ + "location":{ + "shape":"S3URI", + "documentation":"

      The location of the partition in the Amazon S3 bucket for Security Lake.

      " }, - "s3BucketArn":{ - "shape":"S3BucketArn", - "documentation":"

      The ARN for the Amazon S3 bucket.

      " + "roleArn":{ + "shape":"RoleArn", + "documentation":"

      The ARN of the IAM role to be used by the entity putting logs into your custom source partition. Security Lake will apply the correct access policies to this role, but you must first manually create the trust policy for this role. The IAM role name must start with the text 'Security Lake'. The IAM role must trust the logProviderAccountId to assume the role.

      " + } + }, + "documentation":"

      The details of the log provider for a third-party custom source.

      " + }, + "CustomLogSourceResource":{ + "type":"structure", + "members":{ + "attributes":{ + "shape":"CustomLogSourceAttributes", + "documentation":"

      The attributes of a third-party custom source.

      " }, - "snsArn":{ - "shape":"SnsTopicArn", - "documentation":"

      The ARN for the Amazon Simple Notification Service.

      " + "provider":{ + "shape":"CustomLogSourceProvider", + "documentation":"

      The details of the log provider for a third-party custom source.

      " }, - "subscriptionId":{ - "shape":"UUID", - "documentation":"

      The subscriptionId created by the CreateSubscriber API call.

      " + "sourceName":{ + "shape":"CustomLogSourceName", + "documentation":"

      The name for a third-party custom source. This must be a Regionally unique value.

      " + }, + "sourceVersion":{ + "shape":"CustomLogSourceVersion", + "documentation":"

      The version for a third-party custom source. This must be a Regionally unique value.

      " } - } + }, + "documentation":"

      Amazon Security Lake can collect logs and events from third-party custom sources.

      " }, - "CreateSubscriptionNotificationConfigurationRequest":{ + "CustomLogSourceVersion":{ + "type":"string", + "max":32, + "min":1, + "pattern":"^[A-Za-z0-9\\-\\.\\_]*$" + }, + "DataLakeAutoEnableNewAccountConfiguration":{ "type":"structure", - "required":["subscriptionId"], + "required":[ + "region", + "sources" + ], "members":{ - "createSqs":{ - "shape":"Boolean", - "documentation":"

      Create an Amazon Simple Queue Service queue.

      " + "region":{ + "shape":"Region", + "documentation":"

      The Amazon Web Services Regions where Security Lake is automatically enabled.

      " }, - "httpsApiKeyName":{ - "shape":"String", - "documentation":"

      The key name for the notification subscription.

      " + "sources":{ + "shape":"AwsLogSourceResourceList", + "documentation":"

      The Amazon Web Services sources that are automatically enabled in Security Lake.

      " + } + }, + "documentation":"

      Automatically enable new organization accounts as member accounts from an Amazon Security Lake administrator account.

      " + }, + "DataLakeAutoEnableNewAccountConfigurationList":{ + "type":"list", + "member":{"shape":"DataLakeAutoEnableNewAccountConfiguration"} + }, + "DataLakeConfiguration":{ + "type":"structure", + "required":["region"], + "members":{ + "encryptionConfiguration":{ + "shape":"DataLakeEncryptionConfiguration", + "documentation":"

      Provides encryption details of Amazon Security Lake object.

      " + }, + "lifecycleConfiguration":{ + "shape":"DataLakeLifecycleConfiguration", + "documentation":"

      Provides lifecycle details of Amazon Security Lake object.

      " + }, + "region":{ + "shape":"Region", + "documentation":"

      The Amazon Web Services Regions where Security Lake is automatically enabled.

      " }, - "httpsApiKeyValue":{ + "replicationConfiguration":{ + "shape":"DataLakeReplicationConfiguration", + "documentation":"

      Provides replication details of Amazon Security Lake object.

      " + } + }, + "documentation":"

      Provides details of Amazon Security Lake object.

      " + }, + "DataLakeConfigurationList":{ + "type":"list", + "member":{"shape":"DataLakeConfiguration"} + }, + "DataLakeEncryptionConfiguration":{ + "type":"structure", + "members":{ + "kmsKeyId":{ "shape":"String", - "documentation":"

      The key value for the notification subscription.

      " + "documentation":"

      The ID of the KMS encryption key used by Amazon Security Lake to encrypt the Security Lake object.

      " + } + }, + "documentation":"

      Provides encryption details of Amazon Security Lake object.

      " + }, + "DataLakeException":{ + "type":"structure", + "members":{ + "exception":{ + "shape":"SafeString", + "documentation":"

      The underlying exception of a Security Lake exception.

      " }, - "httpsMethod":{ - "shape":"HttpsMethod", - "documentation":"

      The HTTPS method used for the notification subscription.

      " + "region":{ + "shape":"Region", + "documentation":"

      The Amazon Web Services Regions where the exception occurred.

      " }, - "roleArn":{ - "shape":"RoleArn", - "documentation":"

      The Amazon Resource Name (ARN) of the EventBridge API destinations IAM role that you created. For more information about ARNs and how to use them in policies, see Managing data access and Amazon Web Services Managed Policies in the Amazon Security Lake User Guide.

      " + "remediation":{ + "shape":"SafeString", + "documentation":"

      List of all remediation steps for a Security Lake exception.

      " }, - "subscriptionEndpoint":{ - "shape":"CreateSubscriptionNotificationConfigurationRequestSubscriptionEndpointString", - "documentation":"

      The subscription endpoint in Security Lake. If you prefer notification with an HTTPs endpoint, populate this field.

      " + "timestamp":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

      This error can occur if you configure the wrong timestamp format, or if the subset of entries used for validation had errors or missing values.

      " + } + }, + "documentation":"

      The details for a Security Lake exception

      " + }, + "DataLakeExceptionList":{ + "type":"list", + "member":{"shape":"DataLakeException"} + }, + "DataLakeLifecycleConfiguration":{ + "type":"structure", + "members":{ + "expiration":{ + "shape":"DataLakeLifecycleExpiration", + "documentation":"

      Provides data expiration details of Amazon Security Lake object.

      " }, - "subscriptionId":{ - "shape":"UUID", - "documentation":"

      The subscription ID for the notification subscription.

      ", - "location":"uri", - "locationName":"subscriptionId" + "transitions":{ + "shape":"DataLakeLifecycleTransitionList", + "documentation":"

      Provides data storage transition details of Amazon Security Lake object.

      " } - } + }, + "documentation":"

      Provides lifecycle details of Amazon Security Lake object.

      " }, - "CreateSubscriptionNotificationConfigurationRequestSubscriptionEndpointString":{ - "type":"string", - "pattern":"^(arn:aws:.+$|https?://.+$)" + "DataLakeLifecycleExpiration":{ + "type":"structure", + "members":{ + "days":{ + "shape":"DataLakeLifecycleExpirationDaysInteger", + "documentation":"

      Number of days before data expires in the Amazon Security Lake object.

      " + } + }, + "documentation":"

      Provide expiration lifecycle details of Amazon Security Lake object.

      " + }, + "DataLakeLifecycleExpirationDaysInteger":{ + "type":"integer", + "box":true, + "min":1 }, - "CreateSubscriptionNotificationConfigurationResponse":{ + "DataLakeLifecycleTransition":{ "type":"structure", "members":{ - "queueArn":{ - "shape":"SafeString", - "documentation":"

      Returns the Amazon Resource Name (ARN) of the queue.

      " + "days":{ + "shape":"DataLakeLifecycleTransitionDaysInteger", + "documentation":"

      Number of days before data transitions to a different S3 Storage Class in the Amazon Security Lake object.

      " + }, + "storageClass":{ + "shape":"DataLakeStorageClass", + "documentation":"

      The range of storage classes that you can choose from based on the data access, resiliency, and cost requirements of your workloads.

      " } - } + }, + "documentation":"

      Provide transition lifecycle details of Amazon Security Lake object.

      " }, - "CustomSourceType":{ - "type":"string", - "pattern":"^[\\\\\\w\\-_:/.]*$" + "DataLakeLifecycleTransitionDaysInteger":{ + "type":"integer", + "box":true, + "min":1 }, - "DeleteAwsLogSourceRequest":{ + "DataLakeLifecycleTransitionList":{ + "type":"list", + "member":{"shape":"DataLakeLifecycleTransition"} + }, + "DataLakeReplicationConfiguration":{ "type":"structure", - "required":["inputOrder"], "members":{ - "disableAllDimensions":{ - "shape":"AllDimensionsMap", - "documentation":"

      Removes the specific Amazon Web Services sources from specific accounts and specific Regions.

      " + "regions":{ + "shape":"RegionList", + "documentation":"

      Replication enables automatic, asynchronous copying of objects across Amazon S3 buckets. Amazon S3 buckets that are configured for object replication can be owned by the same Amazon Web Services account or by different accounts. You can replicate objects to a single destination bucket or to multiple destination buckets. The destination buckets can be in different Amazon Web Services Regions or within the same Region as the source bucket.

      Set up one or more rollup Regions by providing the Region or Regions that should contribute to the central rollup Region.

      " }, - "disableSingleDimension":{ - "shape":"InputSet", - "documentation":"

      Removes all Amazon Web Services sources from specific accounts or Regions.

      " + "roleArn":{ + "shape":"RoleArn", + "documentation":"

      Replication settings for the Amazon S3 buckets. This parameter uses the Identity and Access Management (IAM) role you created that is managed by Security Lake, to ensure the replication setting is correct.

      " + } + }, + "documentation":"

      Provides replication details of Amazon Security Lake object.

      " + }, + "DataLakeResource":{ + "type":"structure", + "required":[ + "dataLakeArn", + "region" + ], + "members":{ + "createStatus":{ + "shape":"DataLakeStatus", + "documentation":"

      Retrieves the status of the configuration operation for an account in Amazon Security Lake.

      " }, - "disableTwoDimensions":{ - "shape":"TwoDimensionsMap", - "documentation":"

      Remove a specific Amazon Web Services source from specific accounts or Regions.

      " + "dataLakeArn":{ + "shape":"AmazonResourceName", + "documentation":"

      The Amazon Resource Name (ARN) created by you to provide to the subscriber. For more information about ARNs and how to use them in policies, see the Amazon Security Lake User Guide.

      " }, - "inputOrder":{ - "shape":"DimensionSet", - "documentation":"

      This is a mandatory input. Specify the input order to disable dimensions in Security Lake, namely Region (Amazon Web Services Region code, source type, and member (account ID of a specific Amazon Web Services account).

      " + "encryptionConfiguration":{ + "shape":"DataLakeEncryptionConfiguration", + "documentation":"

      Provides encryption details of Amazon Security Lake object.

      " + }, + "lifecycleConfiguration":{ + "shape":"DataLakeLifecycleConfiguration", + "documentation":"

      Provides lifecycle details of Amazon Security Lake object.

      " + }, + "region":{ + "shape":"Region", + "documentation":"

      The Amazon Web Services Regions where Security Lake is enabled.

      " + }, + "replicationConfiguration":{ + "shape":"DataLakeReplicationConfiguration", + "documentation":"

      Provides replication details of Amazon Security Lake object.

      " + }, + "s3BucketArn":{ + "shape":"S3BucketArn", + "documentation":"

      The ARN for the Amazon Security Lake Amazon S3 bucket.

      " + }, + "updateStatus":{ + "shape":"DataLakeUpdateStatus", + "documentation":"

      The status of the last UpdateDataLake or DeleteDataLake API request.

      " } - } + }, + "documentation":"

      Provides details of Amazon Security Lake object.

      " }, - "DeleteAwsLogSourceResponse":{ + "DataLakeResourceList":{ + "type":"list", + "member":{"shape":"DataLakeResource"} + }, + "DataLakeSource":{ "type":"structure", "members":{ - "failed":{ - "shape":"AccountList", - "documentation":"

      Deletion of the Amazon Web Services sources failed as the account is not a part of the organization.

      " + "account":{ + "shape":"String", + "documentation":"

      The ID of the Security Lake account for which logs are collected.

      " }, - "processing":{ - "shape":"AccountList", - "documentation":"

      Deletion of the Amazon Web Services sources is in progress.

      " + "eventClasses":{ + "shape":"OcsfEventClassList", + "documentation":"

      The Open Cybersecurity Schema Framework (OCSF) event classes which describes the type of data that the custom source will send to Security Lake. The supported event classes are:

      • ACCESS_ACTIVITY

      • FILE_ACTIVITY

      • KERNEL_ACTIVITY

      • KERNEL_EXTENSION

      • MEMORY_ACTIVITY

      • MODULE_ACTIVITY

      • PROCESS_ACTIVITY

      • REGISTRY_KEY_ACTIVITY

      • REGISTRY_VALUE_ACTIVITY

      • RESOURCE_ACTIVITY

      • SCHEDULED_JOB_ACTIVITY

      • SECURITY_FINDING

      • ACCOUNT_CHANGE

      • AUTHENTICATION

      • AUTHORIZATION

      • ENTITY_MANAGEMENT_AUDIT

      • DHCP_ACTIVITY

      • NETWORK_ACTIVITY

      • DNS_ACTIVITY

      • FTP_ACTIVITY

      • HTTP_ACTIVITY

      • RDP_ACTIVITY

      • SMB_ACTIVITY

      • SSH_ACTIVITY

      • CONFIG_STATE

      • INVENTORY_INFO

      • EMAIL_ACTIVITY

      • API_ACTIVITY

      • CLOUD_API

      " + }, + "sourceName":{ + "shape":"String", + "documentation":"

      The supported Amazon Web Services from which logs and events are collected. Amazon Security Lake supports log and event collection for natively supported Amazon Web Services.

      " + }, + "sourceStatuses":{ + "shape":"DataLakeSourceStatusList", + "documentation":"

      The log status for the Security Lake account.

      " } - } + }, + "documentation":"

      Amazon Security Lake collects logs and events from supported Amazon Web Services and custom sources. For the list of supported Amazon Web Services, see the Amazon Security Lake User Guide.

      " }, - "DeleteCustomLogSourceRequest":{ + "DataLakeSourceList":{ + "type":"list", + "member":{"shape":"DataLakeSource"} + }, + "DataLakeSourceStatus":{ "type":"structure", - "required":["customSourceName"], "members":{ - "customSourceName":{ + "resource":{ "shape":"String", - "documentation":"

      The custom source name for the custom log source.

      ", - "location":"querystring", - "locationName":"customSourceName" + "documentation":"

      Defines the path where the stored logs are available, which has information on your systems, applications, and services.

      " + }, + "status":{ + "shape":"SourceCollectionStatus", + "documentation":"

      The health status of services, including error codes and patterns.

      " } - } + }, + "documentation":"

      Retrieves the Logs status for the Amazon Security Lake account.

      " }, - "DeleteCustomLogSourceResponse":{ + "DataLakeSourceStatusList":{ + "type":"list", + "member":{"shape":"DataLakeSourceStatus"} + }, + "DataLakeStatus":{ + "type":"string", + "enum":[ + "INITIALIZED", + "PENDING", + "COMPLETED", + "FAILED" + ] + }, + "DataLakeStorageClass":{"type":"string"}, + "DataLakeUpdateException":{ "type":"structure", - "required":["customDataLocation"], "members":{ - "customDataLocation":{ + "code":{ "shape":"String", - "documentation":"

      The location of the partition in the Amazon S3 bucket for Security Lake.

      " + "documentation":"

      The reason code for the exception of the last UpdateDataLake or DeleteDataLake API request.

      " + }, + "reason":{ + "shape":"String", + "documentation":"

      The reason for the exception of the last UpdateDataLake or DeleteDataLake API request.

      " } - } + }, + "documentation":"

      The details of the last UpdateDataLake or DeleteDataLake API request which failed.

      " }, - "DeleteDatalakeAutoEnableRequest":{ + "DataLakeUpdateStatus":{ "type":"structure", - "required":["removeFromConfigurationForNewAccounts"], "members":{ - "removeFromConfigurationForNewAccounts":{ - "shape":"AutoEnableNewRegionConfigurationList", - "documentation":"

      Remove automatic enablement of configuration settings for new member accounts in Security Lake.

      " + "exception":{ + "shape":"DataLakeUpdateException", + "documentation":"

      The details of the last UpdateDataLake or DeleteDataLake API request which failed.

      " + }, + "requestId":{ + "shape":"String", + "documentation":"

      The unique ID for the last UpdateDataLake or DeleteDataLake API request.

      " + }, + "status":{ + "shape":"DataLakeStatus", + "documentation":"

      The status of the last UpdateDataLake or DeleteDataLake API request that was requested.

      " + } + }, + "documentation":"

      The status of the last UpdateDataLake or DeleteDataLake API request. This is set to Completed after the configuration is updated, or removed if deletion of the data lake is successful.

      " + }, + "DeleteAwsLogSourceRequest":{ + "type":"structure", + "required":["sources"], + "members":{ + "sources":{ + "shape":"AwsLogSourceConfigurationList", + "documentation":"

      Specify the natively-supported Amazon Web Services service to remove as a source in Security Lake.

      " } } }, - "DeleteDatalakeAutoEnableResponse":{ + "DeleteAwsLogSourceResponse":{ "type":"structure", "members":{ + "failed":{ + "shape":"AccountList", + "documentation":"

      Deletion of the Amazon Web Services sources failed as the account is not a part of the organization.

      " + } } }, - "DeleteDatalakeDelegatedAdminRequest":{ + "DeleteCustomLogSourceRequest":{ "type":"structure", - "required":["account"], + "required":["sourceName"], "members":{ - "account":{ - "shape":"SafeString", - "documentation":"

      The account ID the Security Lake delegated administrator.

      ", + "sourceName":{ + "shape":"CustomLogSourceName", + "documentation":"

      The source name of custom log source that you want to delete.

      ", "location":"uri", - "locationName":"account" + "locationName":"sourceName" + }, + "sourceVersion":{ + "shape":"CustomLogSourceVersion", + "documentation":"

      The source version for the third-party custom source. You can limit the custom source removal to the specified source version.

      ", + "location":"querystring", + "locationName":"sourceVersion" } } }, - "DeleteDatalakeDelegatedAdminResponse":{ + "DeleteCustomLogSourceResponse":{ "type":"structure", "members":{ } }, - "DeleteDatalakeExceptionsSubscriptionRequest":{ + "DeleteDataLakeExceptionSubscriptionRequest":{ "type":"structure", "members":{ } }, - "DeleteDatalakeExceptionsSubscriptionResponse":{ + "DeleteDataLakeExceptionSubscriptionResponse":{ "type":"structure", - "required":["status"], "members":{ - "status":{ - "shape":"SafeString", - "documentation":"

      Retrieves the status of the delete Security Lake operation for an account.

      " - } } }, - "DeleteDatalakeRequest":{ + "DeleteDataLakeOrganizationConfigurationRequest":{ "type":"structure", + "required":["autoEnableNewAccount"], "members":{ + "autoEnableNewAccount":{ + "shape":"DataLakeAutoEnableNewAccountConfigurationList", + "documentation":"

      Removes the automatic enablement of configuration settings for new member accounts in Security Lake.

      " + } } }, - "DeleteDatalakeResponse":{ + "DeleteDataLakeOrganizationConfigurationResponse":{ "type":"structure", "members":{ } }, - "DeleteSubscriberRequest":{ + "DeleteDataLakeRequest":{ "type":"structure", - "required":["id"], + "required":["regions"], "members":{ - "id":{ - "shape":"String", - "documentation":"

      A value created by Security Lake that uniquely identifies your DeleteSubscriber API request.

      ", - "location":"querystring", - "locationName":"id" + "regions":{ + "shape":"RegionList", + "documentation":"

      The list of Regions where Security Lake is enabled.

      " } } }, - "DeleteSubscriberResponse":{ + "DeleteDataLakeResponse":{ "type":"structure", "members":{ } }, - "DeleteSubscriptionNotificationConfigurationRequest":{ + "DeleteSubscriberNotificationRequest":{ "type":"structure", - "required":["subscriptionId"], + "required":["subscriberId"], "members":{ - "subscriptionId":{ + "subscriberId":{ "shape":"UUID", "documentation":"

      The ID of the Security Lake subscriber account.

      ", "location":"uri", - "locationName":"subscriptionId" + "locationName":"subscriberId" } } }, - "DeleteSubscriptionNotificationConfigurationResponse":{ + "DeleteSubscriberNotificationResponse":{ "type":"structure", "members":{ } }, - "DescriptionString":{ - "type":"string", - "pattern":"^[\\\\\\w\\s\\-_:/,.@=+]*$" - }, - "Dimension":{ - "type":"string", - "enum":[ - "REGION", - "SOURCE_TYPE", - "MEMBER" - ] - }, - "DimensionSet":{ - "type":"list", - "member":{"shape":"Dimension"} - }, - "EndpointProtocol":{ - "type":"string", - "enum":[ - "HTTPS", - "SQS" - ] - }, - "EventBridgeException":{ - "type":"structure", - "required":["message"], - "members":{ - "message":{"shape":"String"} - }, - "documentation":"

      Represents an error interacting with the Amazon EventBridge service.

      ", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "Failures":{ - "type":"structure", - "required":[ - "exceptionMessage", - "remediation", - "timestamp" - ], - "members":{ - "exceptionMessage":{ - "shape":"SafeString", - "documentation":"

      List of all exception messages.

      " - }, - "remediation":{ - "shape":"SafeString", - "documentation":"

      List of all remediation steps for failures.

      " - }, - "timestamp":{ - "shape":"SyntheticTimestamp_date_time", - "documentation":"

      This error can occur if you configure the wrong timestamp format, or if the subset of entries used for validation had errors or missing values.

      " - } - }, - "documentation":"

      List of all failures.

      " - }, - "FailuresResponse":{ + "DeleteSubscriberRequest":{ "type":"structure", + "required":["subscriberId"], "members":{ - "failures":{ - "shape":"Failureslist", - "documentation":"

      List of all failures.

      " - }, - "region":{ - "shape":"SafeString", - "documentation":"

      List of Amazon Web Services Regions where the failure occurred.

      " + "subscriberId":{ + "shape":"UUID", + "documentation":"

      A value created by Security Lake that uniquely identifies your DeleteSubscriber API request.

      ", + "location":"uri", + "locationName":"subscriberId" } - }, - "documentation":"

      Response element for actions that make changes, namely create, update, or delete actions.

      " - }, - "FailuresResponseList":{ - "type":"list", - "member":{"shape":"FailuresResponse"} - }, - "Failureslist":{ - "type":"list", - "member":{"shape":"Failures"} - }, - "GetDatalakeAutoEnableRequest":{ - "type":"structure", - "members":{ } }, - "GetDatalakeAutoEnableResponse":{ + "DeleteSubscriberResponse":{ "type":"structure", - "required":["autoEnableNewAccounts"], "members":{ - "autoEnableNewAccounts":{ - "shape":"AutoEnableNewRegionConfigurationList", - "documentation":"

      The configuration for new accounts.

      " - } } }, - "GetDatalakeExceptionsExpiryRequest":{ + "DeregisterDataLakeDelegatedAdministratorRequest":{ "type":"structure", "members":{ } }, - "GetDatalakeExceptionsExpiryResponse":{ + "DeregisterDataLakeDelegatedAdministratorResponse":{ "type":"structure", - "required":["exceptionMessageExpiry"], "members":{ - "exceptionMessageExpiry":{ - "shape":"Long", - "documentation":"

      The expiration period and time-to-live (TTL).

      " - } } }, - "GetDatalakeExceptionsSubscriptionRequest":{ + "DescriptionString":{ + "type":"string", + "pattern":"^[\\\\\\w\\s\\-_:/,.@=+]*$" + }, + "ExternalId":{ + "type":"string", + "max":1224, + "min":2, + "pattern":"^[\\w+=,.@:\\/-]*$" + }, + "GetDataLakeExceptionSubscriptionRequest":{ "type":"structure", "members":{ } }, - "GetDatalakeExceptionsSubscriptionResponse":{ + "GetDataLakeExceptionSubscriptionResponse":{ "type":"structure", - "required":["protocolAndNotificationEndpoint"], "members":{ - "protocolAndNotificationEndpoint":{ - "shape":"ProtocolAndNotificationEndpoint", - "documentation":"

      Retrieves the exception notification subscription information.

      " + "exceptionTimeToLive":{ + "shape":"Long", + "documentation":"

      The expiration period and time-to-live (TTL).

      " + }, + "notificationEndpoint":{ + "shape":"SafeString", + "documentation":"

      The Amazon Web Services account where you receive exception notifications.

      " + }, + "subscriptionProtocol":{ + "shape":"SubscriptionProtocol", + "documentation":"

      The subscription protocol to which exception notifications are posted.

      " } } }, - "GetDatalakeRequest":{ + "GetDataLakeOrganizationConfigurationRequest":{ "type":"structure", "members":{ } }, - "GetDatalakeResponse":{ + "GetDataLakeOrganizationConfigurationResponse":{ "type":"structure", - "required":["configurations"], "members":{ - "configurations":{ - "shape":"LakeConfigurationResponseMap", - "documentation":"

      Retrieves the Security Lake configuration object.

      " + "autoEnableNewAccount":{ + "shape":"DataLakeAutoEnableNewAccountConfigurationList", + "documentation":"

      The configuration for new accounts.

      " } } }, - "GetDatalakeStatusRequest":{ + "GetDataLakeSourcesRequest":{ "type":"structure", "members":{ - "accountSet":{ - "shape":"InputSet", + "accounts":{ + "shape":"AccountList", "documentation":"

      The Amazon Web Services account ID for which a static snapshot of the current Amazon Web Services Region, including enabled accounts and log sources, is retrieved.

      " }, - "maxAccountResults":{ - "shape":"Integer", + "maxResults":{ + "shape":"MaxResults", "documentation":"

      The maximum limit of accounts for which the static snapshot of the current Region, including enabled accounts and log sources, is retrieved.

      " }, "nextToken":{ - "shape":"SafeString", - "documentation":"

      Lists if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

      Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

      " + "shape":"NextToken", + "documentation":"

      Lists if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

      Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

      " } } }, - "GetDatalakeStatusResponse":{ + "GetDataLakeSourcesResponse":{ "type":"structure", - "required":["accountSourcesList"], "members":{ - "accountSourcesList":{ - "shape":"AccountSourcesList", + "dataLakeArn":{ + "shape":"AmazonResourceName", + "documentation":"

      The Amazon Resource Name (ARN) created by you to provide to the subscriber. For more information about ARNs and how to use them in policies, see the Amazon Security Lake User Guide.

      " + }, + "dataLakeSources":{ + "shape":"DataLakeSourceList", "documentation":"

      The list of enabled accounts and enabled sources.

      " }, "nextToken":{ - "shape":"SafeString", - "documentation":"

      Lists if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

      Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

      " + "shape":"NextToken", + "documentation":"

      Lists if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

      Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

      " } } }, "GetSubscriberRequest":{ "type":"structure", - "required":["id"], + "required":["subscriberId"], "members":{ - "id":{ - "shape":"String", + "subscriberId":{ + "shape":"UUID", "documentation":"

      A value created by Amazon Security Lake that uniquely identifies your GetSubscriber API request.

      ", "location":"uri", - "locationName":"id" + "locationName":"subscriberId" } } }, @@ -1405,20 +1514,50 @@ "members":{ "subscriber":{ "shape":"SubscriberResource", - "documentation":"

      The subscription information for the specified subscription ID.

      " + "documentation":"

      The subscriber information for the specified subscriber ID.

      " } } }, - "HttpsMethod":{ + "HttpMethod":{ "type":"string", "enum":[ "POST", "PUT" ] }, - "InputSet":{ - "type":"list", - "member":{"shape":"SafeString"} + "HttpsNotificationConfiguration":{ + "type":"structure", + "required":[ + "endpoint", + "targetRoleArn" + ], + "members":{ + "authorizationApiKeyName":{ + "shape":"String", + "documentation":"

      The key name for the notification subscription.

      " + }, + "authorizationApiKeyValue":{ + "shape":"String", + "documentation":"

      The key value for the notification subscription.

      " + }, + "endpoint":{ + "shape":"HttpsNotificationConfigurationEndpointString", + "documentation":"

      The subscription endpoint in Security Lake. If you prefer notification with an HTTPs endpoint, populate this field.

      " + }, + "httpMethod":{ + "shape":"HttpMethod", + "documentation":"

      The HTTPS method used for the notification subscription.

      " + }, + "targetRoleArn":{ + "shape":"RoleArn", + "documentation":"

      The Amazon Resource Name (ARN) of the EventBridge API destinations IAM role that you created. For more information about ARNs and how to use them in policies, see Managing data access and Amazon Web Services Managed Policies in the Amazon Security Lake User Guide.

      " + } + }, + "documentation":"

      The configurations for HTTPS subscriber notification.

      " + }, + "HttpsNotificationConfigurationEndpointString":{ + "type":"string", + "pattern":"^https?://.+$" }, "Integer":{ "type":"integer", @@ -1426,194 +1565,100 @@ }, "InternalServerException":{ "type":"structure", - "required":["message"], "members":{ - "message":{"shape":"String"}, - "retryAfterSeconds":{ - "shape":"Integer", - "documentation":"

      Retry the request after the specified time.

      ", - "location":"header", - "locationName":"Retry-After" - } + "message":{"shape":"String"} }, - "documentation":"

      Internal service exceptions are sometimes caused by transient issues. Before you start troubleshooting, perform the operation again.

      ", + "documentation":"

      Internal service exceptions are sometimes caused by transient issues. Before you start troubleshooting, perform the operation again.

      ", "error":{"httpStatusCode":500}, "exception":true, "fault":true, "retryable":{"throttling":false} }, - "InvalidInputException":{ - "type":"structure", - "required":["message"], - "members":{ - "message":{"shape":"String"} - }, - "documentation":"

      The request was rejected because a value that's not valid or is out of range was supplied for an input parameter.

      ", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "LakeConfigurationRequest":{ - "type":"structure", - "members":{ - "encryptionKey":{ - "shape":"String", - "documentation":"

      The type of encryption key used by Amazon Security Lake to encrypt the Security Lake configuration object.

      " - }, - "replicationDestinationRegions":{ - "shape":"RegionSet", - "documentation":"

      Replication enables automatic, asynchronous copying of objects across Amazon S3 buckets. Amazon S3 buckets that are configured for object replication can be owned by the same Amazon Web Services account or by different accounts. You can replicate objects to a single destination bucket or to multiple destination buckets. The destination buckets can be in different Amazon Web Services Regions or within the same Region as the source bucket.

      Set up one or more rollup Regions by providing the Region or Regions that should contribute to the central rollup Region.

      " - }, - "replicationRoleArn":{ - "shape":"RoleArn", - "documentation":"

      Replication settings for the Amazon S3 buckets. This parameter uses the Identity and Access Management (IAM) role you created that is managed by Security Lake, to ensure the replication setting is correct.

      " - }, - "retentionSettings":{ - "shape":"RetentionSettingList", - "documentation":"

      Retention settings for the destination Amazon S3 buckets.

      " - }, - "tagsMap":{ - "shape":"TagsMap", - "documentation":"

      A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value, both of which you define.

      " - } - }, - "documentation":"

      Provides details of Amazon Security Lake configuration object.

      " - }, - "LakeConfigurationRequestMap":{ - "type":"map", - "key":{"shape":"Region"}, - "value":{"shape":"LakeConfigurationRequest"} - }, - "LakeConfigurationResponse":{ + "ListDataLakeExceptionsRequest":{ "type":"structure", "members":{ - "encryptionKey":{ - "shape":"String", - "documentation":"

      The type of encryption key used by secure the Security Lake configuration object.

      " - }, - "replicationDestinationRegions":{ - "shape":"RegionSet", - "documentation":"

      Replication enables automatic, asynchronous copying of objects across Amazon S3 buckets. Amazon S3 buckets that are configured for object replication can be owned by the same Amazon Web Services account or by different accounts. You can replicate objects to a single destination bucket or to multiple destination buckets. The destination buckets can be in different Amazon Web Services Regions or within the same Region as the source bucket.

      Set up one or more rollup Regions by providing the Region or Regions that should contribute to the central rollup Region.

      " - }, - "replicationRoleArn":{ - "shape":"RoleArn", - "documentation":"

      Replication settings for the Amazon S3 buckets. This parameter uses the IAM role you created that is managed by Security Lake, to ensure the replication setting is correct.

      " - }, - "retentionSettings":{ - "shape":"RetentionSettingList", - "documentation":"

      Retention settings for the destination Amazon S3 buckets.

      " - }, - "s3BucketArn":{ - "shape":"S3BucketArn", - "documentation":"

      Amazon Resource Names (ARNs) uniquely identify Amazon Web Services resources. Security Lake requires an ARN when you need to specify a resource unambiguously across all of Amazon Web Services, such as in IAM policies, Amazon Relational Database Service (Amazon RDS) tags, and API calls.

      " - }, - "status":{ - "shape":"settingsStatus", - "documentation":"

      Retrieves the status of the configuration operation for an account in Amazon Security Lake.

      " + "maxResults":{ + "shape":"MaxResults", + "documentation":"

      List the maximum number of failures in Security Lake.

      " }, - "tagsMap":{ - "shape":"TagsMap", - "documentation":"

      A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value, both of which you define.

      " + "nextToken":{ + "shape":"NextToken", + "documentation":"

      List if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

      Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

      " }, - "updateStatus":{ - "shape":"UpdateStatus", - "documentation":"

      The status of the last UpdateDatalake or DeleteDatalake API request.

      " + "regions":{ + "shape":"RegionList", + "documentation":"

      List the Amazon Web Services Regions from which exceptions are retrieved.

      " } - }, - "documentation":"

      Provides details of Amazon Security Lake lake configuration object.

      " - }, - "LakeConfigurationResponseMap":{ - "type":"map", - "key":{"shape":"Region"}, - "value":{"shape":"LakeConfigurationResponse"} + } }, - "LastUpdateFailure":{ + "ListDataLakeExceptionsResponse":{ "type":"structure", "members":{ - "code":{ - "shape":"String", - "documentation":"

      The reason code for the failure of the last UpdateDatalake or DeleteDatalake API request.

      " + "exceptions":{ + "shape":"DataLakeExceptionList", + "documentation":"

      Lists the failures that cannot be retried in the current Region.

      " }, - "reason":{ - "shape":"String", - "documentation":"

      The reason for the failure of the last UpdateDatalakeor DeleteDatalake API request.

      " + "nextToken":{ + "shape":"NextToken", + "documentation":"

      List if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

      Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

      " } - }, - "documentation":"

      The details of the last UpdateDatalake or DeleteDatalake API request which failed.

      " + } }, - "ListDatalakeExceptionsRequest":{ + "ListDataLakesRequest":{ "type":"structure", "members":{ - "maxFailures":{ - "shape":"Integer", - "documentation":"

      List the maximum number of failures in Security Lake.

      " - }, - "nextToken":{ - "shape":"SafeString", - "documentation":"

      List if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

      Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

      " - }, - "regionSet":{ - "shape":"RegionSet", - "documentation":"

      List the Amazon Web Services Regions from which exceptions are retrieved.

      " + "regions":{ + "shape":"RegionList", + "documentation":"

      The list of regions where Security Lake is enabled.

      ", + "location":"querystring", + "locationName":"regions" } } }, - "ListDatalakeExceptionsResponse":{ + "ListDataLakesResponse":{ "type":"structure", - "required":["nonRetryableFailures"], "members":{ - "nextToken":{ - "shape":"SafeString", - "documentation":"

      List if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

      Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

      " - }, - "nonRetryableFailures":{ - "shape":"FailuresResponseList", - "documentation":"

      Lists the failures that cannot be retried in the current Region.

      " + "dataLakes":{ + "shape":"DataLakeResourceList", + "documentation":"

      Retrieves the Security Lake configuration object.

      " } } }, "ListLogSourcesRequest":{ "type":"structure", "members":{ - "inputOrder":{ - "shape":"DimensionSet", - "documentation":"

      Lists the log sources in input order, namely Region, source type, and member account.

      " - }, - "listAllDimensions":{ - "shape":"AllDimensionsMap", - "documentation":"

      List the view of log sources for enabled Amazon Security Lake accounts for specific Amazon Web Services sources from specific accounts and specific Regions.

      " - }, - "listSingleDimension":{ - "shape":"InputSet", - "documentation":"

      List the view of log sources for enabled Security Lake accounts for all Amazon Web Services sources from specific accounts or specific Regions.

      " - }, - "listTwoDimensions":{ - "shape":"TwoDimensionsMap", - "documentation":"

      Lists the view of log sources for enabled Security Lake accounts for specific Amazon Web Services sources from specific accounts or specific Regions.

      " + "accounts":{ + "shape":"AccountList", + "documentation":"

      The list of Amazon Web Services accounts for which log sources are displayed.

      " }, "maxResults":{ - "shape":"Integer", + "shape":"MaxResults", "documentation":"

      The maximum number of accounts for which the log sources are displayed.

      " }, "nextToken":{ - "shape":"SafeString", + "shape":"NextToken", "documentation":"

      If nextToken is returned, there are more results available. You can repeat the call using the returned token to retrieve the next page.

      " + }, + "regions":{ + "shape":"RegionList", + "documentation":"

      The list of regions for which log sources are displayed.

      " + }, + "sources":{ + "shape":"LogSourceResourceList", + "documentation":"

      The list of sources for which log sources are displayed.

      " } } }, "ListLogSourcesResponse":{ "type":"structure", - "required":["regionSourceTypesAccountsList"], "members":{ "nextToken":{ - "shape":"String", + "shape":"NextToken", "documentation":"

      If nextToken is returned, there are more results available. You can repeat the call using the returned token to retrieve the next page.

      " }, - "regionSourceTypesAccountsList":{ - "shape":"RegionSourceTypesAccountsList", - "documentation":"

      Lists the log sources by Regions for enabled Security Lake accounts.

      " + "sources":{ + "shape":"LogSourceList", + "documentation":"

      The list of log sources in your organization that send data to the data lake.

      " } } }, @@ -1621,13 +1666,13 @@ "type":"structure", "members":{ "maxResults":{ - "shape":"Integer", + "shape":"MaxResults", "documentation":"

      The maximum number of accounts for which the configuration is displayed.

      ", "location":"querystring", "locationName":"maxResults" }, "nextToken":{ - "shape":"String", + "shape":"NextToken", "documentation":"

      If nextToken is returned, there are more results available. You can repeat the call using the returned token to retrieve the next page.

      ", "location":"querystring", "locationName":"nextToken" @@ -1636,128 +1681,125 @@ }, "ListSubscribersResponse":{ "type":"structure", - "required":["subscribers"], "members":{ "nextToken":{ - "shape":"String", + "shape":"NextToken", "documentation":"

      If nextToken is returned, there are more results available. You can repeat the call using the returned token to retrieve the next page.

      " }, - "subscribers":{ - "shape":"SubscriberList", - "documentation":"

      The subscribers available for the specified Security Lake account ID.

      " + "subscribers":{ + "shape":"SubscriberResourceList", + "documentation":"

      The subscribers available for the specified Security Lake account ID.

      " + } + } + }, + "LogSource":{ + "type":"structure", + "members":{ + "account":{ + "shape":"AwsAccountId", + "documentation":"

      Specify the account from which you want to collect logs.

      " + }, + "region":{ + "shape":"Region", + "documentation":"

      Specify the Regions from which you want to collect logs.

      " + }, + "sources":{ + "shape":"LogSourceResourceList", + "documentation":"

      Specify the sources from which you want to collect logs.

      " } - } + }, + "documentation":"

      Amazon Security Lake can collect logs and events from natively-supported Amazon Web Services services and custom sources.

      " + }, + "LogSourceList":{ + "type":"list", + "member":{"shape":"LogSource"} }, - "LogsStatus":{ + "LogSourceResource":{ "type":"structure", - "required":[ - "healthStatus", - "pathToLogs" - ], "members":{ - "healthStatus":{ - "shape":"SourceStatus", - "documentation":"

      The health status of services, including error codes and patterns.

      " + "awsLogSource":{ + "shape":"AwsLogSourceResource", + "documentation":"

      Amazon Security Lake supports log and event collection for natively supported Amazon Web Services.

      " }, - "pathToLogs":{ - "shape":"String", - "documentation":"

      Defines path the stored logs are available which has information on your systems, applications, and services.

      " + "customLogSource":{ + "shape":"CustomLogSourceResource", + "documentation":"

      Amazon Security Lake supports custom source types. For a detailed list, see the Amazon Security Lake User Guide.

      " } }, - "documentation":"

      Retrieves the Logs status for the Amazon Security Lake account.

      " + "documentation":"

      The supported source types from which logs and events are collected in Amazon Security Lake. For the list of supported Amazon Web Services, see the Amazon Security Lake User Guide.

      ", + "union":true }, - "LogsStatusList":{ + "LogSourceResourceList":{ "type":"list", - "member":{"shape":"LogsStatus"} + "member":{"shape":"LogSourceResource"} }, "Long":{ "type":"long", "box":true }, - "OcsfEventClass":{ + "MaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "NextToken":{ "type":"string", - "enum":[ - "ACCESS_ACTIVITY", - "FILE_ACTIVITY", - "KERNEL_ACTIVITY", - "KERNEL_EXTENSION", - "MEMORY_ACTIVITY", - "MODULE_ACTIVITY", - "PROCESS_ACTIVITY", - "REGISTRY_KEY_ACTIVITY", - "REGISTRY_VALUE_ACTIVITY", - "RESOURCE_ACTIVITY", - "SCHEDULED_JOB_ACTIVITY", - "SECURITY_FINDING", - "ACCOUNT_CHANGE", - "AUTHENTICATION", - "AUTHORIZATION", - "ENTITY_MANAGEMENT_AUDIT", - "DHCP_ACTIVITY", - "NETWORK_ACTIVITY", - "DNS_ACTIVITY", - "FTP_ACTIVITY", - "HTTP_ACTIVITY", - "RDP_ACTIVITY", - "SMB_ACTIVITY", - "SSH_ACTIVITY", - "CLOUD_API", - "CONTAINER_LIFECYCLE", - "DATABASE_LIFECYCLE", - "CONFIG_STATE", - "CLOUD_STORAGE", - "INVENTORY_INFO", - "RFB_ACTIVITY", - "SMTP_ACTIVITY", - "VIRTUAL_MACHINE_ACTIVITY" - ] + "pattern":"^[\\\\\\w\\-_:/.@=+]*$" }, - "ProtocolAndNotificationEndpoint":{ + "NotificationConfiguration":{ "type":"structure", "members":{ - "endpoint":{ - "shape":"SafeString", - "documentation":"

      The account that is subscribed to receive exception notifications.

      " + "httpsNotificationConfiguration":{ + "shape":"HttpsNotificationConfiguration", + "documentation":"

      The configurations for HTTPS subscriber notification.

      " }, - "protocol":{ - "shape":"SafeString", - "documentation":"

      The protocol to which notification messages are posted.

      " + "sqsNotificationConfiguration":{ + "shape":"SqsNotificationConfiguration", + "documentation":"

      The configurations for SQS subscriber notification.

      " } }, - "documentation":"

      Protocol used in Amazon Security Lake that dictates how notifications are posted at the endpoint.

      " + "documentation":"

      Specify the configurations you want to use for subscriber notification to notify the subscriber when new data is written to the data lake for sources that the subscriber consumes in Security Lake.

      ", + "union":true + }, + "OcsfEventClass":{ + "type":"string", + "pattern":"^[A-Z\\_0-9]*$" + }, + "OcsfEventClassList":{ + "type":"list", + "member":{"shape":"OcsfEventClass"} }, "Region":{ "type":"string", - "enum":[ - "us-east-1", - "us-west-2", - "eu-central-1", - "us-east-2", - "eu-west-1", - "ap-northeast-1", - "ap-southeast-2" - ] + "pattern":"^(af|ap|ca|eu|me|sa|us)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$" }, - "RegionSet":{ + "RegionList":{ "type":"list", "member":{"shape":"Region"} }, - "RegionSourceTypesAccountsList":{ - "type":"list", - "member":{"shape":"AllDimensionsMap"} + "RegisterDataLakeDelegatedAdministratorRequest":{ + "type":"structure", + "required":["accountId"], + "members":{ + "accountId":{ + "shape":"SafeString", + "documentation":"

      The Amazon Web Services account ID of the Security Lake delegated administrator.

      " + } + } + }, + "RegisterDataLakeDelegatedAdministratorResponse":{ + "type":"structure", + "members":{ + } }, "ResourceNotFoundException":{ "type":"structure", - "required":[ - "message", - "resourceId", - "resourceType" - ], "members":{ "message":{"shape":"String"}, - "resourceId":{ + "resourceName":{ "shape":"String", - "documentation":"

      The ID of the resource for which the type of resource could not be found.

      " + "documentation":"

      The name of the resource that could not be found.

      " }, "resourceType":{ "shape":"String", @@ -1776,154 +1818,54 @@ "type":"string", "pattern":"^LakeFormation(?:-V[0-9]+)-([a-zA-Z0-9]+)-([\\\\\\w\\-_:/.@=+]*)$" }, - "RetentionSetting":{ - "type":"structure", - "members":{ - "retentionPeriod":{ - "shape":"RetentionSettingRetentionPeriodInteger", - "documentation":"

      The retention period specifies a fixed period of time during which the Security Lake object remains locked. You can specify the retention period in days for one or more sources.

      " - }, - "storageClass":{ - "shape":"StorageClass", - "documentation":"

      The range of storage classes that you can choose from based on the data access, resiliency, and cost requirements of your workloads.

      " - } - }, - "documentation":"

      Retention settings for the destination Amazon S3 buckets in Amazon Security Lake.

      " - }, - "RetentionSettingList":{ - "type":"list", - "member":{"shape":"RetentionSetting"} - }, - "RetentionSettingRetentionPeriodInteger":{ - "type":"integer", - "box":true, - "min":1 - }, "RoleArn":{ "type":"string", - "pattern":"^arn:.*" + "pattern":"^arn:.*$" }, "S3BucketArn":{"type":"string"}, - "S3Exception":{ - "type":"structure", - "required":["message"], - "members":{ - "message":{"shape":"String"} - }, - "documentation":"

      Provides an extension of the AmazonServiceException for errors reported by Amazon S3 while processing a request. In particular, this class provides access to the Amazon S3 extended request ID. If Amazon S3 is incorrectly handling a request and you need to contact Amazon, this extended request ID may provide useful debugging information.

      ", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true + "S3URI":{ + "type":"string", + "documentation":"

      A complete S3 URI pointing to a valid S3 object.

      ", + "max":1024, + "min":0, + "pattern":"^s3[an]?://[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9](/[^/].*)+$" }, "SafeString":{ "type":"string", "pattern":"^[\\\\\\w\\-_:/.@=+]*$" }, - "ServiceQuotaExceededException":{ - "type":"structure", - "required":[ - "message", - "quotaCode", - "resourceId", - "resourceType", - "serviceCode" - ], - "members":{ - "message":{"shape":"String"}, - "quotaCode":{ - "shape":"String", - "documentation":"

      That the rate of requests to Security Lake is exceeding the request quotas for your Amazon Web Services account.

      " - }, - "resourceId":{ - "shape":"String", - "documentation":"

      The ID of the resource that exceeds the service quota.

      " - }, - "resourceType":{ - "shape":"String", - "documentation":"

      The type of the resource that exceeds the service quota.

      " - }, - "serviceCode":{ - "shape":"String", - "documentation":"

      The code for the service in Service Quotas.

      " - } - }, - "documentation":"

      You have exceeded your service quota. To perform the requested action, remove some of the relevant resources, or use Service Quotas to request a service quota increase.

      ", - "error":{ - "httpStatusCode":402, - "senderFault":true - }, - "exception":true - }, - "SnsTopicArn":{"type":"string"}, - "SourceStatus":{ + "SourceCollectionStatus":{ "type":"string", "enum":[ - "ACTIVE", - "DEACTIVATED", - "PENDING" + "COLLECTING", + "MISCONFIGURED", + "NOT_COLLECTING" ] }, - "SourceType":{ + "SqsNotificationConfiguration":{ "type":"structure", "members":{ - "awsSourceType":{ - "shape":"AwsLogSourceType", - "documentation":"

      Amazon Security Lake supports log and event collection for natively supported Amazon Web Services.

      " - }, - "customSourceType":{ - "shape":"CustomSourceType", - "documentation":"

      Amazon Security Lake supports custom source types. For a detailed list, see the Amazon Security Lake User Guide.

      " - } }, - "documentation":"

      The supported source types from which logs and events are collected in Amazon Security Lake. For the list of supported Amazon Web Services, see the Amazon Security Lake User Guide.

      ", - "union":true - }, - "SourceTypeList":{ - "type":"list", - "member":{"shape":"SourceType"} - }, - "StorageClass":{ - "type":"string", - "enum":[ - "STANDARD_IA", - "ONEZONE_IA", - "INTELLIGENT_TIERING", - "GLACIER_IR", - "GLACIER", - "DEEP_ARCHIVE", - "EXPIRE" - ] + "documentation":"

      The configurations for SQS subscriber notification.

      " }, "String":{"type":"string"}, - "SubscriberList":{ - "type":"list", - "member":{"shape":"SubscriberResource"} - }, "SubscriberResource":{ "type":"structure", "required":[ - "accountId", - "sourceTypes", - "subscriptionId" + "sources", + "subscriberArn", + "subscriberId", + "subscriberIdentity", + "subscriberName" ], "members":{ "accessTypes":{ "shape":"AccessTypeList", - "documentation":"

      You can choose to notify subscribers of new objects with an Amazon Simple Queue Service (Amazon SQS) queue or through messaging to an HTTPS endpoint provided by the subscriber.

      Subscribers can consume data by directly querying Lake Formation tables in your Amazon S3 bucket through services like Amazon Athena. This subscription type is defined as LAKEFORMATION.

      " - }, - "accountId":{ - "shape":"AwsAccountId", - "documentation":"

      The Amazon Web Services account ID you are using to create your Amazon Security Lake account.

      " + "documentation":"

      You can choose to notify subscribers of new objects with an Amazon Simple Queue Service (Amazon SQS) queue or through messaging to an HTTPS endpoint provided by the subscriber.

      Subscribers can consume data by directly querying Lake Formation tables in your Amazon S3 bucket through services like Amazon Athena. This subscription type is defined as LAKEFORMATION.

      " }, "createdAt":{ "shape":"SyntheticTimestamp_date_time", - "documentation":"

      The date and time when the subscription was created.

      " - }, - "externalId":{ - "shape":"SafeString", - "documentation":"

      The external ID of the subscriber. The external ID lets the user that is assuming the role assert the circumstances in which they are operating. It also provides a way for the account owner to permit the role to be assumed only under specific circumstances.

      " + "documentation":"

      The date and time when the subscriber was created.

      " }, "resourceShareArn":{ "shape":"ResourceShareArn", @@ -1939,62 +1881,52 @@ }, "s3BucketArn":{ "shape":"S3BucketArn", - "documentation":"

      The ARN for the Amazon S3 bucket.

      " - }, - "snsArn":{ - "shape":"SnsTopicArn", - "documentation":"

      The ARN for the Amazon Simple Notification Service.

      " + "documentation":"

      The ARN for the Amazon S3 bucket.

      " }, - "sourceTypes":{ - "shape":"SourceTypeList", + "sources":{ + "shape":"LogSourceResourceList", "documentation":"

      Amazon Security Lake supports log and event collection for natively supported Amazon Web Services. For more information, see the Amazon Security Lake User Guide.

      " }, + "subscriberArn":{ + "shape":"AmazonResourceName", + "documentation":"

      The subscriber ARN of the Amazon Security Lake subscriber account.

      " + }, "subscriberDescription":{ "shape":"SafeString", - "documentation":"

      The subscriber descriptions for a subscriber account. The description for a subscriber includes subscriberName, accountID, externalID, and subscriptionId.

      " + "documentation":"

      The subscriber descriptions for a subscriber account. The description for a subscriber includes subscriberName, accountID, externalID, and subscriberId.

      " }, - "subscriberName":{ + "subscriberEndpoint":{ "shape":"SafeString", - "documentation":"

      The name of your Amazon Security Lake subscriber account.

      " - }, - "subscriptionEndpoint":{ - "shape":"String", - "documentation":"

      The subscription endpoint to which exception messages are posted.

      " + "documentation":"

      The subscriber endpoint to which exception messages are posted.

      " }, - "subscriptionId":{ + "subscriberId":{ "shape":"UUID", - "documentation":"

      The subscription ID of the Amazon Security Lake subscriber account.

      " + "documentation":"

      The subscriber ID of the Amazon Security Lake subscriber account.

      " }, - "subscriptionProtocol":{ - "shape":"EndpointProtocol", - "documentation":"

      The subscription protocol to which exception messages are posted.

      " + "subscriberIdentity":{ + "shape":"AwsIdentity", + "documentation":"

      The AWS identity used to access your data.

      " + }, + "subscriberName":{ + "shape":"SafeString", + "documentation":"

      The name of your Amazon Security Lake subscriber account.

      " }, - "subscriptionStatus":{ - "shape":"SubscriptionStatus", - "documentation":"

      The subscription status of the Amazon Security Lake subscriber account.

      " + "subscriberStatus":{ + "shape":"SubscriberStatus", + "documentation":"

      The subscriber status of the Amazon Security Lake subscriber account.

      " }, "updatedAt":{ "shape":"SyntheticTimestamp_date_time", - "documentation":"

      The date and time when the subscription was created.

      " + "documentation":"

      The date and time when the subscriber was last updated.

      " } }, - "documentation":"

      Provides details about the Amazon Security Lake account subscription. Subscribers are notified of new objects for a source as the data is written to your Amazon S3 bucket for Security Lake.

      " + "documentation":"

      Provides details about the Amazon Security Lake account subscription. Subscribers are notified of new objects for a source as the data is written to your Amazon S3 bucket for Security Lake.

      " }, - "SubscriptionProtocolType":{ - "type":"string", - "enum":[ - "HTTP", - "HTTPS", - "EMAIL", - "EMAIL_JSON", - "SMS", - "SQS", - "LAMBDA", - "APP", - "FIREHOSE" - ] + "SubscriberResourceList":{ + "type":"list", + "member":{"shape":"SubscriberResource"} }, - "SubscriptionStatus":{ + "SubscriberStatus":{ "type":"string", "enum":[ "ACTIVE", @@ -2003,18 +1935,16 @@ "READY" ] }, + "SubscriptionProtocol":{ + "type":"string", + "pattern":"^[a-z\\-]*$" + }, "SyntheticTimestamp_date_time":{ "type":"timestamp", "timestampFormat":"iso8601" }, - "TagsMap":{ - "type":"map", - "key":{"shape":"String"}, - "value":{"shape":"String"} - }, "ThrottlingException":{ "type":"structure", - "required":["message"], "members":{ "message":{"shape":"String"}, "quotaCode":{ @@ -2023,7 +1953,7 @@ }, "retryAfterSeconds":{ "shape":"Integer", - "documentation":"

      Retry the request after the specified time.

      ", + "documentation":"

      Retry the request after the specified time.

      ", "location":"header", "locationName":"Retry-After" }, @@ -2032,7 +1962,7 @@ "documentation":"

      The code for the service in Service Quotas.

      " } }, - "documentation":"

      The limit on the number of requests per second was exceeded.

      ", + "documentation":"

      The limit on the number of requests per second was exceeded.

      ", "error":{ "httpStatusCode":429, "senderFault":true @@ -2040,118 +1970,113 @@ "exception":true, "retryable":{"throttling":true} }, - "TwoDimensionsMap":{ - "type":"map", - "key":{"shape":"String"}, - "value":{"shape":"ValueSet"} - }, "UUID":{ "type":"string", - "pattern":"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}" - }, - "UpdateDatalakeExceptionsExpiryRequest":{ - "type":"structure", - "required":["exceptionMessageExpiry"], - "members":{ - "exceptionMessageExpiry":{ - "shape":"UpdateDatalakeExceptionsExpiryRequestExceptionMessageExpiryLong", - "documentation":"

      The time-to-live (TTL) for the exception message to remain.

      " - } - } - }, - "UpdateDatalakeExceptionsExpiryRequestExceptionMessageExpiryLong":{ - "type":"long", - "box":true, - "min":1 + "pattern":"^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" }, - "UpdateDatalakeExceptionsExpiryResponse":{ - "type":"structure", - "members":{ - } - }, - "UpdateDatalakeExceptionsSubscriptionRequest":{ + "UpdateDataLakeExceptionSubscriptionRequest":{ "type":"structure", "required":[ "notificationEndpoint", "subscriptionProtocol" ], "members":{ + "exceptionTimeToLive":{ + "shape":"UpdateDataLakeExceptionSubscriptionRequestExceptionTimeToLiveLong", + "documentation":"

      The time-to-live (TTL) for the exception message to remain.

      " + }, "notificationEndpoint":{ "shape":"SafeString", "documentation":"

      The account that is subscribed to receive exception notifications.

      " }, "subscriptionProtocol":{ - "shape":"SubscriptionProtocolType", - "documentation":"

      The subscription protocol to which exception messages are posted.

      " + "shape":"SubscriptionProtocol", + "documentation":"

      The subscription protocol to which exception messages are posted.

      " } } }, - "UpdateDatalakeExceptionsSubscriptionResponse":{ + "UpdateDataLakeExceptionSubscriptionRequestExceptionTimeToLiveLong":{ + "type":"long", + "box":true, + "min":1 + }, + "UpdateDataLakeExceptionSubscriptionResponse":{ "type":"structure", "members":{ } }, - "UpdateDatalakeRequest":{ + "UpdateDataLakeRequest":{ "type":"structure", "required":["configurations"], "members":{ "configurations":{ - "shape":"LakeConfigurationRequestMap", + "shape":"DataLakeConfigurationList", "documentation":"

      Specify the Region or Regions that will contribute data to the rollup region.

      " } } }, - "UpdateDatalakeResponse":{ + "UpdateDataLakeResponse":{ "type":"structure", "members":{ + "dataLakes":{ + "shape":"DataLakeResourceList", + "documentation":"

      The created Security Lake configuration object.

      " + } } }, - "UpdateStatus":{ + "UpdateSubscriberNotificationRequest":{ "type":"structure", + "required":[ + "configuration", + "subscriberId" + ], "members":{ - "lastUpdateFailure":{ - "shape":"LastUpdateFailure", - "documentation":"

      The details of the last UpdateDatalakeor DeleteDatalake API request which failed.

      " - }, - "lastUpdateRequestId":{ - "shape":"String", - "documentation":"

      The unique ID for the UpdateDatalake or DeleteDatalake API request.

      " + "configuration":{ + "shape":"NotificationConfiguration", + "documentation":"

      The configuration for subscriber notification.

      " }, - "lastUpdateStatus":{ - "shape":"settingsStatus", - "documentation":"

      The status of the last UpdateDatalake or DeleteDatalake API request that was requested.

      " + "subscriberId":{ + "shape":"UUID", + "documentation":"

      The subscription ID for which the subscription notification is specified.

      ", + "location":"uri", + "locationName":"subscriberId" } - }, - "documentation":"

      The status of the last UpdateDatalake or DeleteDatalake API request. This is set to Completed after the configuration is updated, or removed if deletion of the data lake is successful.

      " + } }, - "UpdateSubscriberRequest":{ + "UpdateSubscriberNotificationResponse":{ "type":"structure", - "required":[ - "id", - "sourceTypes" - ], "members":{ - "externalId":{ + "subscriberEndpoint":{ "shape":"SafeString", - "documentation":"

      The external ID of the Security Lake account.

      " - }, - "id":{ - "shape":"String", - "documentation":"

      A value created by Security Lake that uniquely identifies your subscription.

      ", - "location":"uri", - "locationName":"id" - }, - "sourceTypes":{ - "shape":"SourceTypeList", + "documentation":"

      The subscriber endpoint to which exception messages are posted.

      " + } + } + }, + "UpdateSubscriberRequest":{ + "type":"structure", + "required":["subscriberId"], + "members":{ + "sources":{ + "shape":"LogSourceResourceList", "documentation":"

      The supported Amazon Web Services from which logs and events are collected. For the list of supported Amazon Web Services, see the Amazon Security Lake User Guide.

      " }, "subscriberDescription":{ "shape":"DescriptionString", "documentation":"

      The description of the Security Lake account subscriber.

      " }, + "subscriberId":{ + "shape":"UUID", + "documentation":"

      A value created by Security Lake that uniquely identifies your subscription.

      ", + "location":"uri", + "locationName":"subscriberId" + }, + "subscriberIdentity":{ + "shape":"AwsIdentity", + "documentation":"

      The AWS identity used to access your data.

      " + }, "subscriberName":{ "shape":"UpdateSubscriberRequestSubscriberNameString", - "documentation":"

      The name of the Security Lake account subscriber.

      " + "documentation":"

      The name of the Security Lake account subscriber.

      " } } }, @@ -2166,127 +2091,10 @@ "members":{ "subscriber":{ "shape":"SubscriberResource", - "documentation":"

      The account of the subscriber.

      " - } - } - }, - "UpdateSubscriptionNotificationConfigurationRequest":{ - "type":"structure", - "required":["subscriptionId"], - "members":{ - "createSqs":{ - "shape":"Boolean", - "documentation":"

      Create a new subscription notification for the specified subscription ID in Amazon Security Lake.

      " - }, - "httpsApiKeyName":{ - "shape":"String", - "documentation":"

      The key name for the subscription notification.

      " - }, - "httpsApiKeyValue":{ - "shape":"String", - "documentation":"

      The key value for the subscription notification.

      " - }, - "httpsMethod":{ - "shape":"HttpsMethod", - "documentation":"

      The HTTPS method used for the subscription notification.

      " - }, - "roleArn":{ - "shape":"RoleArn", - "documentation":"

      The Amazon Resource Name (ARN) specifying the role of the subscriber. For more information about ARNs and how to use them in policies, see, see the Managing data access and Amazon Web Services Managed Policiesin the Amazon Security Lake User Guide.

      " - }, - "subscriptionEndpoint":{ - "shape":"UpdateSubscriptionNotificationConfigurationRequestSubscriptionEndpointString", - "documentation":"

      The subscription endpoint in Security Lake.

      " - }, - "subscriptionId":{ - "shape":"UUID", - "documentation":"

      The subscription ID for which the subscription notification is specified.

      ", - "location":"uri", - "locationName":"subscriptionId" - } - } - }, - "UpdateSubscriptionNotificationConfigurationRequestSubscriptionEndpointString":{ - "type":"string", - "pattern":"^(arn:aws:.+$|https?://.+$)" - }, - "UpdateSubscriptionNotificationConfigurationResponse":{ - "type":"structure", - "members":{ - "queueArn":{ - "shape":"SafeString", - "documentation":"

      Returns the ARN of the queue.

      " + "documentation":"

      The updated subscriber information.

      " } } - }, - "ValidationException":{ - "type":"structure", - "required":[ - "message", - "reason" - ], - "members":{ - "fieldList":{ - "shape":"ValidationExceptionFieldList", - "documentation":"

      The list of parameters that failed to validate.

      " - }, - "message":{"shape":"String"}, - "reason":{ - "shape":"ValidationExceptionReason", - "documentation":"

      The reason for the validation exception.

      " - } - }, - "documentation":"

      Your signing certificate could not be validated.

      ", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "ValidationExceptionField":{ - "type":"structure", - "required":[ - "message", - "name" - ], - "members":{ - "message":{ - "shape":"String", - "documentation":"

      Describes the error encountered.

      " - }, - "name":{ - "shape":"String", - "documentation":"

      Name of the validation exception.

      " - } - }, - "documentation":"

      The input fails to meet the constraints specified in Amazon Security Lake.

      " - }, - "ValidationExceptionFieldList":{ - "type":"list", - "member":{"shape":"ValidationExceptionField"} - }, - "ValidationExceptionReason":{ - "type":"string", - "enum":[ - "unknownOperation", - "cannotParse", - "fieldValidationFailed", - "other" - ] - }, - "ValueSet":{ - "type":"list", - "member":{"shape":"String"} - }, - "settingsStatus":{ - "type":"string", - "enum":[ - "INITIALIZED", - "PENDING", - "COMPLETED", - "FAILED" - ] } }, - "documentation":"

      Amazon Security Lake is in preview release. Your use of the Security Lake preview is subject to Section 2 of the Amazon Web Services Service Terms(\"Betas and Previews\").

      Amazon Security Lake is a fully managed security data lake service. You can use Security Lake to automatically centralize security data from cloud, on-premises, and custom sources into a data lake that's stored in your Amazon Web Servicesaccount. Amazon Web Services Organizations is an account management service that lets you consolidate multiple Amazon Web Services accounts into an organization that you create and centrally manage. With Organizations, you can create member accounts and invite existing accounts to join your organization. Security Lake helps you analyze security data for a more complete understanding of your security posture across the entire organization. It can also help you improve the protection of your workloads, applications, and data.

      The data lake is backed by Amazon Simple Storage Service (Amazon S3) buckets, and you retain ownership over your data.

      Amazon Security Lake integrates with CloudTrail, a service that provides a record of actions taken by a user, role, or an Amazon Web Services service in Security Lake CloudTrail captures API calls for Security Lake as events. The calls captured include calls from the Security Lake console and code calls to the Security Lake API operations. If you create a trail, you can enable continuous delivery of CloudTrail events to an Amazon S3 bucket, including events for Security Lake. If you don't configure a trail, you can still view the most recent events in the CloudTrail console in Event history. Using the information collected by CloudTrail you can determine the request that was made to Security Lake, the IP address from which the request was made, who made the request, when it was made, and additional details. To learn more about Security Lake information in CloudTrail, see the Amazon Security Lake User Guide.

      Security Lake automates the collection of security-related log and event data from integrated Amazon Web Services and third-party services. It also helps you manage the lifecycle of data with customizable retention and replication settings. Security Lake converts ingested data into Apache Parquet format and a standard open-source schema called the Open Cybersecurity Schema Framework (OCSF).

      Other Amazon Web Services and third-party services can subscribe to the data that's stored in Security Lake for incident response and security data analytics.

      " + "documentation":"

      Amazon Security Lake is a fully managed security data lake service. You can use Security Lake to automatically centralize security data from cloud, on-premises, and custom sources into a data lake that's stored in your Amazon Web Services account. Amazon Web Services Organizations is an account management service that lets you consolidate multiple Amazon Web Services accounts into an organization that you create and centrally manage. With Organizations, you can create member accounts and invite existing accounts to join your organization. Security Lake helps you analyze security data for a more complete understanding of your security posture across the entire organization. It can also help you improve the protection of your workloads, applications, and data.

      The data lake is backed by Amazon Simple Storage Service (Amazon S3) buckets, and you retain ownership over your data.

      Amazon Security Lake integrates with CloudTrail, a service that provides a record of actions taken by a user, role, or an Amazon Web Services service. In Security Lake, CloudTrail captures API calls for Security Lake as events. The calls captured include calls from the Security Lake console and code calls to the Security Lake API operations. If you create a trail, you can enable continuous delivery of CloudTrail events to an Amazon S3 bucket, including events for Security Lake. If you don't configure a trail, you can still view the most recent events in the CloudTrail console in Event history. Using the information collected by CloudTrail you can determine the request that was made to Security Lake, the IP address from which the request was made, who made the request, when it was made, and additional details. To learn more about Security Lake information in CloudTrail, see the Amazon Security Lake User Guide.

      Security Lake automates the collection of security-related log and event data from integrated Amazon Web Services and third-party services. It also helps you manage the lifecycle of data with customizable retention and replication settings. Security Lake converts ingested data into Apache Parquet format and a standard open-source schema called the Open Cybersecurity Schema Framework (OCSF).

      Other Amazon Web Services and third-party services can subscribe to the data that's stored in Security Lake for incident response and security data analytics.

      " } diff --git a/services/serverlessapplicationrepository/pom.xml b/services/serverlessapplicationrepository/pom.xml index 6d345a3edac0..4d8809ad6d9c 100644 --- a/services/serverlessapplicationrepository/pom.xml +++ b/services/serverlessapplicationrepository/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 serverlessapplicationrepository diff --git a/services/servicecatalog/pom.xml b/services/servicecatalog/pom.xml index 7e3247278f65..c6ee47052e65 100644 --- a/services/servicecatalog/pom.xml +++ b/services/servicecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT servicecatalog AWS Java SDK :: Services :: AWS Service Catalog diff --git a/services/servicecatalog/src/main/resources/codegen-resources/service-2.json b/services/servicecatalog/src/main/resources/codegen-resources/service-2.json index 65f3ed6bc2d0..c228fbebef61 100644 --- a/services/servicecatalog/src/main/resources/codegen-resources/service-2.json +++ b/services/servicecatalog/src/main/resources/codegen-resources/service-2.json @@ -649,7 +649,7 @@ {"shape":"InvalidParametersException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

      Disassociates a previously associated principal ARN from a specified portfolio.

      The PrincipalType and PrincipalARN must match the AssociatePrincipalWithPortfolio call request details. For example, to disassociate an association created with a PrincipalARN of PrincipalType IAM you must use the PrincipalType IAM when calling DisassociatePrincipalFromPortfolio.

      For portfolios that have been shared with principal name sharing enabled: after disassociating a principal, share recipient accounts will no longer be able to provision products in this portfolio using a role matching the name of the associated principal.

      " + "documentation":"

      Disassociates a previously associated principal ARN from a specified portfolio.

      The PrincipalType and PrincipalARN must match the AssociatePrincipalWithPortfolio call request details. For example, to disassociate an association created with a PrincipalARN of PrincipalType IAM you must use the PrincipalType IAM when calling DisassociatePrincipalFromPortfolio.

      For portfolios that have been shared with principal name sharing enabled: after disassociating a principal, share recipient accounts will no longer be able to provision products in this portfolio using a role matching the name of the associated principal.

      For more information, review associate-principal-with-portfolio in the Amazon Web Services CLI Command Reference.

      If you disassociate a principal from a portfolio, with PrincipalType as IAM, the same principal will still have access to the portfolio if it matches one of the associated principals of type IAM_PATTERN. To fully remove access for a principal, verify all the associated Principals of type IAM_PATTERN, and then ensure you disassociate any IAM_PATTERN principals that match the principal whose access you are removing.

      " }, "DisassociateProductFromPortfolio":{ "name":"DisassociateProductFromPortfolio", @@ -1421,11 +1421,11 @@ }, "PrincipalARN":{ "shape":"PrincipalARN", - "documentation":"

      The ARN of the principal (user, role, or group). This field allows an ARN with no accountID if PrincipalType is IAM_PATTERN.

      You can associate multiple IAM patterns even if the account has no principal with that name. This is useful in Principal Name Sharing if you want to share a principal without creating it in the account that owns the portfolio.

      " + "documentation":"

      The ARN of the principal (user, role, or group). If the PrincipalType is IAM, the supported value is a fully defined IAM Amazon Resource Name (ARN). If the PrincipalType is IAM_PATTERN, the supported value is an IAM ARN without an AccountID in the following format:

      arn:partition:iam:::resource-type/resource-id

      The ARN resource-id can be either:

      • A fully formed resource-id. For example, arn:aws:iam:::role/resource-name or arn:aws:iam:::role/resource-path/resource-name

      • A wildcard ARN. The wildcard ARN accepts IAM_PATTERN values with a \"*\" or \"?\" in the resource-id segment of the ARN. For example arn:partition:service:::resource-type/resource-path/resource-name. The new symbols are exclusive to the resource-path and resource-name and cannot replace the resource-type or other ARN values.

        The ARN path and principal name allow unlimited wildcard characters.

      Examples of an acceptable wildcard ARN:

      • arn:aws:iam:::role/ResourceName_*

      • arn:aws:iam:::role/*/ResourceName_?

      Examples of an unacceptable wildcard ARN:

      • arn:aws:iam:::*/ResourceName

      You can associate multiple IAM_PATTERNs even if the account has no principal with that name.

      The \"?\" wildcard character matches zero or one of any character. This is similar to \".?\" in regular regex context. The \"*\" wildcard character matches any number of any characters. This is similar to \".*\" in regular regex context.

      In the IAM Principal ARN format (arn:partition:iam:::resource-type/resource-path/resource-name), valid resource-type values include user/, group/, or role/. The \"?\" and \"*\" characters are allowed only after the resource-type in the resource-id segment. You can use special characters anywhere within the resource-id.

      The \"*\" character also matches the \"/\" character, allowing paths to be formed within the resource-id. For example, arn:aws:iam:::role/*/ResourceName_? matches both arn:aws:iam:::role/pathA/pathB/ResourceName_1 and arn:aws:iam:::role/pathA/ResourceName_1.

      " }, "PrincipalType":{ "shape":"PrincipalType", - "documentation":"

      The principal type. The supported value is IAM if you use a fully defined ARN, or IAM_PATTERN if you use an ARN with no accountID.

      " + "documentation":"

      The principal type. The supported value is IAM if you use a fully defined Amazon Resource Name (ARN), or IAM_PATTERN if you use an ARN with no accountID, with or without wildcard characters.

      " } } }, @@ -2751,6 +2751,10 @@ "Verbose":{ "shape":"Verbose", "documentation":"

      Indicates whether a verbose level of detail is enabled.

      " + }, + "IncludeProvisioningArtifactParameters":{ + "shape":"Boolean", + "documentation":"

      Indicates if the API call response does or does not include additional details about the provisioning parameters.

      " } } }, @@ -2768,6 +2772,10 @@ "Status":{ "shape":"Status", "documentation":"

      The status of the current request.

      " + }, + "ProvisioningArtifactParameters":{ + "shape":"ProvisioningArtifactParameters", + "documentation":"

      Information about the parameters used to provision the product.

      " } } }, @@ -3001,11 +3009,11 @@ }, "PrincipalARN":{ "shape":"PrincipalARN", - "documentation":"

      The ARN of the principal (user, role, or group). This field allows an ARN with no accountID if PrincipalType is IAM_PATTERN.

      " + "documentation":"

      The ARN of the principal (user, role, or group). This field allows an ARN with no accountID with or without wildcard characters if PrincipalType is IAM_PATTERN.

      " }, "PrincipalType":{ "shape":"PrincipalType", - "documentation":"

      The supported value is IAM if you use a fully defined ARN, or IAM_PATTERN if you use no accountID.

      " + "documentation":"

      The supported value is IAM if you use a fully defined ARN, or IAM_PATTERN if you specify an IAM ARN with no AccountId, with or without wildcard characters.

      " } } }, @@ -4531,11 +4539,11 @@ "members":{ "PrincipalARN":{ "shape":"PrincipalARN", - "documentation":"

      The ARN of the principal (user, role, or group). This field allows for an ARN with no accountID if the PrincipalType is an IAM_PATTERN.

      " + "documentation":"

      The ARN of the principal (user, role, or group). This field allows for an ARN with no accountID, with or without wildcard characters if the PrincipalType is an IAM_PATTERN.

      For more information, review associate-principal-with-portfolio in the Amazon Web Services CLI Command Reference.

      " }, "PrincipalType":{ "shape":"PrincipalType", - "documentation":"

      The principal type. The supported value is IAM if you use a fully defined ARN, or IAM_PATTERN if you use an ARN with no accountID.

      " + "documentation":"

      The principal type. The supported value is IAM if you use a fully defined ARN, or IAM_PATTERN if you use an ARN with no accountID, with or without wildcard characters.

      " } }, "documentation":"

      Information about a principal.

      " diff --git a/services/servicecatalogappregistry/pom.xml b/services/servicecatalogappregistry/pom.xml index 3ed092ed67b3..9822fce47583 100644 --- a/services/servicecatalogappregistry/pom.xml +++ b/services/servicecatalogappregistry/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT servicecatalogappregistry AWS Java SDK :: Services :: Service Catalog App Registry diff --git a/services/servicediscovery/pom.xml b/services/servicediscovery/pom.xml index 809f4645d460..cc7410604a42 100644 --- a/services/servicediscovery/pom.xml +++ b/services/servicediscovery/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 servicediscovery diff --git a/services/servicediscovery/src/main/resources/codegen-resources/customization.config b/services/servicediscovery/src/main/resources/codegen-resources/customization.config index 3e6687d3220e..91ee5edcc086 100644 --- a/services/servicediscovery/src/main/resources/codegen-resources/customization.config +++ b/services/servicediscovery/src/main/resources/codegen-resources/customization.config @@ -3,5 +3,6 @@ "listNamespaces", "listOperations", "listServices" - ] + ], + "generateEndpointClientTests": true } diff --git a/services/servicequotas/pom.xml b/services/servicequotas/pom.xml index 3cbf0c0b1c01..a0d353fba151 100644 --- a/services/servicequotas/pom.xml +++ b/services/servicequotas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT servicequotas AWS Java SDK :: Services :: Service Quotas diff --git a/services/ses/pom.xml b/services/ses/pom.xml index 72701fe81c7c..1a52d6e11d99 100644 --- a/services/ses/pom.xml +++ b/services/ses/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ses AWS Java SDK :: Services :: Amazon SES diff --git a/services/sesv2/pom.xml b/services/sesv2/pom.xml index bd9b77f21e80..5c7705e0c3f5 100644 --- a/services/sesv2/pom.xml 
+++ b/services/sesv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT sesv2 AWS Java SDK :: Services :: SESv2 diff --git a/services/sesv2/src/main/resources/codegen-resources/endpoint-tests.json b/services/sesv2/src/main/resources/codegen-resources/endpoint-tests.json index e312a33c9715..fa4feb98bdd2 100644 --- a/services/sesv2/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/sesv2/src/main/resources/codegen-resources/endpoint-tests.json @@ -9,8 +9,8 @@ }, "params": { "Region": "af-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -22,8 +22,8 @@ }, "params": { "Region": "ap-northeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -35,8 +35,8 @@ }, "params": { "Region": "ap-northeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -48,8 +48,8 @@ }, "params": { "Region": "ap-northeast-3", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,8 +61,8 @@ }, "params": { "Region": "ap-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -74,8 +74,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -87,8 +87,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -100,8 +100,8 @@ }, "params": { "Region": "ca-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -113,8 +113,8 @@ }, "params": { "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -126,8 +126,8 @@ }, "params": { "Region": "eu-north-1", - "UseDualStack": false, - 
"UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -139,8 +139,8 @@ }, "params": { "Region": "eu-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -152,8 +152,8 @@ }, "params": { "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -165,8 +165,8 @@ }, "params": { "Region": "eu-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -178,8 +178,8 @@ }, "params": { "Region": "eu-west-3", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -191,8 +191,8 @@ }, "params": { "Region": "me-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -204,8 +204,8 @@ }, "params": { "Region": "sa-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -217,8 +217,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -230,8 +230,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -243,8 +243,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -256,8 +256,8 @@ }, "params": { "Region": "us-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -269,8 +269,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -282,8 +282,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -295,8 +295,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": 
true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -308,8 +308,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -321,8 +321,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -334,8 +334,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -347,8 +347,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -360,8 +360,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -373,8 +373,8 @@ }, "params": { "Region": "us-gov-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -386,8 +386,8 @@ }, "params": { "Region": "us-gov-west-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -399,8 +399,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -412,8 +412,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -425,8 +425,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -438,8 +438,19 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + 
"UseFIPS": true, + "UseDualStack": true } }, { @@ -451,8 +462,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -464,8 +486,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -477,8 +510,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -490,8 +534,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -503,8 +547,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -516,8 +560,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -528,8 +572,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": 
"https://example.com" } }, @@ -540,10 +584,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/sesv2/src/main/resources/codegen-resources/service-2.json b/services/sesv2/src/main/resources/codegen-resources/service-2.json index 80a349b81d27..668c5a618abc 100644 --- a/services/sesv2/src/main/resources/codegen-resources/service-2.json +++ b/services/sesv2/src/main/resources/codegen-resources/service-2.json @@ -1019,6 +1019,23 @@ ], "documentation":"

      Move a dedicated IP address to an existing dedicated IP pool.

      The dedicated IP address that you specify must already exist, and must be associated with your Amazon Web Services account.

      The dedicated IP pool you specify must already exist. You can create a new pool by using the CreateDedicatedIpPool operation.

      " }, + "PutDedicatedIpPoolScalingAttributes":{ + "name":"PutDedicatedIpPoolScalingAttributes", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/dedicated-ip-pools/{PoolName}/scaling" + }, + "input":{"shape":"PutDedicatedIpPoolScalingAttributesRequest"}, + "output":{"shape":"PutDedicatedIpPoolScalingAttributesResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

      Used to convert a dedicated IP pool to a different scaling mode.

      MANAGED pools cannot be converted to STANDARD scaling mode.

      ", + "idempotent":true + }, "PutDedicatedIpWarmupAttributes":{ "name":"PutDedicatedIpWarmupAttributes", "http":{ @@ -2274,7 +2291,7 @@ }, "ScalingMode":{ "shape":"ScalingMode", - "documentation":"

      The type of the dedicated IP pool.

      • STANDARD – A dedicated IP pool where the customer can control which IPs are part of the pool.

      • MANAGED – A dedicated IP pool where the reputation and number of IPs is automatically managed by Amazon SES.

      " + "documentation":"

      The type of the dedicated IP pool.

      • STANDARD – A dedicated IP pool where you can control which IPs are part of the pool.

      • MANAGED – A dedicated IP pool where the reputation and number of IPs are automatically managed by Amazon SES.

      " } }, "documentation":"

      Contains information about a dedicated IP pool.

      " @@ -3221,7 +3238,7 @@ }, "EmailAddress":{ "shape":"EmailAddress", - "documentation":"

      The contact's email addres.

      ", + "documentation":"

      The contact's email address.

      ", "location":"uri", "locationName":"EmailAddress" } @@ -3236,7 +3253,7 @@ }, "EmailAddress":{ "shape":"EmailAddress", - "documentation":"

      The contact's email addres.

      " + "documentation":"

      The contact's email address.

      " }, "TopicPreferences":{ "shape":"TopicPreferenceList", @@ -5052,6 +5069,32 @@ }, "documentation":"

      An HTTP 200 response if the request succeeds, or an error message if the request fails.

      " }, + "PutDedicatedIpPoolScalingAttributesRequest":{ + "type":"structure", + "required":[ + "PoolName", + "ScalingMode" + ], + "members":{ + "PoolName":{ + "shape":"PoolName", + "documentation":"

      The name of the dedicated IP pool.

      ", + "location":"uri", + "locationName":"PoolName" + }, + "ScalingMode":{ + "shape":"ScalingMode", + "documentation":"

      The scaling mode to apply to the dedicated IP pool.

      Changing the scaling mode from MANAGED to STANDARD is not supported.

      " + } + }, + "documentation":"

      A request to convert a dedicated IP pool to a different scaling mode.

      " + }, + "PutDedicatedIpPoolScalingAttributesResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

      An HTTP 200 response if the request succeeds, or an error message if the request fails.

      " + }, "PutDedicatedIpWarmupAttributesRequest":{ "type":"structure", "required":[ @@ -6070,7 +6113,7 @@ }, "EmailAddress":{ "shape":"EmailAddress", - "documentation":"

      The contact's email addres.

      ", + "documentation":"

      The contact's email address.

      ", "location":"uri", "locationName":"EmailAddress" }, diff --git a/services/sfn/pom.xml b/services/sfn/pom.xml index 8c79bdbd8c4c..ced409a7c68e 100644 --- a/services/sfn/pom.xml +++ b/services/sfn/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT sfn AWS Java SDK :: Services :: AWS Step Functions diff --git a/services/sfn/src/main/resources/codegen-resources/customization.config b/services/sfn/src/main/resources/codegen-resources/customization.config index 4e8192563f3e..e6d719333524 100644 --- a/services/sfn/src/main/resources/codegen-resources/customization.config +++ b/services/sfn/src/main/resources/codegen-resources/customization.config @@ -3,5 +3,6 @@ "listActivities", "listStateMachines" ], - "serviceSpecificHttpConfig": "software.amazon.awssdk.services.sfn.internal.SfnHttpConfigurationOptions" + "serviceSpecificHttpConfig": "software.amazon.awssdk.services.sfn.internal.SfnHttpConfigurationOptions", + "generateEndpointClientTests": true } diff --git a/services/sfn/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/sfn/src/main/resources/codegen-resources/endpoint-rule-set.json index 59fd81e898d9..4d52cffb4271 100644 --- a/services/sfn/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/sfn/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": 
"url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,193 +111,257 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ 
+ { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] }, - "supportsDualStack" + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://states-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://states-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "Region" - }, - "us-gov-west-1" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], - "endpoint": { - "url": "https://states.us-gov-west-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + 
"fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-gov-west-1" + ] + } + ], + "endpoint": { + "url": "https://states.us-gov-west-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://states-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] }, { "conditions": [], - "endpoint": { - "url": "https://states-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://states.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": "https://states.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": 
{}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://states.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://states.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/sfn/src/main/resources/codegen-resources/endpoint-tests.json b/services/sfn/src/main/resources/codegen-resources/endpoint-tests.json index a3e18f2bb95c..203779f9268e 100644 --- a/services/sfn/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/sfn/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,1711 +1,584 @@ { "testCases": [ { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states-fips.ap-south-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ap-south-2" - } - }, - { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states-fips.ap-south-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "ap-south-2" - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states.ap-south-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "ap-south-2" - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states.ap-south-2.amazonaws.com" - } - }, 
- "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "ap-south-2" - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "eu-south-1" - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "eu-south-1" - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "eu-south-1" - } - }, - { - 
"documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "eu-south-1" - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states-fips.eu-south-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "eu-south-2" - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states-fips.eu-south-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "eu-south-2" - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states.eu-south-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "eu-south-2" - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states.eu-south-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "eu-south-2" - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "us-gov-east-1" - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states-fips.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-gov-east-1" - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack 
enabled", - "expect": { - "endpoint": { - "url": "https://states.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "us-gov-east-1" - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "us-gov-east-1" - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states-fips.me-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "me-central-1" - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states-fips.me-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "me-central-1" - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states.me-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "me-central-1" - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states.me-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "me-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://states-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or 
both" - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "us-iso-west-1" - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states-fips.us-iso-west-1.c2s.ic.gov" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-iso-west-1" - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "us-iso-west-1" - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states.us-iso-west-1.c2s.ic.gov" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "us-iso-west-1" - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states-fips.eu-central-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "eu-central-2" - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states-fips.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "eu-central-2" - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states.eu-central-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "eu-central-2" - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": 
false, - "UseFIPS": false, - "Region": "eu-central-2" - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states-fips.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "us-west-1" - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-west-1" - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "us-west-1" - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "us-west-1" - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states-fips.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "us-west-2" - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-west-2" - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "us-west-2" - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled 
and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "us-west-2" - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states-fips.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "af-south-1" - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "af-south-1" - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "af-south-1" - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "af-south-1" - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "eu-north-1" - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "eu-north-1" - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states.eu-north-1.api.aws" - } 
- }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "eu-north-1" - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "eu-north-1" - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "eu-west-2" - } - }, - { - 
"documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ap-northeast-3" - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { 
- "url": "https://states-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "ap-northeast-3" - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "ap-northeast-3" - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "ap-northeast-3" - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ap-northeast-2" - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "ap-northeast-2" - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "ap-northeast-2" - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "ap-northeast-2" - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://states-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ap-northeast-1" - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "ap-northeast-1" - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "ap-northeast-1" - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "ap-northeast-1" - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states-fips.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "me-south-1" - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "me-south-1" - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "me-south-1" - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states.me-south-1.amazonaws.com" - } - }, - "params": 
{ - "UseDualStack": false, - "UseFIPS": false, - "Region": "me-south-1" - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://states-fips.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "sa-east-1" - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://states-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "sa-east-1" - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states.sa-east-1.api.aws" + "url": "https://states.af-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, + "Region": "af-south-1", "UseFIPS": false, - "Region": "sa-east-1" + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states.sa-east-1.amazonaws.com" + "url": "https://states.ap-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-east-1", "UseFIPS": false, - "Region": "sa-east-1" + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states-fips.ap-east-1.api.aws" + "url": "https://states.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ap-east-1" + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 
with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states-fips.ap-east-1.amazonaws.com" + "url": "https://states.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "ap-east-1" + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states.ap-east-1.api.aws" + "url": "https://states.ap-northeast-3.amazonaws.com" } }, "params": { - "UseDualStack": true, + "Region": "ap-northeast-3", "UseFIPS": false, - "Region": "ap-east-1" + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states.ap-east-1.amazonaws.com" + "url": "https://states.ap-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-south-1", "UseFIPS": false, - "Region": "ap-east-1" + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://states.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "cn-north-1" + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { 
"endpoint": { - "url": "https://states-fips.cn-north-1.amazonaws.com.cn" + "url": "https://states.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "cn-north-1" + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://states.ap-southeast-3.amazonaws.com" } }, "params": { - "UseDualStack": true, + "Region": "ap-southeast-3", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states.cn-north-1.amazonaws.com.cn" + "url": "https://states.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ca-central-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states-fips.us-gov-west-1.api.aws" + "url": "https://states.eu-central-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "us-gov-west-1" + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states.us-gov-west-1.amazonaws.com" + "url": "https://states.eu-north-1.amazonaws.com" } }, 
"params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-gov-west-1" + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states.us-gov-west-1.api.aws" + "url": "https://states.eu-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, + "Region": "eu-south-1", "UseFIPS": false, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states.us-gov-west-1.amazonaws.com" + "url": "https://states.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "eu-west-1", "UseFIPS": false, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states-fips.ap-southeast-1.api.aws" + "url": "https://states.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ap-southeast-1" + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states-fips.ap-southeast-1.amazonaws.com" + "url": "https://states.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "ap-southeast-1" + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - 
"documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states.ap-southeast-1.api.aws" + "url": "https://states.me-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, + "Region": "me-south-1", "UseFIPS": false, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states.ap-southeast-1.amazonaws.com" + "url": "https://states.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "sa-east-1", "UseFIPS": false, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states-fips.ap-southeast-2.api.aws" + "url": "https://states.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ap-southeast-2" + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states-fips.ap-southeast-2.amazonaws.com" + "url": "https://states-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", 
"expect": { "endpoint": { - "url": "https://states.ap-southeast-2.api.aws" + "url": "https://states.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": true, + "Region": "us-east-2", "UseFIPS": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states.ap-southeast-2.amazonaws.com" + "url": "https://states-fips.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "ap-southeast-2" - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "UseDualStack": true, + "Region": "us-east-2", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://states.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-iso-east-1" + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://states-fips.us-west-1.amazonaws.com" + } }, "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "us-iso-east-1" + "Region": "us-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - 
"documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states.us-iso-east-1.c2s.ic.gov" + "url": "https://states.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states-fips.ap-southeast-3.api.aws" + "url": "https://states-fips.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, + "Region": "us-west-2", "UseFIPS": true, - "Region": "ap-southeast-3" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://states-fips.ap-southeast-3.amazonaws.com" + "url": "https://states-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "ap-southeast-3" + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://states.ap-southeast-3.api.aws" + "url": "https://states.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "ap-southeast-3" + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://states.ap-southeast-3.amazonaws.com" + "url": "https://states.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "ap-southeast-3" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states-fips.ap-southeast-4.api.aws" + "url": "https://states.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "ap-southeast-4" + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://states-fips.ap-southeast-4.amazonaws.com" + "url": "https://states-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "ap-southeast-4" + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states.ap-southeast-4.api.aws" + "url": "https://states-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "ap-southeast-4" + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://states.ap-southeast-4.amazonaws.com" + "url": 
"https://states.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "ap-southeast-4" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states-fips.us-east-1.api.aws" + "url": "https://states.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "us-east-1" + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states-fips.us-east-1.amazonaws.com" + "url": "https://states-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states.us-east-1.api.aws" + "url": "https://states.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, + "Region": "us-gov-west-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states.us-east-1.amazonaws.com" + "url": "https://states.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "us-east-1" + "Region": "us-gov-west-1", + "UseFIPS": true, 
+ "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://states-fips.us-east-2.api.aws" + "url": "https://states-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-east-2" + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://states-fips.us-east-2.amazonaws.com" + "url": "https://states.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-east-2" + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states.us-east-2.api.aws" + "url": "https://states.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-2" + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states.us-east-2.amazonaws.com" + "url": "https://states.us-iso-west-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-west-1", "UseFIPS": false, - "Region": "us-east-2" + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", 
"expect": { - "endpoint": { - "url": "https://states-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "cn-northwest-1" + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states-fips.cn-northwest-1.amazonaws.com.cn" + "url": "https://states-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "cn-northwest-1" + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://states.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "cn-northwest-1" + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://states.cn-northwest-1.amazonaws.com.cn" + "url": "https://states.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "cn-northwest-1" + "UseDualStack": false } }, { @@ -1714,9 +587,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": 
"us-isob-east-1" + "UseDualStack": true } }, { @@ -1727,9 +600,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -1738,35 +611,35 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://states.us-isob-east-1.sc2s.sgov.gov" + "url": "https://example.com" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false, + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1776,9 +649,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1788,11 +661,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": 
"1.0" diff --git a/services/sfn/src/main/resources/codegen-resources/service-2.json b/services/sfn/src/main/resources/codegen-resources/service-2.json index f2913493b16b..8c5cfb419ffc 100644 --- a/services/sfn/src/main/resources/codegen-resources/service-2.json +++ b/services/sfn/src/main/resources/codegen-resources/service-2.json @@ -47,11 +47,32 @@ {"shape":"StateMachineDeleting"}, {"shape":"StateMachineLimitExceeded"}, {"shape":"StateMachineTypeNotSupported"}, - {"shape":"TooManyTags"} + {"shape":"TooManyTags"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"} ], - "documentation":"

      Creates a state machine. A state machine consists of a collection of states that can do work (Task states), determine to which states to transition next (Choice states), stop an execution with an error (Fail states), and so on. State machines are specified using a JSON-based, structured language. For more information, see Amazon States Language in the Step Functions User Guide.

      This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

      CreateStateMachine is an idempotent API. Subsequent requests won’t create a duplicate resource if it was already created. CreateStateMachine's idempotency check is based on the state machine name, definition, type, LoggingConfiguration and TracingConfiguration. If a following request has a different roleArn or tags, Step Functions will ignore these differences and treat it as an idempotent request of the previous. In this case, roleArn and tags will not be updated, even if they are different.

      ", + "documentation":"

      Creates a state machine. A state machine consists of a collection of states that can do work (Task states), determine to which states to transition next (Choice states), stop an execution with an error (Fail states), and so on. State machines are specified using a JSON-based, structured language. For more information, see Amazon States Language in the Step Functions User Guide.

      If you set the publish parameter of this API action to true, it publishes version 1 as the first revision of the state machine.

      This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

      CreateStateMachine is an idempotent API. Subsequent requests won’t create a duplicate resource if it was already created. CreateStateMachine's idempotency check is based on the state machine name, definition, type, LoggingConfiguration, and TracingConfiguration. The check is also based on the publish and versionDescription parameters. If a following request has a different roleArn or tags, Step Functions will ignore these differences and treat it as an idempotent request of the previous. In this case, roleArn and tags will not be updated, even if they are different.

      ", "idempotent":true }, + "CreateStateMachineAlias":{ + "name":"CreateStateMachineAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateStateMachineAliasInput"}, + "output":{"shape":"CreateStateMachineAliasOutput"}, + "errors":[ + {"shape":"InvalidArn"}, + {"shape":"InvalidName"}, + {"shape":"ValidationException"}, + {"shape":"StateMachineDeleting"}, + {"shape":"ResourceNotFound"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

      Creates an alias for a state machine that points to one or two versions of the same state machine. You can set your application to call StartExecution with an alias and update the version the alias uses without changing the client's code.

      You can also map an alias to split StartExecution requests between two versions of a state machine. To do this, add a second RoutingConfig object in the routingConfiguration parameter. You must also specify the percentage of execution run requests each version should receive in both RoutingConfig objects. Step Functions randomly chooses which version runs a given execution based on the percentage you specify.

      To create an alias that points to a single version, specify a single RoutingConfig object with a weight set to 100.

      You can create up to 100 aliases for each state machine. You must delete unused aliases using the DeleteStateMachineAlias API action.

      CreateStateMachineAlias is an idempotent API. Step Functions bases the idempotency check on the stateMachineArn, description, name, and routingConfiguration parameters. Requests that contain the same values for these parameters return a successful idempotent response without creating a duplicate resource.

      Related operations:

      " + }, "DeleteActivity":{ "name":"DeleteActivity", "http":{ @@ -77,7 +98,38 @@ {"shape":"InvalidArn"}, {"shape":"ValidationException"} ], - "documentation":"

      Deletes a state machine. This is an asynchronous operation: It sets the state machine's status to DELETING and begins the deletion process.

      If the given state machine Amazon Resource Name (ARN) is a qualified state machine ARN, it will fail with ValidationException.

      A qualified state machine ARN refers to a Distributed Map state defined within a state machine. For example, the qualified state machine ARN arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers to a Distributed Map state with a label mapStateLabel in the state machine named stateMachineName.

      For EXPRESS state machines, the deletion will happen eventually (usually less than a minute). Running executions may emit logs after DeleteStateMachine API is called.

      " + "documentation":"

      Deletes a state machine. This is an asynchronous operation: It sets the state machine's status to DELETING and begins the deletion process.

      A qualified state machine ARN can either refer to a Distributed Map state defined within a state machine, a version ARN, or an alias ARN.

      The following are some examples of qualified and unqualified state machine ARNs:

      • The following qualified state machine ARN refers to a Distributed Map state with a label mapStateLabel in a state machine named myStateMachine.

        arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel

        If you provide a qualified state machine ARN that refers to a Distributed Map state, the request fails with ValidationException.

      • The following unqualified state machine ARN refers to a state machine named myStateMachine.

        arn:partition:states:region:account-id:stateMachine:myStateMachine

      This API action also deletes all versions and aliases associated with a state machine.

      For EXPRESS state machines, the deletion happens eventually (usually in less than a minute). Running executions may emit logs after DeleteStateMachine API is called.

      " + }, + "DeleteStateMachineAlias":{ + "name":"DeleteStateMachineAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteStateMachineAliasInput"}, + "output":{"shape":"DeleteStateMachineAliasOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidArn"}, + {"shape":"ResourceNotFound"}, + {"shape":"ConflictException"} + ], + "documentation":"

      Deletes a state machine alias.

      After you delete a state machine alias, you can't use it to start executions. When you delete a state machine alias, Step Functions doesn't delete the state machine versions that alias references.

      Related operations:

      " + }, + "DeleteStateMachineVersion":{ + "name":"DeleteStateMachineVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteStateMachineVersionInput"}, + "output":{"shape":"DeleteStateMachineVersionOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidArn"}, + {"shape":"ConflictException"} + ], + "documentation":"

      Deletes a state machine version. After you delete a version, you can't call StartExecution using that version's ARN or use the version with a state machine alias.

      Deleting a state machine version won't terminate its in-progress executions.

      You can't delete a state machine version currently referenced by one or more aliases. Before you delete a version, you must either delete the aliases or update them to point to another state machine version.

      Related operations:

      " }, "DescribeActivity":{ "name":"DescribeActivity", @@ -105,7 +157,7 @@ {"shape":"ExecutionDoesNotExist"}, {"shape":"InvalidArn"} ], - "documentation":"

      Provides all information about a state machine execution, such as the state machine associated with the execution, the execution input and output, and relevant execution metadata. Use this API action to return the Map Run ARN if the execution was dispatched by a Map Run.

      This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

      This API action is not supported by EXPRESS state machine executions unless they were dispatched by a Map Run.

      " + "documentation":"

      Provides information about a state machine execution, such as the state machine associated with the execution, the execution input and output, and relevant execution metadata. Use this API action to return the Map Run Amazon Resource Name (ARN) if the execution was dispatched by a Map Run.

      If you specify a version or alias ARN when you call the StartExecution API action, DescribeExecution returns that ARN.

      This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

      Executions of an EXPRESS state machine aren't supported by DescribeExecution unless a Map Run dispatched them.

      " }, "DescribeMapRun":{ "name":"DescribeMapRun", @@ -133,7 +185,22 @@ {"shape":"InvalidArn"}, {"shape":"StateMachineDoesNotExist"} ], - "documentation":"

      Provides information about a state machine's definition, its IAM role Amazon Resource Name (ARN), and configuration. If the state machine ARN is a qualified state machine ARN, the response returned includes the Map state's label.

      A qualified state machine ARN refers to a Distributed Map state defined within a state machine. For example, the qualified state machine ARN arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers to a Distributed Map state with a label mapStateLabel in the state machine named stateMachineName.

      This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

      " + "documentation":"

      Provides information about a state machine's definition, its IAM role Amazon Resource Name (ARN), and configuration.

      A qualified state machine ARN can either refer to a Distributed Map state defined within a state machine, a version ARN, or an alias ARN.

      The following are some examples of qualified and unqualified state machine ARNs:

      • The following qualified state machine ARN refers to a Distributed Map state with a label mapStateLabel in a state machine named myStateMachine.

        arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel

        If you provide a qualified state machine ARN that refers to a Distributed Map state, the request fails with ValidationException.

      • The following qualified state machine ARN refers to an alias named PROD.

        arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachine:PROD>

        If you provide a qualified state machine ARN that refers to a version ARN or an alias ARN, the request returns details for that version or alias.

      • The following unqualified state machine ARN refers to a state machine named myStateMachine.

        arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachine>

      This API action returns the details for a state machine version if the stateMachineArn you specify is a state machine version ARN.

      This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

      " + }, + "DescribeStateMachineAlias":{ + "name":"DescribeStateMachineAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStateMachineAliasInput"}, + "output":{"shape":"DescribeStateMachineAliasOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidArn"}, + {"shape":"ResourceNotFound"} + ], + "documentation":"

      Returns details about a state machine alias.

      Related operations:

      " }, "DescribeStateMachineForExecution":{ "name":"DescribeStateMachineForExecution", @@ -147,7 +214,7 @@ {"shape":"ExecutionDoesNotExist"}, {"shape":"InvalidArn"} ], - "documentation":"

      Provides information about a state machine's definition, its execution role ARN, and configuration. If an execution was dispatched by a Map Run, the Map Run is returned in the response. Additionally, the state machine returned will be the state machine associated with the Map Run.

      This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

      This API action is not supported by EXPRESS state machines.

      " + "documentation":"

      Provides information about a state machine's definition, its execution role ARN, and configuration. If a Map Run dispatched the execution, this action returns the Map Run Amazon Resource Name (ARN) in the response. The state machine returned is the state machine associated with the Map Run.

      This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

      This API action is not supported by EXPRESS state machines.

      " }, "GetActivityTask":{ "name":"GetActivityTask", @@ -208,7 +275,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFound"} ], - "documentation":"

      Lists all executions of a state machine or a Map Run. You can list all executions related to a state machine by specifying a state machine Amazon Resource Name (ARN), or those related to a Map Run by specifying a Map Run ARN.

      Results are sorted by time, with the most recent execution first.

      If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

      This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

      This API action is not supported by EXPRESS state machines.

      " + "documentation":"

      Lists all executions of a state machine or a Map Run. You can list all executions related to a state machine by specifying a state machine Amazon Resource Name (ARN), or those related to a Map Run by specifying a Map Run ARN.

      You can also provide a state machine alias ARN or version ARN to list the executions associated with a specific alias or version.

      Results are sorted by time, with the most recent execution first.

      If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

      This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.

      This API action is not supported by EXPRESS state machines.

      " }, "ListMapRuns":{ "name":"ListMapRuns", @@ -225,6 +292,38 @@ ], "documentation":"

      Lists all Map Runs that were started by a given state machine execution. Use this API action to obtain Map Run ARNs, and then call DescribeMapRun to obtain more information, if needed.

      " }, + "ListStateMachineAliases":{ + "name":"ListStateMachineAliases", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStateMachineAliasesInput"}, + "output":{"shape":"ListStateMachineAliasesOutput"}, + "errors":[ + {"shape":"InvalidArn"}, + {"shape":"InvalidToken"}, + {"shape":"ResourceNotFound"}, + {"shape":"StateMachineDoesNotExist"}, + {"shape":"StateMachineDeleting"} + ], + "documentation":"

      Lists aliases for a specified state machine ARN. Results are sorted by time, with the most recently created aliases listed first.

      To list aliases that reference a state machine version, you can specify the version ARN in the stateMachineArn parameter.

      If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

      Related operations:

      " + }, + "ListStateMachineVersions":{ + "name":"ListStateMachineVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStateMachineVersionsInput"}, + "output":{"shape":"ListStateMachineVersionsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidArn"}, + {"shape":"InvalidToken"} + ], + "documentation":"

      Lists versions for the specified state machine Amazon Resource Name (ARN).

      The results are sorted in descending order of the version creation time.

      If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

      Related operations:

      " + }, "ListStateMachines":{ "name":"ListStateMachines", "http":{ @@ -252,6 +351,25 @@ ], "documentation":"

      List tags for a given resource.

      Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @.

      " }, + "PublishStateMachineVersion":{ + "name":"PublishStateMachineVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PublishStateMachineVersionInput"}, + "output":{"shape":"PublishStateMachineVersionOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"StateMachineDeleting"}, + {"shape":"StateMachineDoesNotExist"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"InvalidArn"} + ], + "documentation":"

      Creates a version from the current revision of a state machine. Use versions to create immutable snapshots of your state machine. You can start executions from versions either directly or with an alias. To create an alias, use CreateStateMachineAlias.

      You can publish up to 1000 versions for each state machine. You must manually delete unused versions using the DeleteStateMachineVersion API action.

      PublishStateMachineVersion is an idempotent API. It doesn't create a duplicate state machine version if it already exists for the current revision. Step Functions bases PublishStateMachineVersion's idempotency check on the stateMachineArn, name, and revisionId parameters. Requests with the same parameters return a successful idempotent response. If you don't specify a revisionId, Step Functions checks for a previously published version of the state machine's current revision.

      Related operations:

      ", + "idempotent":true + }, "SendTaskFailure":{ "name":"SendTaskFailure", "http":{ @@ -316,7 +434,7 @@ {"shape":"StateMachineDeleting"}, {"shape":"ValidationException"} ], - "documentation":"

      Starts a state machine execution. If the given state machine Amazon Resource Name (ARN) is a qualified state machine ARN, it will fail with ValidationException.

      A qualified state machine ARN refers to a Distributed Map state defined within a state machine. For example, the qualified state machine ARN arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers to a Distributed Map state with a label mapStateLabel in the state machine named stateMachineName.

      StartExecution is idempotent for STANDARD workflows. For a STANDARD workflow, if StartExecution is called with the same name and input as a running execution, the call will succeed and return the same response as the original request. If the execution is closed or if the input is different, it will return a 400 ExecutionAlreadyExists error. Names can be reused after 90 days.

      StartExecution is not idempotent for EXPRESS workflows.

      ", + "documentation":"

      Starts a state machine execution.

      A qualified state machine ARN can either refer to a Distributed Map state defined within a state machine, a version ARN, or an alias ARN.

      The following are some examples of qualified and unqualified state machine ARNs:

      • The following qualified state machine ARN refers to a Distributed Map state with a label mapStateLabel in a state machine named myStateMachine.

        arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel

        If you provide a qualified state machine ARN that refers to a Distributed Map state, the request fails with ValidationException.

      • The following qualified state machine ARN refers to an alias named PROD.

        arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachine:PROD>

        If you provide a qualified state machine ARN that refers to a version ARN or an alias ARN, the request starts execution for that version or alias.

      • The following unqualified state machine ARN refers to a state machine named myStateMachine.

        arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachine>

      If you start an execution with an unqualified state machine ARN, Step Functions uses the latest revision of the state machine for the execution.

      To start executions of a state machine version, call StartExecution and provide the version ARN or the ARN of an alias that points to the version.

      StartExecution is idempotent for STANDARD workflows. For a STANDARD workflow, if you call StartExecution with the same name and input as a running execution, the call succeeds and returns the same response as the original request. If the execution is closed or if the input is different, it returns a 400 ExecutionAlreadyExists error. You can reuse names after 90 days.

      StartExecution isn't idempotent for EXPRESS workflows.

      ", "idempotent":true }, "StartSyncExecution":{ @@ -413,10 +531,28 @@ {"shape":"MissingRequiredParameter"}, {"shape":"StateMachineDeleting"}, {"shape":"StateMachineDoesNotExist"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, {"shape":"ValidationException"} ], - "documentation":"

      Updates an existing state machine by modifying its definition, roleArn, or loggingConfiguration. Running executions will continue to use the previous definition and roleArn. You must include at least one of definition or roleArn or you will receive a MissingRequiredParameter error.

      If the given state machine Amazon Resource Name (ARN) is a qualified state machine ARN, it will fail with ValidationException.

      A qualified state machine ARN refers to a Distributed Map state defined within a state machine. For example, the qualified state machine ARN arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers to a Distributed Map state with a label mapStateLabel in the state machine named stateMachineName.

      All StartExecution calls within a few seconds will use the updated definition and roleArn. Executions started immediately after calling UpdateStateMachine may use the previous state machine definition and roleArn.

      ", + "documentation":"

      Updates an existing state machine by modifying its definition, roleArn, or loggingConfiguration. Running executions will continue to use the previous definition and roleArn. You must include at least one of definition or roleArn or you will receive a MissingRequiredParameter error.

      A qualified state machine ARN refers to a Distributed Map state defined within a state machine. For example, the qualified state machine ARN arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers to a Distributed Map state with a label mapStateLabel in the state machine named stateMachineName.

      A qualified state machine ARN can either refer to a Distributed Map state defined within a state machine, a version ARN, or an alias ARN.

      The following are some examples of qualified and unqualified state machine ARNs:

      • The following qualified state machine ARN refers to a Distributed Map state with a label mapStateLabel in a state machine named myStateMachine.

        arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel

        If you provide a qualified state machine ARN that refers to a Distributed Map state, the request fails with ValidationException.

      • The following qualified state machine ARN refers to an alias named PROD.

        arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachine:PROD>

        If you provide a qualified state machine ARN that refers to a version ARN or an alias ARN, the request starts execution for that version or alias.

      • The following unqualified state machine ARN refers to a state machine named myStateMachine.

        arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachine>

      After you update your state machine, you can set the publish parameter to true in the same action to publish a new version. This way, you can opt-in to strict versioning of your state machine.

      Step Functions assigns monotonically increasing integers for state machine versions, starting at version number 1.

      All StartExecution calls within a few seconds use the updated definition and roleArn. Executions started immediately after you call UpdateStateMachine may use the previous state machine definition and roleArn.

      ", "idempotent":true + }, + "UpdateStateMachineAlias":{ + "name":"UpdateStateMachineAlias", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateStateMachineAliasInput"}, + "output":{"shape":"UpdateStateMachineAliasOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidArn"}, + {"shape":"ResourceNotFound"}, + {"shape":"ConflictException"} + ], + "documentation":"

      Updates the configuration of an existing state machine alias by modifying its description or routingConfiguration.

      You must specify at least one of the description or routingConfiguration parameters to update a state machine alias.

      UpdateStateMachineAlias is an idempotent API. Step Functions bases the idempotency check on the stateMachineAliasArn, description, and routingConfiguration parameters. Requests with the same parameters return an idempotent response.

      This operation is eventually consistent. All StartExecution requests made within a few seconds use the latest alias configuration. Executions started immediately after calling UpdateStateMachineAlias may use the previous routing configuration.

      Related operations:

      " } }, "shapes":{ @@ -566,6 +702,11 @@ "documentation":"

      The maximum number of workers concurrently polling for activity tasks has been reached.

      ", "exception":true }, + "AliasDescription":{ + "type":"string", + "max":256, + "sensitive":true + }, "Arn":{ "type":"string", "max":256, @@ -593,6 +734,12 @@ }, "documentation":"

      An object that describes workflow billing details.

      " }, + "CharacterRestrictedName":{ + "type":"string", + "max":80, + "min":1, + "pattern":"^(?=.*[a-zA-Z_\\-\\.])[a-zA-Z0-9_\\-\\.]+$" + }, "CloudWatchEventsExecutionDataDetails":{ "type":"structure", "members":{ @@ -613,6 +760,14 @@ }, "documentation":"

      " }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

      Updating or deleting a resource can cause an inconsistent state. This error occurs when there are concurrent requests for DeleteStateMachineVersion, PublishStateMachineVersion, or UpdateStateMachine with the publish parameter set to true.

      HTTP Status Code: 409

      ", + "exception":true + }, "ConnectorParameters":{ "type":"string", "max":262144, @@ -650,6 +805,44 @@ } } }, + "CreateStateMachineAliasInput":{ + "type":"structure", + "required":[ + "name", + "routingConfiguration" + ], + "members":{ + "description":{ + "shape":"AliasDescription", + "documentation":"

      A description for the state machine alias.

      " + }, + "name":{ + "shape":"CharacterRestrictedName", + "documentation":"

      The name of the state machine alias.

      To avoid conflict with version ARNs, don't use an integer in the name of the alias.

      " + }, + "routingConfiguration":{ + "shape":"RoutingConfigurationList", + "documentation":"

      The routing configuration of a state machine alias. The routing configuration shifts execution traffic between two state machine versions. routingConfiguration contains an array of RoutingConfig objects that specify up to two state machine versions. Step Functions then randomly chooses which version to run an execution with based on the weight assigned to each RoutingConfig.

      " + } + } + }, + "CreateStateMachineAliasOutput":{ + "type":"structure", + "required":[ + "stateMachineAliasArn", + "creationDate" + ], + "members":{ + "stateMachineAliasArn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) that identifies the created state machine alias.

      " + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

      The date the state machine alias was created.

      " + } + } + }, "CreateStateMachineInput":{ "type":"structure", "required":[ @@ -685,6 +878,14 @@ "tracingConfiguration":{ "shape":"TracingConfiguration", "documentation":"

      Selects whether X-Ray tracing is enabled.

      " + }, + "publish":{ + "shape":"Publish", + "documentation":"

      Set to true to publish the first version of the state machine during creation. The default is false.

      " + }, + "versionDescription":{ + "shape":"VersionDescription", + "documentation":"

      Sets a description for the state machine version. You can only set the description if the publish parameter is set to true. Otherwise, if you set versionDescription, but set publish to false, this API action throws ValidationException.

      " } } }, @@ -702,6 +903,10 @@ "creationDate":{ "shape":"Timestamp", "documentation":"

      The date the state machine is created.

      " + }, + "stateMachineVersionArn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) that identifies the created state machine version. If you do not set the publish parameter to true, this field returns a null value.

      " } } }, @@ -726,6 +931,21 @@ "members":{ } }, + "DeleteStateMachineAliasInput":{ + "type":"structure", + "required":["stateMachineAliasArn"], + "members":{ + "stateMachineAliasArn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) of the state machine alias to delete.

      " + } + } + }, + "DeleteStateMachineAliasOutput":{ + "type":"structure", + "members":{ + } + }, "DeleteStateMachineInput":{ "type":"structure", "required":["stateMachineArn"], @@ -741,6 +961,21 @@ "members":{ } }, + "DeleteStateMachineVersionInput":{ + "type":"structure", + "required":["stateMachineVersionArn"], + "members":{ + "stateMachineVersionArn":{ + "shape":"LongArn", + "documentation":"

      The Amazon Resource Name (ARN) of the state machine version to delete.

      " + } + } + }, + "DeleteStateMachineVersionOutput":{ + "type":"structure", + "members":{ + } + }, "DescribeActivityInput":{ "type":"structure", "required":["activityArn"], @@ -814,7 +1049,7 @@ }, "stopDate":{ "shape":"Timestamp", - "documentation":"

      If the execution has already ended, the date the execution stopped.

      " + "documentation":"

      If the execution ended, the date the execution stopped.

      " }, "input":{ "shape":"SensitiveData", @@ -841,6 +1076,14 @@ "cause":{ "shape":"SensitiveCause", "documentation":"

      The cause string if the state machine execution failed.

      " + }, + "stateMachineVersionArn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) of the state machine version associated with the execution. The version ARN is a combination of state machine ARN and the version number separated by a colon (:). For example, stateMachineARN:1.

      If you start an execution from a StartExecution request without specifying a state machine version or alias ARN, Step Functions returns a null value.

      " + }, + "stateMachineAliasArn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) of the state machine alias associated with the execution. The alias ARN is a combination of state machine ARN and the alias name separated by a colon (:). For example, stateMachineARN:PROD.

      If you start an execution from a StartExecution request with a state machine version ARN, this field will be null.

      " } } }, @@ -910,6 +1153,45 @@ } } }, + "DescribeStateMachineAliasInput":{ + "type":"structure", + "required":["stateMachineAliasArn"], + "members":{ + "stateMachineAliasArn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) of the state machine alias.

      " + } + } + }, + "DescribeStateMachineAliasOutput":{ + "type":"structure", + "members":{ + "stateMachineAliasArn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) of the state machine alias.

      " + }, + "name":{ + "shape":"Name", + "documentation":"

      The name of the state machine alias.

      " + }, + "description":{ + "shape":"AliasDescription", + "documentation":"

      A description of the alias.

      " + }, + "routingConfiguration":{ + "shape":"RoutingConfigurationList", + "documentation":"

      The routing configuration of the alias.

      " + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

      The date the state machine alias was created.

      " + }, + "updateDate":{ + "shape":"Timestamp", + "documentation":"

      The date the state machine alias was last updated.

      For a newly created state machine, this is the same as the creation date.

      " + } + } + }, "DescribeStateMachineForExecutionInput":{ "type":"structure", "required":["executionArn"], @@ -962,6 +1244,10 @@ "label":{ "shape":"MapRunLabel", "documentation":"

      A user-defined or an auto-generated string that identifies a Map state. This field is returned only if the executionArn is a child workflow execution that was started by a Distributed Map state.

      " + }, + "revisionId":{ + "shape":"RevisionId", + "documentation":"

      The revision identifier for the state machine. The first revision ID when you create the state machine is null.

      Use the state machine revisionId parameter to compare the revision of a state machine with the configuration of the state machine used for executions without performing a diff of the properties, such as definition and roleArn.

      " } } }, @@ -971,7 +1257,7 @@ "members":{ "stateMachineArn":{ "shape":"Arn", - "documentation":"

      The Amazon Resource Name (ARN) of the state machine to describe.

      " + "documentation":"

      The Amazon Resource Name (ARN) of the state machine for which you want the information.

      If you specify a state machine version ARN, this API returns details about that version. The version ARN is a combination of state machine ARN and the version number separated by a colon (:). For example, stateMachineARN:1.

      " } } }, @@ -988,7 +1274,7 @@ "members":{ "stateMachineArn":{ "shape":"Arn", - "documentation":"

      The Amazon Resource Name (ARN) that identifies the state machine.

      " + "documentation":"

      The Amazon Resource Name (ARN) that identifies the state machine.

      If you specified a state machine version ARN in your request, the API returns the version ARN. The version ARN is a combination of state machine ARN and the version number separated by a colon (:). For example, stateMachineARN:1.

      " }, "name":{ "shape":"Name", @@ -1012,7 +1298,7 @@ }, "creationDate":{ "shape":"Timestamp", - "documentation":"

      The date the state machine is created.

      " + "documentation":"

      The date the state machine is created.

      For a state machine version, creationDate is the date the version was created.

      " }, "loggingConfiguration":{"shape":"LoggingConfiguration"}, "tracingConfiguration":{ @@ -1022,6 +1308,14 @@ "label":{ "shape":"MapRunLabel", "documentation":"

      A user-defined or an auto-generated string that identifies a Map state. This parameter is present only if the stateMachineArn specified in input is a qualified state machine ARN.

      " + }, + "revisionId":{ + "shape":"RevisionId", + "documentation":"

      The revision identifier for the state machine.

      Use the revisionId parameter to compare between versions of a state machine configuration used for executions without performing a diff of the properties, such as definition and roleArn.

      " + }, + "description":{ + "shape":"VersionDescription", + "documentation":"

      The description of the state machine version.

      " } } }, @@ -1100,7 +1394,7 @@ }, "stateMachineArn":{ "shape":"Arn", - "documentation":"

      The Amazon Resource Name (ARN) of the executed state machine.

      " + "documentation":"

      The Amazon Resource Name (ARN) of the state machine that ran the execution.

      " }, "name":{ "shape":"Name", @@ -1126,6 +1420,14 @@ "shape":"UnsignedInteger", "documentation":"

      The total number of items processed in a child workflow execution. This field is returned only if mapRunArn was specified in the ListExecutions API action. If stateMachineArn was specified in ListExecutions, the itemCount field isn't returned.

      ", "box":true + }, + "stateMachineVersionArn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) of the state machine version associated with the execution.

      If the state machine execution was started with an unqualified ARN, it returns null.

      If the execution was started using a stateMachineAliasArn, both the stateMachineAliasArn and stateMachineVersionArn parameters contain the respective values.

      " + }, + "stateMachineAliasArn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) of the state machine alias used to start an execution.

      If the state machine execution was started with an unqualified ARN or a version ARN, it returns null.

      " } }, "documentation":"

      Contains details about an execution.

      " @@ -1144,6 +1446,14 @@ "roleArn":{ "shape":"Arn", "documentation":"

      The Amazon Resource Name (ARN) of the IAM role used for executing Lambda tasks.

      " + }, + "stateMachineAliasArn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) that identifies a state machine alias used for starting the state machine execution.

      " + }, + "stateMachineVersionArn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) that identifies a state machine version used for starting the state machine execution.

      " } }, "documentation":"

      Contains details about the start of the execution.

      " @@ -1650,7 +1960,7 @@ "members":{ "stateMachineArn":{ "shape":"Arn", - "documentation":"

      The Amazon Resource Name (ARN) of the state machine whose executions is listed.

      You can specify either a mapRunArn or a stateMachineArn, but not both.

      " + "documentation":"

      The Amazon Resource Name (ARN) of the state machine whose executions are listed.

      You can specify either a mapRunArn or a stateMachineArn, but not both.

      You can also return a list of executions associated with a specific alias or version, by specifying an alias ARN or a version ARN in the stateMachineArn parameter.

      " }, "statusFilter":{ "shape":"ExecutionStatus", @@ -1721,6 +2031,70 @@ } } }, + "ListStateMachineAliasesInput":{ + "type":"structure", + "required":["stateMachineArn"], + "members":{ + "stateMachineArn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) of the state machine for which you want to list aliases.

      If you specify a state machine version ARN, this API returns a list of aliases for that version.

      " + }, + "nextToken":{ + "shape":"PageToken", + "documentation":"

      If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

      " + }, + "maxResults":{ + "shape":"PageSize", + "documentation":"

      The maximum number of results that are returned per call. You can use nextToken to obtain further pages of results. The default is 100 and the maximum allowed page size is 1000. A value of 0 uses the default.

      This is only an upper limit. The actual number of results returned per call might be fewer than the specified maximum.

      " + } + } + }, + "ListStateMachineAliasesOutput":{ + "type":"structure", + "required":["stateMachineAliases"], + "members":{ + "stateMachineAliases":{ + "shape":"StateMachineAliasList", + "documentation":"

      Aliases for the state machine.

      " + }, + "nextToken":{ + "shape":"PageToken", + "documentation":"

      If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

      " + } + } + }, + "ListStateMachineVersionsInput":{ + "type":"structure", + "required":["stateMachineArn"], + "members":{ + "stateMachineArn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) of the state machine.

      " + }, + "nextToken":{ + "shape":"PageToken", + "documentation":"

      If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

      " + }, + "maxResults":{ + "shape":"PageSize", + "documentation":"

      The maximum number of results that are returned per call. You can use nextToken to obtain further pages of results. The default is 100 and the maximum allowed page size is 1000. A value of 0 uses the default.

      This is only an upper limit. The actual number of results returned per call might be fewer than the specified maximum.

      " + } + } + }, + "ListStateMachineVersionsOutput":{ + "type":"structure", + "required":["stateMachineVersions"], + "members":{ + "stateMachineVersions":{ + "shape":"StateMachineVersionList", + "documentation":"

      Versions for the state machine.

      " + }, + "nextToken":{ + "shape":"PageToken", + "documentation":"

      If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

      " + } + } + }, "ListStateMachinesInput":{ "type":"structure", "members":{ @@ -2027,16 +2401,77 @@ "max":1024, "min":1 }, + "Publish":{"type":"boolean"}, + "PublishStateMachineVersionInput":{ + "type":"structure", + "required":["stateMachineArn"], + "members":{ + "stateMachineArn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) of the state machine.

      " + }, + "revisionId":{ + "shape":"RevisionId", + "documentation":"

      Only publish the state machine version if the current state machine's revision ID matches the specified ID.

      Use this option to avoid publishing a version if the state machine changed since you last updated it. If the specified revision ID doesn't match the state machine's current revision ID, the API returns ConflictException.

      To specify an initial revision ID for a state machine with no revision ID assigned, specify the string INITIAL for the revisionId parameter. For example, you can specify a revisionID of INITIAL when you create a state machine using the CreateStateMachine API action.

      " + }, + "description":{ + "shape":"VersionDescription", + "documentation":"

      An optional description of the state machine version.

      " + } + } + }, + "PublishStateMachineVersionOutput":{ + "type":"structure", + "required":[ + "creationDate", + "stateMachineVersionArn" + ], + "members":{ + "creationDate":{ + "shape":"Timestamp", + "documentation":"

      The date the version was created.

      " + }, + "stateMachineVersionArn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) that identifies the state machine version.

      " + } + } + }, "ResourceNotFound":{ "type":"structure", "members":{ "message":{"shape":"ErrorMessage"}, "resourceName":{"shape":"Arn"} }, - "documentation":"

      Could not find the referenced resource. Only state machine and activity ARNs are supported.

      ", + "documentation":"

      Could not find the referenced resource.

      ", "exception":true }, "ReverseOrder":{"type":"boolean"}, + "RevisionId":{"type":"string"}, + "RoutingConfigurationList":{ + "type":"list", + "member":{"shape":"RoutingConfigurationListItem"}, + "max":2, + "min":1 + }, + "RoutingConfigurationListItem":{ + "type":"structure", + "required":[ + "stateMachineVersionArn", + "weight" + ], + "members":{ + "stateMachineVersionArn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) that identifies one or two state machine versions defined in the routing configuration.

      If you specify the ARN of a second version, it must belong to the same state machine as the first version.

      " + }, + "weight":{ + "shape":"VersionWeight", + "documentation":"

      The percentage of traffic you want to route to the second state machine version. The sum of the weights in the routing configuration must be equal to 100.

      " + } + }, + "documentation":"

      Contains details about the routing configuration of a state machine alias. In a routing configuration, you define an array of objects that specify up to two state machine versions. You also specify the percentage of traffic to be routed to each version.

      " + }, "SendTaskFailureInput":{ "type":"structure", "required":["taskToken"], @@ -2119,17 +2554,25 @@ "min":0, "sensitive":true }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

      The request would cause a service quota to be exceeded.

      HTTP Status Code: 402

      ", + "exception":true + }, "StartExecutionInput":{ "type":"structure", "required":["stateMachineArn"], "members":{ "stateMachineArn":{ "shape":"Arn", - "documentation":"

      The Amazon Resource Name (ARN) of the state machine to execute.

      " + "documentation":"

      The Amazon Resource Name (ARN) of the state machine to execute.

      The stateMachineArn parameter accepts one of the following inputs:

      • An unqualified state machine ARN – Refers to a state machine ARN that isn't qualified with a version or alias ARN. The following is an example of an unqualified state machine ARN.

        arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachine>

        Step Functions doesn't associate state machine executions that you start with an unqualified ARN with a version. This is true even if that version uses the same revision that the execution used.

      • A state machine version ARN – Refers to a version ARN, which is a combination of state machine ARN and the version number separated by a colon (:). The following is an example of the ARN for version 10.

        arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachine>:10

        Step Functions doesn't associate executions that you start with a version ARN with any aliases that point to that version.

      • A state machine alias ARN – Refers to an alias ARN, which is a combination of state machine ARN and the alias name separated by a colon (:). The following is an example of the ARN for an alias named PROD.

        arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachine:PROD>

        Step Functions associates executions that you start with an alias ARN with that alias and the state machine version used for that execution.

      " }, "name":{ "shape":"Name", - "documentation":"

      The name of the execution. This name must be unique for your Amazon Web Services account, region, and state machine for 90 days. For more information, see Limits Related to State Machine Executions in the Step Functions Developer Guide.

      A name must not contain:

      • white space

      • brackets < > { } [ ]

      • wildcard characters ? *

      • special characters \" # % \\ ^ | ~ ` $ & , ; : /

      • control characters (U+0000-001F, U+007F-009F)

      To enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _.

      " + "documentation":"

      Optional name of the execution. This name must be unique for your Amazon Web Services account, Region, and state machine for 90 days. For more information, see Limits Related to State Machine Executions in the Step Functions Developer Guide.

      A name must not contain:

      • white space

      • brackets < > { } [ ]

      • wildcard characters ? *

      • special characters \" # % \\ ^ | ~ ` $ & , ; : /

      • control characters (U+0000-001F, U+007F-009F)

      To enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _.

      " }, "input":{ "shape":"SensitiveData", @@ -2279,6 +2722,28 @@ }, "documentation":"

      Contains details about an exit from a state during an execution.

      " }, + "StateMachineAliasList":{ + "type":"list", + "member":{"shape":"StateMachineAliasListItem"} + }, + "StateMachineAliasListItem":{ + "type":"structure", + "required":[ + "stateMachineAliasArn", + "creationDate" + ], + "members":{ + "stateMachineAliasArn":{ + "shape":"LongArn", + "documentation":"

      The Amazon Resource Name (ARN) that identifies a state machine alias. The alias ARN is a combination of state machine ARN and the alias name separated by a colon (:). For example, stateMachineARN:PROD.

      " + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

      The creation date of a state machine alias.

      " + } + }, + "documentation":"

      Contains details about a specific state machine alias.

      " + }, "StateMachineAlreadyExists":{ "type":"structure", "members":{ @@ -2365,6 +2830,28 @@ "documentation":"

      ", "exception":true }, + "StateMachineVersionList":{ + "type":"list", + "member":{"shape":"StateMachineVersionListItem"} + }, + "StateMachineVersionListItem":{ + "type":"structure", + "required":[ + "stateMachineVersionArn", + "creationDate" + ], + "members":{ + "stateMachineVersionArn":{ + "shape":"LongArn", + "documentation":"

      The Amazon Resource Name (ARN) that identifies a state machine version. The version ARN is a combination of state machine ARN and the version number separated by a colon (:). For example, stateMachineARN:1.

      " + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

      The creation date of a state machine version.

      " + } + }, + "documentation":"

      Contains details about a specific state machine version.

      " + }, "StopExecutionInput":{ "type":"structure", "required":["executionArn"], @@ -2796,6 +3283,34 @@ "members":{ } }, + "UpdateStateMachineAliasInput":{ + "type":"structure", + "required":["stateMachineAliasArn"], + "members":{ + "stateMachineAliasArn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) of the state machine alias.

      " + }, + "description":{ + "shape":"AliasDescription", + "documentation":"

      A description of the state machine alias.

      " + }, + "routingConfiguration":{ + "shape":"RoutingConfigurationList", + "documentation":"

      The routing configuration of the state machine alias.

      An array of RoutingConfig objects that specifies up to two state machine versions that the alias starts executions for.

      " + } + } + }, + "UpdateStateMachineAliasOutput":{ + "type":"structure", + "required":["updateDate"], + "members":{ + "updateDate":{ + "shape":"Timestamp", + "documentation":"

      The date and time the state machine alias was updated.

      " + } + } + }, "UpdateStateMachineInput":{ "type":"structure", "required":["stateMachineArn"], @@ -2814,11 +3329,19 @@ }, "loggingConfiguration":{ "shape":"LoggingConfiguration", - "documentation":"

      The LoggingConfiguration data type is used to set CloudWatch Logs options.

      " + "documentation":"

      Use the LoggingConfiguration data type to set CloudWatch Logs options.

      " }, "tracingConfiguration":{ "shape":"TracingConfiguration", "documentation":"

      Selects whether X-Ray tracing is enabled.

      " + }, + "publish":{ + "shape":"Publish", + "documentation":"

      Specifies whether the state machine version is published. The default is false. To publish a version after updating the state machine, set publish to true.

      " + }, + "versionDescription":{ + "shape":"VersionDescription", + "documentation":"

      An optional description of the state machine version to publish.

      You can only specify the versionDescription parameter if you've set publish to true.

      " } } }, @@ -2829,6 +3352,14 @@ "updateDate":{ "shape":"Timestamp", "documentation":"

      The date and time the state machine was updated.

      " + }, + "revisionId":{ + "shape":"RevisionId", + "documentation":"

      The revision identifier for the updated state machine.

      " + }, + "stateMachineVersionArn":{ + "shape":"Arn", + "documentation":"

      The Amazon Resource Name (ARN) of the published state machine version.

      If the publish parameter isn't set to true, this field returns null.

      " } } }, @@ -2849,9 +3380,20 @@ "enum":[ "API_DOES_NOT_SUPPORT_LABELED_ARNS", "MISSING_REQUIRED_PARAMETER", - "CANNOT_UPDATE_COMPLETED_MAP_RUN" + "CANNOT_UPDATE_COMPLETED_MAP_RUN", + "INVALID_ROUTING_CONFIGURATION" ] }, + "VersionDescription":{ + "type":"string", + "max":256, + "sensitive":true + }, + "VersionWeight":{ + "type":"integer", + "max":100, + "min":0 + }, "includedDetails":{"type":"boolean"}, "truncated":{"type":"boolean"} }, diff --git a/services/shield/pom.xml b/services/shield/pom.xml index 443ad2d0688a..641a3ca7358c 100644 --- a/services/shield/pom.xml +++ b/services/shield/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT shield AWS Java SDK :: Services :: AWS Shield diff --git a/services/signer/pom.xml b/services/signer/pom.xml index a116265bb427..5531671fa56d 100644 --- a/services/signer/pom.xml +++ b/services/signer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT signer AWS Java SDK :: Services :: Signer diff --git a/services/signer/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/signer/src/main/resources/codegen-resources/endpoint-rule-set.json index d1addb57ad1e..0774b7ef7479 100644 --- a/services/signer/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/signer/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", 
"rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,168 +111,238 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": 
"getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://signer-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://signer-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://signer-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": 
"https://signer-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://signer.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": "https://signer.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://signer.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://signer.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + 
"error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/signer/src/main/resources/codegen-resources/endpoint-tests.json b/services/signer/src/main/resources/codegen-resources/endpoint-tests.json index fe3174a2981a..726d6a211a25 100644 --- a/services/signer/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/signer/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,887 +1,55 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.eu-south-1.amazonaws.com" - } - 
}, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - 
"UseFIPS": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region 
us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://signer.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": 
"eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and 
DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.ap-northeast-2.amazonaws.com" - 
} - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.me-south-1.api.aws" - } - }, - "params": { - 
"UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region 
ap-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://signer-fips.ap-east-1.api.aws" + "url": "https://signer-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://signer-fips.ap-east-1.amazonaws.com" + "url": "https://signer-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://signer.ap-east-1.api.aws" + "url": "https://signer.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://signer.ap-east-1.amazonaws.com" + "url": "https://signer.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -892,9 +60,9 @@ } }, "params": { - "UseDualStack": true, "Region": "cn-north-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -905,9 +73,9 @@ } }, "params": { - "UseDualStack": false, 
"Region": "cn-north-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -918,9 +86,9 @@ } }, "params": { - "UseDualStack": true, "Region": "cn-north-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -931,334 +99,183 @@ } }, "params": { - "UseDualStack": false, "Region": "cn-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.us-gov-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer.us-gov-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://signer.us-gov-west-1.amazonaws.com" + "url": "https://signer-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://signer-fips.ap-southeast-1.api.aws" + "url": 
"https://signer-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://signer-fips.ap-southeast-1.amazonaws.com" + "url": "https://signer.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://signer.ap-southeast-1.api.aws" + "url": "https://signer.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://signer.ap-southeast-1.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://signer-fips.ap-southeast-2.api.aws" + "url": 
"https://signer-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://signer-fips.ap-southeast-2.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://signer.ap-southeast-2.api.aws" + "url": "https://signer.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://signer.ap-southeast-2.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://signer-fips.us-east-1.api.aws" + "url": 
"https://signer-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://signer-fips.us-east-1.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://signer.us-east-1.api.aws" + "url": "https://signer.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://signer.us-east-1.amazonaws.com" + "url": "https://example.com" } }, "params": { - "UseDualStack": false, "Region": "us-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.us-east-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://signer-fips.us-east-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer.us-east-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer.us-east-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": true - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer-fips.cn-northwest-1.amazonaws.com.cn" - } - }, - "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": true - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://signer.cn-northwest-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": false - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://signer.cn-northwest-1.amazonaws.com.cn" - } - }, - "params": { + "UseFIPS": false, "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + 
"documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1268,9 +285,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1280,11 +297,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/signer/src/main/resources/codegen-resources/service-2.json b/services/signer/src/main/resources/codegen-resources/service-2.json index 4902d01f6af4..7e7a700da3f0 100644 --- a/services/signer/src/main/resources/codegen-resources/service-2.json +++ b/services/signer/src/main/resources/codegen-resources/service-2.json @@ -63,6 +63,23 @@ ], "documentation":"

      Returns information about a specific code signing job. You specify the job by using the jobId value that is returned by the StartSigningJob operation.

      " }, + "GetRevocationStatus":{ + "name":"GetRevocationStatus", + "http":{ + "method":"GET", + "requestUri":"/revocations" + }, + "input":{"shape":"GetRevocationStatusRequest"}, + "output":{"shape":"GetRevocationStatusResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServiceErrorException"} + ], + "documentation":"

      Retrieves the revocation status of one or more of the signing profile, signing job, and signing certificate.

      ", + "endpoint":{"hostPrefix":"verification."} + }, "GetSigningPlatform":{ "name":"GetSigningPlatform", "http":{ @@ -190,7 +207,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalServiceErrorException"} ], - "documentation":"

      Creates a signing profile. A signing profile is a code signing template that can be used to carry out a pre-defined signing job. For more information, see http://docs.aws.amazon.com/signer/latest/developerguide/gs-profile.html

      " + "documentation":"

      Creates a signing profile. A signing profile is a code signing template that can be used to carry out a pre-defined signing job.

      " }, "RemoveProfilePermission":{ "name":"RemoveProfilePermission", @@ -242,6 +259,23 @@ ], "documentation":"

      Changes the state of a signing profile to REVOKED. This indicates that signatures generated using the signing profile after an effective start date are no longer valid.

      " }, + "SignPayload":{ + "name":"SignPayload", + "http":{ + "method":"POST", + "requestUri":"/signing-jobs/with-payload" + }, + "input":{"shape":"SignPayloadRequest"}, + "output":{"shape":"SignPayloadResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServiceErrorException"} + ], + "documentation":"

      Signs a binary payload and returns a signature envelope.

      " + }, "StartSigningJob":{ "name":"StartSigningJob", "http":{ @@ -258,7 +292,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalServiceErrorException"} ], - "documentation":"

      Initiates a signing job to be performed on the code provided. Signing jobs are viewable by the ListSigningJobs operation for two years after they are performed. Note the following requirements:

      • You must create an Amazon S3 source bucket. For more information, see Create a Bucket in the Amazon S3 Getting Started Guide.

      • Your S3 source bucket must be version enabled.

      • You must create an S3 destination bucket. Code signing uses your S3 destination bucket to write your signed code.

      • You specify the name of the source and destination buckets when calling the StartSigningJob operation.

      • You must also specify a request token that identifies your request to code signing.

      You can call the DescribeSigningJob and the ListSigningJobs actions after you call StartSigningJob.

      For a Java example that shows how to use this action, see http://docs.aws.amazon.com/acm/latest/userguide/

      " + "documentation":"

      Initiates a signing job to be performed on the code provided. Signing jobs are viewable by the ListSigningJobs operation for two years after they are performed. Note the following requirements:

      • You must create an Amazon S3 source bucket. For more information, see Creating a Bucket in the Amazon S3 Getting Started Guide.

      • Your S3 source bucket must be version enabled.

      • You must create an S3 destination bucket. Code signing uses your S3 destination bucket to write your signed code.

      • You specify the name of the source and destination buckets when calling the StartSigningJob operation.

      • You must also specify a request token that identifies your request to code signing.

      You can call the DescribeSigningJob and the ListSigningJobs actions after you call StartSigningJob.

      For a Java example that shows how to use this action, see StartSigningJob.

      " }, "TagResource":{ "name":"TagResource", @@ -371,6 +405,7 @@ "error":{"httpStatusCode":400}, "exception":true }, + "Blob":{"type":"blob"}, "BucketName":{"type":"string"}, "CancelSigningProfileRequest":{ "type":"structure", @@ -389,6 +424,10 @@ "enum":["AWSIoT"] }, "CertificateArn":{"type":"string"}, + "CertificateHashes":{ + "type":"list", + "member":{"shape":"String"} + }, "ClientRequestToken":{"type":"string"}, "ConflictException":{ "type":"structure", @@ -535,6 +574,57 @@ }, "ErrorCode":{"type":"string"}, "ErrorMessage":{"type":"string"}, + "GetRevocationStatusRequest":{ + "type":"structure", + "required":[ + "signatureTimestamp", + "platformId", + "profileVersionArn", + "jobArn", + "certificateHashes" + ], + "members":{ + "signatureTimestamp":{ + "shape":"Timestamp", + "documentation":"

      The timestamp of the signature that validates the profile or job.

      ", + "location":"querystring", + "locationName":"signatureTimestamp" + }, + "platformId":{ + "shape":"PlatformId", + "documentation":"

      The ID of a signing platform.

      ", + "location":"querystring", + "locationName":"platformId" + }, + "profileVersionArn":{ + "shape":"Arn", + "documentation":"

      The version of a signing profile.

      ", + "location":"querystring", + "locationName":"profileVersionArn" + }, + "jobArn":{ + "shape":"Arn", + "documentation":"

      The ARN of a signing job.

      ", + "location":"querystring", + "locationName":"jobArn" + }, + "certificateHashes":{ + "shape":"CertificateHashes", + "documentation":"

      A list of composite signed hashes that identify certificates.

      A certificate identifier consists of a subject certificate TBS hash (signed by the parent CA) combined with a parent CA TBS hash (signed by the parent CA’s CA). Root certificates are defined as their own CA.

      ", + "location":"querystring", + "locationName":"certificateHashes" + } + } + }, + "GetRevocationStatusResponse":{ + "type":"structure", + "members":{ + "revokedEntities":{ + "shape":"RevokedEntities", + "documentation":"

      A list of revoked entities (including one or more of the signing profile ARN, signing job ID, and certificate hash) supplied as input to the API.

      " + } + } + }, "GetSigningPlatformRequest":{ "type":"structure", "required":["platformId"], @@ -950,6 +1040,11 @@ "min":1 }, "MaxSizeInMB":{"type":"integer"}, + "Metadata":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, "NextToken":{"type":"string"}, "NotFoundException":{ "type":"structure", @@ -961,6 +1056,11 @@ "error":{"httpStatusCode":404}, "exception":true }, + "Payload":{ + "type":"blob", + "max":4096, + "min":1 + }, "Permission":{ "type":"structure", "members":{ @@ -1163,6 +1263,10 @@ } } }, + "RevokedEntities":{ + "type":"list", + "member":{"shape":"String"} + }, "S3Destination":{ "type":"structure", "members":{ @@ -1224,6 +1328,53 @@ "error":{"httpStatusCode":402}, "exception":true }, + "SignPayloadRequest":{ + "type":"structure", + "required":[ + "profileName", + "payload", + "payloadFormat" + ], + "members":{ + "profileName":{ + "shape":"ProfileName", + "documentation":"

      The name of the signing profile.

      " + }, + "profileOwner":{ + "shape":"AccountId", + "documentation":"

      The AWS account ID of the profile owner.

      " + }, + "payload":{ + "shape":"Payload", + "documentation":"

      Specifies the object digest (hash) to sign.

      " + }, + "payloadFormat":{ + "shape":"String", + "documentation":"

      The content type of the payload.

      " + } + } + }, + "SignPayloadResponse":{ + "type":"structure", + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

      Unique identifier of the signing job.

      " + }, + "jobOwner":{ + "shape":"AccountId", + "documentation":"

      The AWS account ID of the job owner.

      " + }, + "metadata":{ + "shape":"Metadata", + "documentation":"

      Information including the signing profile ARN and the signing job ID. Clients use metadata to identify signature records, for example, as annotations added to the signature manifest inside an OCI registry.

      " + }, + "signature":{ + "shape":"Blob", + "documentation":"

      A cryptographic signature.

      " + } + } + }, "SignatureValidityPeriod":{ "type":"structure", "members":{ @@ -1405,7 +1556,7 @@ "members":{ "platformId":{ "shape":"String", - "documentation":"

      The ID of a code signing; platform.

      " + "documentation":"

      The ID of a code signing platform.

      " }, "displayName":{ "shape":"String", @@ -1724,5 +1875,5 @@ "bool":{"type":"boolean"}, "string":{"type":"string"} }, - "documentation":"

      AWS Signer is a fully managed code signing service to help you ensure the trust and integrity of your code.

      AWS Signer supports the following applications:

      With code signing for AWS Lambda, you can sign AWS Lambda deployment packages. Integrated support is provided for Amazon S3, Amazon CloudWatch, and AWS CloudTrail. In order to sign code, you create a signing profile and then use Signer to sign Lambda zip files in S3.

      With code signing for IoT, you can sign code for any IoT device that is supported by AWS. IoT code signing is available for Amazon FreeRTOS and AWS IoT Device Management, and is integrated with AWS Certificate Manager (ACM). In order to sign code, you import a third-party code signing certificate using ACM, and use that to sign updates in Amazon FreeRTOS and AWS IoT Device Management.

      For more information about AWS Signer, see the AWS Signer Developer Guide.

      " + "documentation":"

      AWS Signer is a fully managed code signing service to help you ensure the trust and integrity of your code.

      AWS Signer supports the following applications:

      With code signing for AWS Lambda, you can sign AWS Lambda deployment packages. Integrated support is provided for Amazon S3, Amazon CloudWatch, and AWS CloudTrail. In order to sign code, you create a signing profile and then use Signer to sign Lambda zip files in S3.

      With code signing for IoT, you can sign code for any IoT device that is supported by AWS. IoT code signing is available for Amazon FreeRTOS and AWS IoT Device Management, and is integrated with AWS Certificate Manager (ACM). In order to sign code, you import a third-party code signing certificate using ACM, and use that to sign updates in Amazon FreeRTOS and AWS IoT Device Management.

      With code signing for containers, you can sign container images stored in a container registry such as Amazon Elastic Container Registry (ECR).

      For more information about AWS Signer, see the AWS Signer Developer Guide.

      " } diff --git a/services/simspaceweaver/pom.xml b/services/simspaceweaver/pom.xml index 300bc4fa14c8..e676134a7955 100644 --- a/services/simspaceweaver/pom.xml +++ b/services/simspaceweaver/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT simspaceweaver AWS Java SDK :: Services :: Sim Space Weaver diff --git a/services/simspaceweaver/src/main/resources/codegen-resources/endpoint-tests.json b/services/simspaceweaver/src/main/resources/codegen-resources/endpoint-tests.json index d44d3bd03f2c..38383d6e5e0c 100644 --- a/services/simspaceweaver/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/simspaceweaver/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { - "Region": "us-gov-east-1", + "UseDualStack": true, "UseFIPS": true, - "UseDualStack": true + "Region": "us-gov-east-1" } }, { @@ -21,9 +21,9 @@ } }, "params": { - "Region": "us-gov-east-1", + "UseDualStack": false, "UseFIPS": true, - "UseDualStack": false + "Region": "us-gov-east-1" } }, { @@ -34,9 +34,9 @@ } }, "params": { - "Region": "us-gov-east-1", + "UseDualStack": true, "UseFIPS": false, - "UseDualStack": true + "Region": "us-gov-east-1" } }, { @@ -47,9 +47,9 @@ } }, "params": { - "Region": "us-gov-east-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "us-gov-east-1" } }, { @@ -60,9 +60,9 @@ } }, "params": { - "Region": "cn-north-1", + "UseDualStack": true, "UseFIPS": true, - "UseDualStack": true + "Region": "cn-north-1" } }, { @@ -73,9 +73,9 @@ } }, "params": { - "Region": "cn-north-1", + "UseDualStack": false, "UseFIPS": true, - "UseDualStack": false + "Region": "cn-north-1" } }, { @@ -86,9 +86,9 @@ } }, "params": { - "Region": "cn-north-1", + "UseDualStack": true, "UseFIPS": false, - "UseDualStack": true + "Region": "cn-north-1" } }, { @@ -99,9 +99,9 @@ } }, "params": { - "Region": "cn-north-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": 
false + "Region": "cn-north-1" } }, { @@ -110,9 +110,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-iso-east-1", + "UseDualStack": true, "UseFIPS": true, - "UseDualStack": true + "Region": "us-iso-east-1" } }, { @@ -123,9 +123,9 @@ } }, "params": { - "Region": "us-iso-east-1", + "UseDualStack": false, "UseFIPS": true, - "UseDualStack": false + "Region": "us-iso-east-1" } }, { @@ -134,9 +134,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-iso-east-1", + "UseDualStack": true, "UseFIPS": false, - "UseDualStack": true + "Region": "us-iso-east-1" } }, { @@ -147,9 +147,9 @@ } }, "params": { - "Region": "us-iso-east-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "us-iso-east-1" } }, { @@ -160,9 +160,9 @@ } }, "params": { - "Region": "us-east-1", + "UseDualStack": true, "UseFIPS": true, - "UseDualStack": true + "Region": "us-east-1" } }, { @@ -173,9 +173,9 @@ } }, "params": { - "Region": "us-east-1", + "UseDualStack": false, "UseFIPS": true, - "UseDualStack": false + "Region": "us-east-1" } }, { @@ -186,9 +186,9 @@ } }, "params": { - "Region": "us-east-1", + "UseDualStack": true, "UseFIPS": false, - "UseDualStack": true + "Region": "us-east-1" } }, { @@ -199,9 +199,9 @@ } }, "params": { - "Region": "us-east-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "us-east-1" } }, { @@ -210,9 +210,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-isob-east-1", + "UseDualStack": true, "UseFIPS": true, - "UseDualStack": true + "Region": "us-isob-east-1" } }, { @@ -223,9 +223,9 @@ } }, "params": { - "Region": "us-isob-east-1", + "UseDualStack": false, "UseFIPS": true, - "UseDualStack": false + "Region": "us-isob-east-1" } }, { @@ -234,9 +234,9 @@ "error": "DualStack is enabled but this partition 
does not support DualStack" }, "params": { - "Region": "us-isob-east-1", + "UseDualStack": true, "UseFIPS": false, - "UseDualStack": true + "Region": "us-isob-east-1" } }, { @@ -247,9 +247,9 @@ } }, "params": { - "Region": "us-isob-east-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "us-isob-east-1" } }, { @@ -260,9 +260,9 @@ } }, "params": { - "Region": "us-east-1", - "UseFIPS": false, "UseDualStack": false, + "UseFIPS": false, + "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -272,9 +272,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "Region": "us-east-1", - "UseFIPS": true, "UseDualStack": false, + "UseFIPS": true, + "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -284,9 +284,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "Region": "us-east-1", - "UseFIPS": false, "UseDualStack": true, + "UseFIPS": false, + "Region": "us-east-1", "Endpoint": "https://example.com" } } diff --git a/services/simspaceweaver/src/main/resources/codegen-resources/service-2.json b/services/simspaceweaver/src/main/resources/codegen-resources/service-2.json index e3a9711634f1..7f94c800ddf9 100644 --- a/services/simspaceweaver/src/main/resources/codegen-resources/service-2.json +++ b/services/simspaceweaver/src/main/resources/codegen-resources/service-2.json @@ -368,7 +368,7 @@ "members":{ "Destination":{ "shape":"S3Destination", - "documentation":"

      The Amazon S3 bucket and optional folder (object key prefix) where SimSpace Weaver creates the snapshot file.

      " + "documentation":"

      The Amazon S3 bucket and optional folder (object key prefix) where SimSpace Weaver creates the snapshot file.

      The Amazon S3 bucket must be in the same Amazon Web Services Region as the simulation.

      " }, "Simulation":{ "shape":"SimSpaceWeaverResourceName", @@ -751,7 +751,7 @@ "type":"string", "max":1600, "min":0, - "pattern":"^arn:(?:aws|aws-cn):log-group:([a-z]{2}-[a-z]+-\\d{1}):(\\d{12})?:role\\/(.+)$" + "pattern":"^arn:(?:aws|aws-cn|aws-us-gov):log-group:([a-z]{2}-[a-z]+-\\d{1}):(\\d{12})?:role\\/(.+)$" }, "LoggingConfiguration":{ "type":"structure", @@ -806,7 +806,7 @@ "type":"string", "max":1600, "min":0, - "pattern":"^arn:(?:aws|aws-cn):iam::(\\d{12})?:role\\/(.+)$" + "pattern":"^arn:(?:aws|aws-cn|aws-us-gov):iam::(\\d{12})?:role\\/(.+)$" }, "S3Destination":{ "type":"structure", @@ -852,7 +852,7 @@ "type":"string", "max":1600, "min":0, - "pattern":"^arn:(?:aws|aws-cn):simspaceweaver:([a-z]{2}-[a-z]+-\\d{1}):(\\d{12})?:([a-z]+)\\/(.+)$" + "pattern":"^arn:(?:aws|aws-cn|aws-us-gov):simspaceweaver:([a-z]{2}-[a-z]+-\\d{1}):(\\d{12})?:([a-z]+)\\/(.+)$" }, "SimSpaceWeaverLongResourceName":{ "type":"string", @@ -1112,7 +1112,7 @@ }, "SnapshotS3Location":{ "shape":"S3Location", - "documentation":"

      The location of the snapshot .zip file in Amazon Simple Storage Service (Amazon S3). For more information about Amazon S3, see the Amazon Simple Storage Service User Guide .

      Provide a SnapshotS3Location to start your simulation from a snapshot.

      If you provide a SnapshotS3Location then you can't provide a SchemaS3Location.

      " + "documentation":"

      The location of the snapshot .zip file in Amazon Simple Storage Service (Amazon S3). For more information about Amazon S3, see the Amazon Simple Storage Service User Guide .

      Provide a SnapshotS3Location to start your simulation from a snapshot.

      The Amazon S3 bucket must be in the same Amazon Web Services Region as the simulation.

      If you provide a SnapshotS3Location then you can't provide a SchemaS3Location.

      " }, "Tags":{ "shape":"TagMap", diff --git a/services/sms/pom.xml b/services/sms/pom.xml index 5ddb7d319605..bf2e8fa7e400 100644 --- a/services/sms/pom.xml +++ b/services/sms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT sms AWS Java SDK :: Services :: AWS Server Migration diff --git a/services/snowball/pom.xml b/services/snowball/pom.xml index 65d2245928c5..04b5555f8c09 100644 --- a/services/snowball/pom.xml +++ b/services/snowball/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT snowball AWS Java SDK :: Services :: Amazon Snowball diff --git a/services/snowdevicemanagement/pom.xml b/services/snowdevicemanagement/pom.xml index 2eff8993741d..8d3f798da301 100644 --- a/services/snowdevicemanagement/pom.xml +++ b/services/snowdevicemanagement/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT snowdevicemanagement AWS Java SDK :: Services :: Snow Device Management diff --git a/services/sns/pom.xml b/services/sns/pom.xml index 740b587f8799..04c3fec1c5b4 100644 --- a/services/sns/pom.xml +++ b/services/sns/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT sns AWS Java SDK :: Services :: Amazon SNS diff --git a/services/sqs/pom.xml b/services/sqs/pom.xml index 3c917eed485d..85bb28c5ff9e 100644 --- a/services/sqs/pom.xml +++ b/services/sqs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT sqs AWS Java SDK :: Services :: Amazon SQS diff --git a/services/sqs/src/main/resources/codegen-resources/service-2.json b/services/sqs/src/main/resources/codegen-resources/service-2.json index c1a758508ca3..59c24abf0823 100644 --- a/services/sqs/src/main/resources/codegen-resources/service-2.json +++ b/services/sqs/src/main/resources/codegen-resources/service-2.json @@ -22,7 +22,24 @@ "errors":[ {"shape":"OverLimit"} ], - "documentation":"

      Adds a permission to a queue for a specific principal. This allows sharing access to the queue.

      When you create a queue, you have full control access rights for the queue. Only you, the owner of the queue, can grant or deny permissions to the queue. For more information about these permissions, see Allow Developers to Write Messages to a Shared Queue in the Amazon SQS Developer Guide.

      • AddPermission generates a policy for you. You can use SetQueueAttributes to upload your policy. For more information, see Using Custom Policies with the Amazon SQS Access Policy Language in the Amazon SQS Developer Guide.

      • An Amazon SQS policy can have a maximum of 7 actions.

      • To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy.

      Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

      &AttributeName.1=first

      &AttributeName.2=second

      Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon SQS Developer Guide.

      " + "documentation":"

      Adds a permission to a queue for a specific principal. This allows sharing access to the queue.

      When you create a queue, you have full control access rights for the queue. Only you, the owner of the queue, can grant or deny permissions to the queue. For more information about these permissions, see Allow Developers to Write Messages to a Shared Queue in the Amazon SQS Developer Guide.

      • AddPermission generates a policy for you. You can use SetQueueAttributes to upload your policy. For more information, see Using Custom Policies with the Amazon SQS Access Policy Language in the Amazon SQS Developer Guide.

      • An Amazon SQS policy can have a maximum of seven actions per statement.

      • To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy.

      • Amazon SQS AddPermission does not support adding a non-account principal.

      Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide.

      " + }, + "CancelMessageMoveTask":{ + "name":"CancelMessageMoveTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelMessageMoveTaskRequest"}, + "output":{ + "shape":"CancelMessageMoveTaskResult", + "resultWrapper":"CancelMessageMoveTaskResult" + }, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedOperation"} + ], + "documentation":"

      Cancels a specified message movement task.

      • A message movement can only be cancelled when the current status is RUNNING.

      • Cancelling a message movement task does not revert the messages that have already been moved. It can only stop the messages that have not been moved yet.

      " }, "ChangeMessageVisibility":{ "name":"ChangeMessageVisibility", @@ -35,7 +52,7 @@ {"shape":"MessageNotInflight"}, {"shape":"ReceiptHandleIsInvalid"} ], - "documentation":"

      Changes the visibility timeout of a specified message in a queue to a new value. The default visibility timeout for a message is 30 seconds. The minimum is 0 seconds. The maximum is 12 hours. For more information, see Visibility Timeout in the Amazon SQS Developer Guide.

      For example, you have a message with a visibility timeout of 5 minutes. After 3 minutes, you call ChangeMessageVisibility with a timeout of 10 minutes. You can continue to call ChangeMessageVisibility to extend the visibility timeout to the maximum allowed time. If you try to extend the visibility timeout beyond the maximum, your request is rejected.

      An Amazon SQS message has three basic states:

      1. Sent to a queue by a producer.

      2. Received from the queue by a consumer.

      3. Deleted from the queue.

      A message is considered to be stored after it is sent to a queue by a producer, but not yet received from the queue by a consumer (that is, between states 1 and 2). There is no limit to the number of stored messages. A message is considered to be in flight after it is received from a queue by a consumer, but not yet deleted from the queue (that is, between states 2 and 3). There is a limit to the number of inflight messages.

      Limits that apply to inflight messages are unrelated to the unlimited number of stored messages.

      For most standard queues (depending on queue traffic and message backlog), there can be a maximum of approximately 120,000 inflight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. You can also increase the number of queues you use to process your messages. To request a limit increase, file a support request.

      For FIFO queues, there can be a maximum of 20,000 inflight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns no error messages.

      If you attempt to set the VisibilityTimeout to a value greater than the maximum time left, Amazon SQS returns an error. Amazon SQS doesn't automatically recalculate and increase the timeout to the maximum remaining time.

      Unlike with a queue, when you change the visibility timeout for a specific message the timeout value is applied immediately but isn't saved in memory for that message. If you don't delete a message after it is received, the visibility timeout for the message reverts to the original timeout value (not to the value you set using the ChangeMessageVisibility action) the next time the message is received.

      " + "documentation":"

      Changes the visibility timeout of a specified message in a queue to a new value. The default visibility timeout for a message is 30 seconds. The minimum is 0 seconds. The maximum is 12 hours. For more information, see Visibility Timeout in the Amazon SQS Developer Guide.

      For example, if the default timeout for a queue is 60 seconds, 15 seconds have elapsed since you received the message, and you send a ChangeMessageVisibility call with VisibilityTimeout set to 10 seconds, the 10 seconds begin to count from the time that you make the ChangeMessageVisibility call. Thus, any attempt to change the visibility timeout or to delete that message 10 seconds after you initially change the visibility timeout (a total of 25 seconds) might result in an error.

      An Amazon SQS message has three basic states:

      1. Sent to a queue by a producer.

      2. Received from the queue by a consumer.

      3. Deleted from the queue.

      A message is considered to be stored after it is sent to a queue by a producer, but not yet received from the queue by a consumer (that is, between states 1 and 2). There is no limit to the number of stored messages. A message is considered to be in flight after it is received from a queue by a consumer, but not yet deleted from the queue (that is, between states 2 and 3). There is a limit to the number of in flight messages.

      Limits that apply to in flight messages are unrelated to the unlimited number of stored messages.

      For most standard queues (depending on queue traffic and message backlog), there can be a maximum of approximately 120,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. You can also increase the number of queues you use to process your messages. To request a limit increase, file a support request.

      For FIFO queues, there can be a maximum of 20,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns no error messages.

      If you attempt to set the VisibilityTimeout to a value greater than the maximum time left, Amazon SQS returns an error. Amazon SQS doesn't automatically recalculate and increase the timeout to the maximum remaining time.

      Unlike with a queue, when you change the visibility timeout for a specific message the timeout value is applied immediately but isn't saved in memory for that message. If you don't delete a message after it is received, the visibility timeout for the message reverts to the original timeout value (not to the value you set using the ChangeMessageVisibility action) the next time the message is received.

      " }, "ChangeMessageVisibilityBatch":{ "name":"ChangeMessageVisibilityBatch", @@ -54,7 +71,7 @@ {"shape":"BatchEntryIdsNotDistinct"}, {"shape":"InvalidBatchEntryId"} ], - "documentation":"

      Changes the visibility timeout of multiple messages. This is a batch version of ChangeMessageVisibility. The result of the action on each message is reported individually in the response. You can send up to 10 ChangeMessageVisibility requests with each ChangeMessageVisibilityBatch action.

      Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

      Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

      &AttributeName.1=first

      &AttributeName.2=second

      " + "documentation":"

      Changes the visibility timeout of multiple messages. This is a batch version of ChangeMessageVisibility. The result of the action on each message is reported individually in the response. You can send up to 10 ChangeMessageVisibility requests with each ChangeMessageVisibilityBatch action.

      Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

      " }, "CreateQueue":{ "name":"CreateQueue", @@ -71,7 +88,7 @@ {"shape":"QueueDeletedRecently"}, {"shape":"QueueNameExists"} ], - "documentation":"

      Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following in mind:

      • If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue.

        You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see Moving From a Standard Queue to a FIFO Queue in the Amazon SQS Developer Guide.

      • If you don't provide a value for an attribute, the queue is created with the default value for the attribute.

      • If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

      To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues.

      After you create a queue, you must wait at least one second after the queue is created to be able to use the queue.

      To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only the QueueName parameter. be aware of existing queue names:

      • If you provide the name of an existing queue along with the exact names and values of all the queue's attributes, CreateQueue returns the queue URL for the existing queue.

      • If the queue name, attribute names, or attribute values don't match an existing queue, CreateQueue returns an error.

      Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

      &AttributeName.1=first

      &AttributeName.2=second

      Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon SQS Developer Guide.

      " + "documentation":"

      Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following in mind:

      • If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue.

        You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see Moving From a Standard Queue to a FIFO Queue in the Amazon SQS Developer Guide.

      • If you don't provide a value for an attribute, the queue is created with the default value for the attribute.

      • If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

      To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues.

      After you create a queue, you must wait at least one second after the queue is created to be able to use the queue.

      To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only the QueueName parameter. Be aware of existing queue names:

      • If you provide the name of an existing queue along with the exact names and values of all the queue's attributes, CreateQueue returns the queue URL for the existing queue.

      • If the queue name, attribute names, or attribute values don't match an existing queue, CreateQueue returns an error.

      Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide.

      " }, "DeleteMessage":{ "name":"DeleteMessage", @@ -84,7 +101,7 @@ {"shape":"InvalidIdFormat"}, {"shape":"ReceiptHandleIsInvalid"} ], - "documentation":"

      Deletes the specified message from the specified queue. To select the message to delete, use the ReceiptHandle of the message (not the MessageId which you receive when you send the message). Amazon SQS can delete a message from a queue even if a visibility timeout setting causes the message to be locked by another consumer. Amazon SQS automatically deletes messages left in a queue longer than the retention period configured for the queue.

      The ReceiptHandle is associated with a specific instance of receiving a message. If you receive a message more than once, the ReceiptHandle is different each time you receive a message. When you use the DeleteMessage action, you must provide the most recently received ReceiptHandle for the message (otherwise, the request succeeds, but the message might not be deleted).

      For standard queues, it is possible to receive a message even after you delete it. This might happen on rare occasions if one of the servers which stores a copy of the message is unavailable when you send the request to delete the message. The copy remains on the server and might be returned to you during a subsequent receive request. You should ensure that your application is idempotent, so that receiving a message more than once does not cause issues.

      " + "documentation":"

      Deletes the specified message from the specified queue. To select the message to delete, use the ReceiptHandle of the message (not the MessageId which you receive when you send the message). Amazon SQS can delete a message from a queue even if a visibility timeout setting causes the message to be locked by another consumer. Amazon SQS automatically deletes messages left in a queue longer than the retention period configured for the queue.

      The ReceiptHandle is associated with a specific instance of receiving a message. If you receive a message more than once, the ReceiptHandle is different each time you receive a message. When you use the DeleteMessage action, you must provide the most recently received ReceiptHandle for the message (otherwise, the request succeeds, but the message will not be deleted).

      For standard queues, it is possible to receive a message even after you delete it. This might happen on rare occasions if one of the servers which stores a copy of the message is unavailable when you send the request to delete the message. The copy remains on the server and might be returned to you during a subsequent receive request. You should ensure that your application is idempotent, so that receiving a message more than once does not cause issues.

      " }, "DeleteMessageBatch":{ "name":"DeleteMessageBatch", @@ -103,7 +120,7 @@ {"shape":"BatchEntryIdsNotDistinct"}, {"shape":"InvalidBatchEntryId"} ], - "documentation":"

      Deletes up to ten messages from the specified queue. This is a batch version of DeleteMessage. The result of the action on each message is reported individually in the response.

      Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

      Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

      &AttributeName.1=first

      &AttributeName.2=second

      " + "documentation":"

      Deletes up to ten messages from the specified queue. This is a batch version of DeleteMessage. The result of the action on each message is reported individually in the response.

      Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

      " }, "DeleteQueue":{ "name":"DeleteQueue", @@ -112,7 +129,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteQueueRequest"}, - "documentation":"

      Deletes the queue specified by the QueueUrl, regardless of the queue's contents.

      Be careful with the DeleteQueue action: When you delete a queue, any messages in the queue are no longer available.

      When you delete a queue, the deletion process takes up to 60 seconds. Requests you send involving that queue during the 60 seconds might succeed. For example, a SendMessage request might succeed, but after 60 seconds the queue and the message you sent no longer exist.

      When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

      Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon SQS Developer Guide.

      " + "documentation":"

      Deletes the queue specified by the QueueUrl, regardless of the queue's contents.

      Be careful with the DeleteQueue action: When you delete a queue, any messages in the queue are no longer available.

      When you delete a queue, the deletion process takes up to 60 seconds. Requests you send involving that queue during the 60 seconds might succeed. For example, a SendMessage request might succeed, but after 60 seconds the queue and the message you sent no longer exist.

      When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

      Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide.

      The delete operation uses the HTTP GET verb.

      " }, "GetQueueAttributes":{ "name":"GetQueueAttributes", @@ -162,6 +179,23 @@ ], "documentation":"

      Returns a list of your queues that have the RedrivePolicy queue attribute configured with a dead-letter queue.

      The ListDeadLetterSourceQueues method supports pagination. Set parameter MaxResults in the request to specify the maximum number of results to be returned in the response. If you do not set MaxResults, the response includes a maximum of 1,000 results. If you set MaxResults and there are additional results to display, the response includes a value for NextToken. Use NextToken as a parameter in your next request to ListDeadLetterSourceQueues to receive the next page of results.

      For more information about using dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon SQS Developer Guide.

      " }, + "ListMessageMoveTasks":{ + "name":"ListMessageMoveTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListMessageMoveTasksRequest"}, + "output":{ + "shape":"ListMessageMoveTasksResult", + "resultWrapper":"ListMessageMoveTasksResult" + }, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedOperation"} + ], + "documentation":"

      Gets the most recent message movement tasks (up to 10) under a specific source queue.

      " + }, "ListQueueTags":{ "name":"ListQueueTags", "http":{ @@ -173,7 +207,7 @@ "shape":"ListQueueTagsResult", "resultWrapper":"ListQueueTagsResult" }, - "documentation":"

      List all cost allocation tags added to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon SQS Developer Guide.

      Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon SQS Developer Guide.

      " + "documentation":"

      List all cost allocation tags added to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon SQS Developer Guide.

      Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide.

      " }, "ListQueues":{ "name":"ListQueues", @@ -186,7 +220,7 @@ "shape":"ListQueuesResult", "resultWrapper":"ListQueuesResult" }, - "documentation":"

      Returns a list of your queues in the current region. The response includes a maximum of 1,000 results. If you specify a value for the optional QueueNamePrefix parameter, only queues with a name that begins with the specified value are returned.

      The listQueues method supports pagination. Set parameter MaxResults in the request to specify the maximum number of results to be returned in the response. If you do not set MaxResults, the response includes a maximum of 1,000 results. If you set MaxResults and there are additional results to display, the response includes a value for NextToken. Use NextToken as a parameter in your next request to listQueues to receive the next page of results.

      Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon SQS Developer Guide.

      " + "documentation":"

      Returns a list of your queues in the current region. The response includes a maximum of 1,000 results. If you specify a value for the optional QueueNamePrefix parameter, only queues with a name that begins with the specified value are returned.

      The listQueues method supports pagination. Set parameter MaxResults in the request to specify the maximum number of results to be returned in the response. If you do not set MaxResults, the response includes a maximum of 1,000 results. If you set MaxResults and there are additional results to display, the response includes a value for NextToken. Use NextToken as a parameter in your next request to listQueues to receive the next page of results.

      Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide.

      " }, "PurgeQueue":{ "name":"PurgeQueue", @@ -224,7 +258,7 @@ "requestUri":"/" }, "input":{"shape":"RemovePermissionRequest"}, - "documentation":"

      Revokes any permissions in the queue policy that match the specified Label parameter.

      • Only the owner of a queue can remove permissions from it.

      • Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon SQS Developer Guide.

      • To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy.

      " + "documentation":"

      Revokes any permissions in the queue policy that match the specified Label parameter.

      • Only the owner of a queue can remove permissions from it.

      • Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide.

      • To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy.

      " }, "SendMessage":{ "name":"SendMessage", @@ -262,7 +296,7 @@ {"shape":"InvalidBatchEntryId"}, {"shape":"UnsupportedOperation"} ], - "documentation":"

      Delivers up to ten messages to the specified queue. This is a batch version of SendMessage. For a FIFO queue, multiple messages within a single batch are enqueued in the order they are sent.

      The result of sending each message is reported individually in the response. Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

      The maximum allowed individual message size and the maximum total payload size (the sum of the individual lengths of all of the batched messages) are both 256 KB (262,144 bytes).

      A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed:

      #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF

      Any characters not included in this list will be rejected. For more information, see the W3C specification for characters.

      If you don't specify the DelaySeconds parameter for an entry, Amazon SQS uses the default value for the queue.

      Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

      &AttributeName.1=first

      &AttributeName.2=second

      " + "documentation":"

      You can use SendMessageBatch to send up to 10 messages to the specified queue by assigning either identical or different values to each message (or by not assigning values at all). This is a batch version of SendMessage. For a FIFO queue, multiple messages within a single batch are enqueued in the order they are sent.

      The result of sending each message is reported individually in the response. Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

      The maximum allowed individual message size and the maximum total payload size (the sum of the individual lengths of all of the batched messages) are both 256 KiB (262,144 bytes).

      A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed:

      #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF

      Any characters not included in this list will be rejected. For more information, see the W3C specification for characters.

      If you don't specify the DelaySeconds parameter for an entry, Amazon SQS uses the default value for the queue.

      " }, "SetQueueAttributes":{ "name":"SetQueueAttributes", @@ -274,7 +308,24 @@ "errors":[ {"shape":"InvalidAttributeName"} ], - "documentation":"

      Sets the value of one or more queue attributes. When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes.

      • In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

      • Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon SQS Developer Guide.

      • To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy.

      " + "documentation":"

      Sets the value of one or more queue attributes. When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes and will impact existing messages in the queue, potentially causing them to be expired and deleted if the MessageRetentionPeriod is reduced below the age of existing messages.

      • In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

      • Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide.

      • To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy.

      " + }, + "StartMessageMoveTask":{ + "name":"StartMessageMoveTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartMessageMoveTaskRequest"}, + "output":{ + "shape":"StartMessageMoveTaskResult", + "resultWrapper":"StartMessageMoveTaskResult" + }, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedOperation"} + ], + "documentation":"

      Starts an asynchronous task to move messages from a specified source queue to a specified destination queue.

      • This action is currently limited to supporting message redrive from dead-letter queues (DLQs) only. In this context, the source queue is the dead-letter queue (DLQ), while the destination queue can be the original source queue (from which the messages were driven to the dead-letter-queue), or a custom destination queue.

      • Currently, only standard queues are supported.

      • Only one active message movement task is supported per queue at any given time.

      " }, "TagQueue":{ "name":"TagQueue", @@ -283,7 +334,7 @@ "requestUri":"/" }, "input":{"shape":"TagQueueRequest"}, - "documentation":"

      Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon SQS Developer Guide.

      When you use queue tags, keep the following guidelines in mind:

      • Adding more than 50 tags to a queue isn't recommended.

      • Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings.

      • Tags are case-sensitive.

      • A new tag with a key identical to that of an existing tag overwrites the existing tag.

      For a full list of tag restrictions, see Quotas related to queues in the Amazon SQS Developer Guide.

      Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon SQS Developer Guide.

      " + "documentation":"

      Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon SQS Developer Guide.

      When you use queue tags, keep the following guidelines in mind:

      • Adding more than 50 tags to a queue isn't recommended.

      • Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings.

      • Tags are case-sensitive.

      • A new tag with a key identical to that of an existing tag overwrites the existing tag.

      For a full list of tag restrictions, see Quotas related to queues in the Amazon SQS Developer Guide.

      Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide.

      " }, "UntagQueue":{ "name":"UntagQueue", @@ -292,7 +343,7 @@ "requestUri":"/" }, "input":{"shape":"UntagQueueRequest"}, - "documentation":"

      Remove cost allocation tags from the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon SQS Developer Guide.

      Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon SQS Developer Guide.

      " + "documentation":"

      Remove cost allocation tags from the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon SQS Developer Guide.

      Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide.

      " } }, "shapes":{ @@ -420,6 +471,25 @@ "type":"integer", "box":true }, + "CancelMessageMoveTaskRequest":{ + "type":"structure", + "required":["TaskHandle"], + "members":{ + "TaskHandle":{ + "shape":"String", + "documentation":"

      An identifier associated with a message movement task.

      " + } + } + }, + "CancelMessageMoveTaskResult":{ + "type":"structure", + "members":{ + "ApproximateNumberOfMessagesMoved":{ + "shape":"Long", + "documentation":"

      The approximate number of messages already moved to the destination queue.

      " + } + } + }, "ChangeMessageVisibilityBatchRequest":{ "type":"structure", "required":[ @@ -433,7 +503,7 @@ }, "Entries":{ "shape":"ChangeMessageVisibilityBatchRequestEntryList", - "documentation":"

      A list of receipt handles of the messages for which the visibility timeout must be changed.

      " + "documentation":"

      Lists the receipt handles of the messages for which the visibility timeout must be changed.

      " } }, "documentation":"

      " @@ -458,7 +528,7 @@ "documentation":"

      The new value (in seconds) for the message's visibility timeout.

      " } }, - "documentation":"

      Encloses a receipt handle and an entry id for each message in ChangeMessageVisibilityBatch.

      All of the following list parameters must be prefixed with ChangeMessageVisibilityBatchRequestEntry.n, where n is an integer value starting with 1. For example, a parameter list for this action might look like this:

      &ChangeMessageVisibilityBatchRequestEntry.1.Id=change_visibility_msg_2

      &ChangeMessageVisibilityBatchRequestEntry.1.ReceiptHandle=your_receipt_handle

      &ChangeMessageVisibilityBatchRequestEntry.1.VisibilityTimeout=45

      " + "documentation":"

      Encloses a receipt handle and an entry ID for each message in ChangeMessageVisibilityBatch.

      " }, "ChangeMessageVisibilityBatchRequestEntryList":{ "type":"list", @@ -519,7 +589,7 @@ }, "ReceiptHandle":{ "shape":"String", - "documentation":"

      The receipt handle associated with the message whose visibility timeout is changed. This parameter is returned by the ReceiveMessage action.

      " + "documentation":"

      The receipt handle associated with the message whose visibility timeout is changed. This parameter is returned by the ReceiveMessage action.

      " }, "VisibilityTimeout":{ "shape":"Integer", @@ -537,12 +607,12 @@ }, "Attributes":{ "shape":"QueueAttributeMap", - "documentation":"

      A map of attributes with their corresponding values.

      The following lists the names, descriptions, and values of the special request parameters that the CreateQueue action uses:

      • DelaySeconds – The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 seconds (15 minutes). Default: 0.

      • MaximumMessageSize – The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB).

      • MessageRetentionPeriod – The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer from 60 seconds (1 minute) to 1,209,600 seconds (14 days). Default: 345,600 (4 days).

      • Policy – The queue's policy. A valid Amazon Web Services policy. For more information about policy structure, see Overview of Amazon Web Services IAM Policies in the Amazon IAM User Guide.

      • ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: An integer from 0 to 20 (seconds). Default: 0.

      • RedrivePolicy – The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. For more information about the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon SQS Developer Guide.

        • deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

        • maxReceiveCount – The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue.

        The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.

      • VisibilityTimeout – The visibility timeout for the queue, in seconds. Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide.

      The following attributes apply only to server-side-encryption:

      • KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the Amazon Web Services managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias . For more examples, see KeyId in the Key Management Service API Reference.

      • KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work?.

      • SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (e.g. SSE-KMS or SSE-SQS).

      The following attributes apply only to FIFO (first-in-first-out) queues:

      • FifoQueue – Designates a queue as FIFO. Valid values are true and false. If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue. You can provide this attribute only during queue creation. You can't change it for an existing queue. When you set this attribute, you must also provide the MessageGroupId for your messages explicitly.

        For more information, see FIFO queue logic in the Amazon SQS Developer Guide.

      • ContentBasedDeduplication – Enables content-based deduplication. Valid values are true and false. For more information, see Exactly-once processing in the Amazon SQS Developer Guide. Note the following:

        • Every message must have a unique MessageDeduplicationId.

          • You may provide a MessageDeduplicationId explicitly.

          • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

          • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

          • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

        • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

        • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

      The following attributes apply only to high throughput for FIFO queues:

      • DeduplicationScope – Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue.

      • FifoThroughputLimit – Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. The perMessageGroupId value is allowed only when the value for DeduplicationScope is messageGroup.

      To enable high throughput for FIFO queues, do the following:

      • Set DeduplicationScope to messageGroup.

      • Set FifoThroughputLimit to perMessageGroupId.

      If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified.

      For information on throughput quotas, see Quotas related to messages in the Amazon SQS Developer Guide.

      ", + "documentation":"

      A map of attributes with their corresponding values.

      The following lists the names, descriptions, and values of the special request parameters that the CreateQueue action uses:

      • DelaySeconds – The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 seconds (15 minutes). Default: 0.

      • MaximumMessageSize – The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB).

      • MessageRetentionPeriod – The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer from 60 seconds (1 minute) to 1,209,600 seconds (14 days). Default: 345,600 (4 days). When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes and will impact existing messages in the queue, potentially causing them to be expired and deleted if the MessageRetentionPeriod is reduced below the age of existing messages.

      • Policy – The queue's policy. A valid Amazon Web Services policy. For more information about policy structure, see Overview of Amazon Web Services IAM Policies in the IAM User Guide.

      • ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: An integer from 0 to 20 (seconds). Default: 0.

      • VisibilityTimeout – The visibility timeout for the queue, in seconds. Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide.

      The following attributes apply only to dead-letter queues:

      • RedrivePolicy – The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows:

        • deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

        • maxReceiveCount – The number of times a message is delivered to the source queue before being moved to the dead-letter queue. Default: 10. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue.

      • RedriveAllowPolicy – The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows:

        • redrivePermission – The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are:

          • allowAll – (Default) Any source queues in this Amazon Web Services account in the same Region can specify this queue as the dead-letter queue.

          • denyAll – No source queues can specify this queue as the dead-letter queue.

          • byQueue – Only queues specified by the sourceQueueArns parameter can specify this queue as the dead-letter queue.

        • sourceQueueArns – The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the redrivePermission parameter is set to byQueue. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the redrivePermission parameter to allowAll.

      The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.

      The following attributes apply only to server-side-encryption:

      • KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the Amazon Web Services managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias. For more examples, see KeyId in the Key Management Service API Reference.

      • KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work?

      • SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS).

      The following attributes apply only to FIFO (first-in-first-out) queues:

      • FifoQueue – Designates a queue as FIFO. Valid values are true and false. If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue. You can provide this attribute only during queue creation. You can't change it for an existing queue. When you set this attribute, you must also provide the MessageGroupId for your messages explicitly.

        For more information, see FIFO queue logic in the Amazon SQS Developer Guide.

      • ContentBasedDeduplication – Enables content-based deduplication. Valid values are true and false. For more information, see Exactly-once processing in the Amazon SQS Developer Guide. Note the following:

        • Every message must have a unique MessageDeduplicationId.

          • You may provide a MessageDeduplicationId explicitly.

          • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

          • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

          • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

        • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

        • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

      The following attributes apply only to high throughput for FIFO queues:

      • DeduplicationScope – Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue.

      • FifoThroughputLimit – Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. The perMessageGroupId value is allowed only when the value for DeduplicationScope is messageGroup.

      To enable high throughput for FIFO queues, do the following:

      • Set DeduplicationScope to messageGroup.

      • Set FifoThroughputLimit to perMessageGroupId.

      If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified.

      For information on throughput quotas, see Quotas related to messages in the Amazon SQS Developer Guide.

      ", "locationName":"Attribute" }, "tags":{ "shape":"TagMap", - "documentation":"

      Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon SQS Developer Guide.

      When you use queue tags, keep the following guidelines in mind:

      • Adding more than 50 tags to a queue isn't recommended.

      • Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings.

      • Tags are case-sensitive.

      • A new tag with a key identical to that of an existing tag overwrites the existing tag.

      For a full list of tag restrictions, see Quotas related to queues in the Amazon SQS Developer Guide.

      To be able to tag a queue on creation, you must have the sqs:CreateQueue and sqs:TagQueue permissions.

      Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a user name in the Amazon SQS Developer Guide.

      ", + "documentation":"

      Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon SQS Developer Guide.

      When you use queue tags, keep the following guidelines in mind:

      • Adding more than 50 tags to a queue isn't recommended.

      • Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings.

      • Tags are case-sensitive.

      • A new tag with a key identical to that of an existing tag overwrites the existing tag.

      For a full list of tag restrictions, see Quotas related to queues in the Amazon SQS Developer Guide.

      To be able to tag a queue on creation, you must have the sqs:CreateQueue and sqs:TagQueue permissions.

      Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide.

      ", "locationName":"Tag" } }, @@ -571,7 +641,7 @@ }, "Entries":{ "shape":"DeleteMessageBatchRequestEntryList", - "documentation":"

      A list of receipt handles for the messages to be deleted.

      " + "documentation":"

      Lists the receipt handles for the messages to be deleted.

      " } }, "documentation":"

      " @@ -585,7 +655,7 @@ "members":{ "Id":{ "shape":"String", - "documentation":"

      An identifier for this particular receipt handle. This is used to communicate the result.

      The Ids of a batch request need to be unique within a request.

      This identifier can have up to 80 characters. The following characters are accepted: alphanumeric characters, hyphens (-), and underscores (_).

      " + "documentation":"

      The identifier for this particular receipt handle. This is used to communicate the result.

      The Ids of a batch request need to be unique within a request.

      This identifier can have up to 80 characters. The following characters are accepted: alphanumeric characters, hyphens (-), and underscores (_).

      " }, "ReceiptHandle":{ "shape":"String", @@ -690,7 +760,7 @@ }, "AttributeNames":{ "shape":"AttributeNameList", - "documentation":"

      A list of attributes for which to retrieve information.

      The AttributeName.N parameter is optional, but if you don't specify values for this parameter, the request returns empty results.

      In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

      The following attributes are supported:

      The ApproximateNumberOfMessagesDelayed, ApproximateNumberOfMessagesNotVisible, and ApproximateNumberOfMessagesVisible metrics may not achieve consistency until at least 1 minute after the producers stop sending messages. This period is required for the queue metadata to reach eventual consistency.

      • All – Returns all values.

      • ApproximateNumberOfMessages – Returns the approximate number of messages available for retrieval from the queue.

      • ApproximateNumberOfMessagesDelayed – Returns the approximate number of messages in the queue that are delayed and not available for reading immediately. This can happen when the queue is configured as a delay queue or when a message has been sent with a delay parameter.

      • ApproximateNumberOfMessagesNotVisible – Returns the approximate number of messages that are in flight. Messages are considered to be in flight if they have been sent to a client but have not yet been deleted or have not yet reached the end of their visibility window.

      • CreatedTimestamp – Returns the time when the queue was created in seconds (epoch time).

      • DelaySeconds – Returns the default delay on the queue in seconds.

      • LastModifiedTimestamp – Returns the time when the queue was last changed in seconds (epoch time).

      • MaximumMessageSize – Returns the limit of how many bytes a message can contain before Amazon SQS rejects it.

      • MessageRetentionPeriod – Returns the length of time, in seconds, for which Amazon SQS retains a message.

      • Policy – Returns the policy of the queue.

      • QueueArn – Returns the Amazon resource name (ARN) of the queue.

      • ReceiveMessageWaitTimeSeconds – Returns the length of time, in seconds, for which the ReceiveMessage action waits for a message to arrive.

      • RedrivePolicy – The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. For more information about the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon SQS Developer Guide.

        • deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

        • maxReceiveCount – The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue.

      • VisibilityTimeout – Returns the visibility timeout for the queue. For more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide.

      The following attributes apply only to server-side-encryption:

      • KmsMasterKeyId – Returns the ID of an Amazon Web Services managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms.

      • KmsDataKeyReusePeriodSeconds – Returns the length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling KMS again. For more information, see How Does the Data Key Reuse Period Work?.

      • SqsManagedSseEnabled – Returns information about whether the queue is using SSE-SQS encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (e.g. SSE-KMS or SSE-SQS).

      The following attributes apply only to FIFO (first-in-first-out) queues:

      • FifoQueue – Returns information about whether the queue is FIFO. For more information, see FIFO queue logic in the Amazon SQS Developer Guide.

        To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix.

      • ContentBasedDeduplication – Returns whether content-based deduplication is enabled for the queue. For more information, see Exactly-once processing in the Amazon SQS Developer Guide.

      The following attributes apply only to high throughput for FIFO queues:

      • DeduplicationScope – Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue.

      • FifoThroughputLimit – Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. The perMessageGroupId value is allowed only when the value for DeduplicationScope is messageGroup.

      To enable high throughput for FIFO queues, do the following:

      • Set DeduplicationScope to messageGroup.

      • Set FifoThroughputLimit to perMessageGroupId.

      If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified.

      For information on throughput quotas, see Quotas related to messages in the Amazon SQS Developer Guide.

      " + "documentation":"

      A list of attributes for which to retrieve information.

      The AttributeNames parameter is optional, but if you don't specify values for this parameter, the request returns empty results.

      In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

      The following attributes are supported:

      The ApproximateNumberOfMessagesDelayed, ApproximateNumberOfMessagesNotVisible, and ApproximateNumberOfMessages metrics may not achieve consistency until at least 1 minute after the producers stop sending messages. This period is required for the queue metadata to reach eventual consistency.

      • All – Returns all values.

      • ApproximateNumberOfMessages – Returns the approximate number of messages available for retrieval from the queue.

      • ApproximateNumberOfMessagesDelayed – Returns the approximate number of messages in the queue that are delayed and not available for reading immediately. This can happen when the queue is configured as a delay queue or when a message has been sent with a delay parameter.

      • ApproximateNumberOfMessagesNotVisible – Returns the approximate number of messages that are in flight. Messages are considered to be in flight if they have been sent to a client but have not yet been deleted or have not yet reached the end of their visibility window.

      • CreatedTimestamp – Returns the time when the queue was created in seconds (epoch time).

      • DelaySeconds – Returns the default delay on the queue in seconds.

      • LastModifiedTimestamp – Returns the time when the queue was last changed in seconds (epoch time).

      • MaximumMessageSize – Returns the limit of how many bytes a message can contain before Amazon SQS rejects it.

      • MessageRetentionPeriod – Returns the length of time, in seconds, for which Amazon SQS retains a message. When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes and will impact existing messages in the queue, potentially causing them to be expired and deleted if the MessageRetentionPeriod is reduced below the age of existing messages.

      • Policy – Returns the policy of the queue.

      • QueueArn – Returns the Amazon resource name (ARN) of the queue.

      • ReceiveMessageWaitTimeSeconds – Returns the length of time, in seconds, for which the ReceiveMessage action waits for a message to arrive.

      • VisibilityTimeout – Returns the visibility timeout for the queue. For more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide.

      The following attributes apply only to dead-letter queues:

      • RedrivePolicy – The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows:

        • deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

        • maxReceiveCount – The number of times a message is delivered to the source queue before being moved to the dead-letter queue. Default: 10. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter queue.

      • RedriveAllowPolicy – The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows:

        • redrivePermission – The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are:

          • allowAll – (Default) Any source queues in this Amazon Web Services account in the same Region can specify this queue as the dead-letter queue.

          • denyAll – No source queues can specify this queue as the dead-letter queue.

          • byQueue – Only queues specified by the sourceQueueArns parameter can specify this queue as the dead-letter queue.

        • sourceQueueArns – The Amazon Resource Names (ARNs) of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the redrivePermission parameter is set to byQueue. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the redrivePermission parameter to allowAll.

      The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.

      The following attributes apply only to server-side-encryption:

      • KmsMasterKeyId – Returns the ID of an Amazon Web Services managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms.

      • KmsDataKeyReusePeriodSeconds – Returns the length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling KMS again. For more information, see How Does the Data Key Reuse Period Work?.

      • SqsManagedSseEnabled – Returns information about whether the queue is using SSE-SQS encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS).

      The following attributes apply only to FIFO (first-in-first-out) queues:

      • FifoQueue – Returns information about whether the queue is FIFO. For more information, see FIFO queue logic in the Amazon SQS Developer Guide.

        To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix.

      • ContentBasedDeduplication – Returns whether content-based deduplication is enabled for the queue. For more information, see Exactly-once processing in the Amazon SQS Developer Guide.

      The following attributes apply only to high throughput for FIFO queues:

      • DeduplicationScope – Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue.

      • FifoThroughputLimit – Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. The perMessageGroupId value is allowed only when the value for DeduplicationScope is messageGroup.

      To enable high throughput for FIFO queues, do the following:

      • Set DeduplicationScope to messageGroup.

      • Set FifoThroughputLimit to perMessageGroupId.

      If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified.

      For information on throughput quotas, see Quotas related to messages in the Amazon SQS Developer Guide.

      " } }, "documentation":"

      " @@ -799,6 +869,79 @@ }, "documentation":"

      A list of your dead letter source queues.

      " }, + "ListMessageMoveTasksRequest":{ + "type":"structure", + "required":["SourceArn"], + "members":{ + "SourceArn":{ + "shape":"String", + "documentation":"

      The ARN of the queue whose message movement tasks are to be listed.

      " + }, + "MaxResults":{ + "shape":"Integer", + "documentation":"

      The maximum number of results to include in the response. The default is 1, which provides the most recent message movement task. The upper limit is 10.

      " + } + } + }, + "ListMessageMoveTasksResult":{ + "type":"structure", + "members":{ + "Results":{ + "shape":"ListMessageMoveTasksResultEntryList", + "documentation":"

      A list of message movement tasks and their attributes.

      " + } + } + }, + "ListMessageMoveTasksResultEntry":{ + "type":"structure", + "members":{ + "TaskHandle":{ + "shape":"String", + "documentation":"

      An identifier associated with a message movement task. When this field is returned in the response of the ListMessageMoveTasks action, it is only populated for tasks that are in RUNNING status.

      " + }, + "Status":{ + "shape":"String", + "documentation":"

      The status of the message movement task. Possible values are: RUNNING, COMPLETED, CANCELLING, CANCELLED, and FAILED.

      " + }, + "SourceArn":{ + "shape":"String", + "documentation":"

      The ARN of the queue that contains the messages to be moved to another queue.

      " + }, + "DestinationArn":{ + "shape":"String", + "documentation":"

      The ARN of the destination queue if it has been specified in the StartMessageMoveTask request. If a DestinationArn has not been specified in the StartMessageMoveTask request, this field value will be NULL.

      " + }, + "MaxNumberOfMessagesPerSecond":{ + "shape":"Integer", + "documentation":"

      The number of messages to be moved per second (the message movement rate), if it has been specified in the StartMessageMoveTask request. If a MaxNumberOfMessagesPerSecond has not been specified in the StartMessageMoveTask request, this field value will be NULL.

      " + }, + "ApproximateNumberOfMessagesMoved":{ + "shape":"Long", + "documentation":"

      The approximate number of messages already moved to the destination queue.

      " + }, + "ApproximateNumberOfMessagesToMove":{ + "shape":"Long", + "documentation":"

      The number of messages to be moved from the source queue. This number is obtained at the time of starting the message movement task.

      " + }, + "FailureReason":{ + "shape":"String", + "documentation":"

      The task failure reason (only included if the task status is FAILED).

      " + }, + "StartedTimestamp":{ + "shape":"Long", + "documentation":"

      The timestamp of starting the message movement task.

      " + } + }, + "documentation":"

      Contains the details of a message movement task.

      " + }, + "ListMessageMoveTasksResultEntryList":{ + "type":"list", + "member":{ + "shape":"ListMessageMoveTasksResultEntry", + "locationName":"ListMessageMoveTasksResultEntry" + }, + "flattened":true + }, "ListQueueTagsRequest":{ "type":"structure", "required":["QueueUrl"], @@ -842,7 +985,7 @@ "members":{ "QueueUrls":{ "shape":"QueueUrlList", - "documentation":"

      A list of queue URLs, up to 1,000 entries, or the value of MaxResults that you sent in the request.

      " + "documentation":"

      A list of queue URLs, up to 1,000 entries, or the value of MaxResults that you sent in the request.

      " }, "NextToken":{ "shape":"Token", @@ -851,6 +994,7 @@ }, "documentation":"

      A list of your queues.

      " }, + "Long":{"type":"long"}, "Message":{ "type":"structure", "members":{ @@ -925,7 +1069,7 @@ "documentation":"

      Amazon SQS supports the following logical data types: String, Number, and Binary. For the Number data type, you must use StringValue.

      You can also append custom labels. For more information, see Amazon SQS Message Attributes in the Amazon SQS Developer Guide.

      " } }, - "documentation":"

      The user-specified message attribute value. For string data types, the Value attribute has the same restrictions on the content as the message body. For more information, see SendMessage.

      Name, type, value and the message body must not be empty or null. All parts of the message attribute, including Name, Type, and Value, are part of the message size restriction (256 KB or 262,144 bytes).

      " + "documentation":"

      The user-specified message attribute value. For string data types, the Value attribute has the same restrictions on the content as the message body. For more information, see SendMessage.

      Name, type, value and the message body must not be empty or null. All parts of the message attribute, including Name, Type, and Value, are part of the message size restriction (256 KiB or 262,144 bytes).

      " }, "MessageBodyAttributeMap":{ "type":"map", @@ -994,7 +1138,8 @@ "SequenceNumber", "MessageDeduplicationId", "MessageGroupId", - "AWSTraceHeader" + "AWSTraceHeader", + "DeadLetterQueueSourceArn" ] }, "MessageSystemAttributeNameForSends":{ @@ -1036,7 +1181,7 @@ "type":"structure", "members":{ }, - "documentation":"

      The specified action violates a limit. For example, ReceiveMessage returns this error if the maximum number of inflight messages is reached and AddPermission returns this error if the maximum number of permissions for the queue is reached.

      ", + "documentation":"

      The specified action violates a limit. For example, ReceiveMessage returns this error if the maximum number of in flight messages is reached and AddPermission returns this error if the maximum number of permissions for the queue is reached.

      ", "error":{ "code":"OverLimit", "httpStatusCode":403, @@ -1168,7 +1313,7 @@ }, "AttributeNames":{ "shape":"AttributeNameList", - "documentation":"

      A list of attributes that need to be returned along with each message. These attributes include:

      • All – Returns all values.

      • ApproximateFirstReceiveTimestamp – Returns the time the message was first received from the queue (epoch time in milliseconds).

      • ApproximateReceiveCount – Returns the number of times a message has been received across all queues but not deleted.

      • AWSTraceHeader – Returns the X-Ray trace header string.

      • SenderId

        • For an IAM user, returns the IAM user ID, for example ABCDEFGHI1JKLMNOPQ23R.

        • For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456.

      • SentTimestamp – Returns the time the message was sent to the queue (epoch time in milliseconds).

      • SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (e.g. SSE-KMS or SSE-SQS).

      • MessageDeduplicationId – Returns the value provided by the producer that calls the SendMessage action.

      • MessageGroupId – Returns the value provided by the producer that calls the SendMessage action. Messages with the same MessageGroupId are returned in sequence.

      • SequenceNumber – Returns the value provided by Amazon SQS.

      " + "documentation":"

      A list of attributes that need to be returned along with each message. These attributes include:

      • All – Returns all values.

      • ApproximateFirstReceiveTimestamp – Returns the time the message was first received from the queue (epoch time in milliseconds).

      • ApproximateReceiveCount – Returns the number of times a message has been received across all queues but not deleted.

      • AWSTraceHeader – Returns the X-Ray trace header string.

      • SenderId

        • For a user, returns the user ID, for example ABCDEFGHI1JKLMNOPQ23R.

        • For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456.

      • SentTimestamp – Returns the time the message was sent to the queue (epoch time in milliseconds).

      • SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS).

      • MessageDeduplicationId – Returns the value provided by the producer that calls the SendMessage action.

      • MessageGroupId – Returns the value provided by the producer that calls the SendMessage action. Messages with the same MessageGroupId are returned in sequence.

      • SequenceNumber – Returns the value provided by Amazon SQS.

      " }, "MessageAttributeNames":{ "shape":"MessageAttributeNameList", @@ -1221,6 +1366,18 @@ }, "documentation":"

      " }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + }, + "documentation":"

      One or more specified resources don't exist.

      ", + "error":{ + "code":"ResourceNotFoundException", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, "SendMessageBatchRequest":{ "type":"structure", "required":[ @@ -1361,7 +1518,7 @@ }, "MessageBody":{ "shape":"String", - "documentation":"

      The message to send. The minimum size is one character. The maximum size is 256 KB.

      A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed:

      #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF

      Any characters not included in this list will be rejected. For more information, see the W3C specification for characters.

      " + "documentation":"

      The message to send. The minimum size is one character. The maximum size is 256 KiB.

      A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed:

      #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF

      Any characters not included in this list will be rejected. For more information, see the W3C specification for characters.

      " }, "DelaySeconds":{ "shape":"Integer", @@ -1427,12 +1584,39 @@ }, "Attributes":{ "shape":"QueueAttributeMap", - "documentation":"

      A map of attributes to set.

      The following lists the names, descriptions, and values of the special request parameters that the SetQueueAttributes action uses:

      • DelaySeconds – The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 (15 minutes). Default: 0.

      • MaximumMessageSize – The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) up to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB).

      • MessageRetentionPeriod – The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer representing seconds, from 60 (1 minute) to 1,209,600 (14 days). Default: 345,600 (4 days).

      • Policy – The queue's policy. A valid Amazon Web Services policy. For more information about policy structure, see Overview of Amazon Web Services IAM Policies in the Identity and Access Management User Guide.

      • ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: An integer from 0 to 20 (seconds). Default: 0.

      • RedrivePolicy – The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. For more information about the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon SQS Developer Guide.

        • deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

        • maxReceiveCount – The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue.

        The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.

      • VisibilityTimeout – The visibility timeout for the queue, in seconds. Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide.

      The following attributes apply only to server-side-encryption:

      • KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias. For more examples, see KeyId in the Key Management Service API Reference.

      • KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work?.

      • SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (e.g. SSE-KMS or SSE-SQS).

      The following attribute applies only to FIFO (first-in-first-out) queues:

      • ContentBasedDeduplication – Enables content-based deduplication. For more information, see Exactly-once processing in the Amazon SQS Developer Guide. Note the following:

        • Every message must have a unique MessageDeduplicationId.

          • You may provide a MessageDeduplicationId explicitly.

          • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

          • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

          • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

        • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

        • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

      The following attributes apply only to high throughput for FIFO queues:

      • DeduplicationScope – Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue.

      • FifoThroughputLimit – Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. The perMessageGroupId value is allowed only when the value for DeduplicationScope is messageGroup.

      To enable high throughput for FIFO queues, do the following:

      • Set DeduplicationScope to messageGroup.

      • Set FifoThroughputLimit to perMessageGroupId.

      If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified.

      For information on throughput quotas, see Quotas related to messages in the Amazon SQS Developer Guide.

      ", + "documentation":"

      A map of attributes to set.

      The following lists the names, descriptions, and values of the special request parameters that the SetQueueAttributes action uses:

      • DelaySeconds – The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 (15 minutes). Default: 0.

      • MaximumMessageSize – The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) up to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB).

      • MessageRetentionPeriod – The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer representing seconds, from 60 (1 minute) to 1,209,600 (14 days). Default: 345,600 (4 days). When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes and will impact existing messages in the queue potentially causing them to be expired and deleted if the MessageRetentionPeriod is reduced below the age of existing messages.

      • Policy – The queue's policy. A valid Amazon Web Services policy. For more information about policy structure, see Overview of Amazon Web Services IAM Policies in the Identity and Access Management User Guide.

      • ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: An integer from 0 to 20 (seconds). Default: 0.

      • VisibilityTimeout – The visibility timeout for the queue, in seconds. Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide.

      The following attributes apply only to dead-letter queues:

      • RedrivePolicy – The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows:

        • deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

        • maxReceiveCount – The number of times a message is delivered to the source queue before being moved to the dead-letter queue. Default: 10. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter queue.

      • RedriveAllowPolicy – The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows:

        • redrivePermission – The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are:

          • allowAll – (Default) Any source queues in this Amazon Web Services account in the same Region can specify this queue as the dead-letter queue.

          • denyAll – No source queues can specify this queue as the dead-letter queue.

          • byQueue – Only queues specified by the sourceQueueArns parameter can specify this queue as the dead-letter queue.

        • sourceQueueArns – The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the redrivePermission parameter is set to byQueue. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the redrivePermission parameter to allowAll.

      The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.

      The following attributes apply only to server-side-encryption:

      • KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias. For more examples, see KeyId in the Key Management Service API Reference.

      • KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work?.

      • SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS).

      The following attribute applies only to FIFO (first-in-first-out) queues:

      • ContentBasedDeduplication – Enables content-based deduplication. For more information, see Exactly-once processing in the Amazon SQS Developer Guide. Note the following:

        • Every message must have a unique MessageDeduplicationId.

          • You may provide a MessageDeduplicationId explicitly.

          • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

          • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

          • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

        • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

        • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

      The following attributes apply only to high throughput for FIFO queues:

      • DeduplicationScope – Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue.

      • FifoThroughputLimit – Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. The perMessageGroupId value is allowed only when the value for DeduplicationScope is messageGroup.

      To enable high throughput for FIFO queues, do the following:

      • Set DeduplicationScope to messageGroup.

      • Set FifoThroughputLimit to perMessageGroupId.

      If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified.

      For information on throughput quotas, see Quotas related to messages in the Amazon SQS Developer Guide.

      ", "locationName":"Attribute" } }, "documentation":"

      " }, + "StartMessageMoveTaskRequest":{ + "type":"structure", + "required":["SourceArn"], + "members":{ + "SourceArn":{ + "shape":"String", + "documentation":"

      The ARN of the queue that contains the messages to be moved to another queue. Currently, only dead-letter queue (DLQ) ARNs are accepted.

      " + }, + "DestinationArn":{ + "shape":"String", + "documentation":"

      The ARN of the queue that receives the moved messages. You can use this field to specify the destination queue where you would like to redrive messages. If this field is left blank, the messages will be redriven back to their respective original source queues.

      " + }, + "MaxNumberOfMessagesPerSecond":{ + "shape":"Integer", + "documentation":"

      The number of messages to be moved per second (the message movement rate). You can use this field to define a fixed message movement rate. The maximum value for messages per second is 500. If this field is left blank, the system will optimize the rate based on the queue message backlog size, which may vary throughout the duration of the message movement task.

      " + } + } + }, + "StartMessageMoveTaskResult":{ + "type":"structure", + "members":{ + "TaskHandle":{ + "shape":"String", + "documentation":"

      An identifier associated with a message movement task. You can use this identifier to cancel a specified message movement task using the CancelMessageMoveTask action.

      " + } + } + }, "String":{"type":"string"}, "StringList":{ "type":"list", diff --git a/services/ssm/pom.xml b/services/ssm/pom.xml index 78d3bf747a43..392a1e44a3d1 100644 --- a/services/ssm/pom.xml +++ b/services/ssm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ssm AWS Java SDK :: Services :: AWS Simple Systems Management (SSM) diff --git a/services/ssmcontacts/pom.xml b/services/ssmcontacts/pom.xml index 367d493f1677..c7f089e8bd9b 100644 --- a/services/ssmcontacts/pom.xml +++ b/services/ssmcontacts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ssmcontacts AWS Java SDK :: Services :: SSM Contacts diff --git a/services/ssmincidents/pom.xml b/services/ssmincidents/pom.xml index 2c30c1ef22c9..249dd45cdbc2 100644 --- a/services/ssmincidents/pom.xml +++ b/services/ssmincidents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ssmincidents AWS Java SDK :: Services :: SSM Incidents diff --git a/services/ssmsap/pom.xml b/services/ssmsap/pom.xml index d154da7ff429..2ced315b3de4 100644 --- a/services/ssmsap/pom.xml +++ b/services/ssmsap/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ssmsap AWS Java SDK :: Services :: Ssm Sap diff --git a/services/sso/pom.xml b/services/sso/pom.xml index c88d00650730..afcf9eda6e90 100644 --- a/services/sso/pom.xml +++ b/services/sso/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT sso AWS Java SDK :: Services :: SSO diff --git a/services/ssoadmin/pom.xml b/services/ssoadmin/pom.xml index cc8df7d408a4..8da16eee2592 100644 --- a/services/ssoadmin/pom.xml +++ b/services/ssoadmin/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ssoadmin AWS Java SDK :: Services :: SSO Admin diff --git a/services/ssooidc/pom.xml b/services/ssooidc/pom.xml index 
283500f3bca5..a03432cb144d 100644 --- a/services/ssooidc/pom.xml +++ b/services/ssooidc/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ssooidc AWS Java SDK :: Services :: SSO OIDC diff --git a/services/storagegateway/pom.xml b/services/storagegateway/pom.xml index 5b2b9712c832..3b740ab3a8c5 100644 --- a/services/storagegateway/pom.xml +++ b/services/storagegateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT storagegateway AWS Java SDK :: Services :: AWS Storage Gateway diff --git a/services/sts/pom.xml b/services/sts/pom.xml index 9307cbf9bf55..365b68f9028f 100644 --- a/services/sts/pom.xml +++ b/services/sts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT sts AWS Java SDK :: Services :: AWS STS diff --git a/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/SessionCredentialsHolder.java b/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/SessionCredentialsHolder.java deleted file mode 100644 index 7f6a003e7aeb..000000000000 --- a/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/SessionCredentialsHolder.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.sts.auth; - -import java.util.Date; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.annotations.ThreadSafe; -import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; -import software.amazon.awssdk.services.sts.model.Credentials; - -/** - * Holder class used to atomically store a session with its expiration time. - */ -@SdkInternalApi -@ThreadSafe -final class SessionCredentialsHolder { - - private final AwsSessionCredentials sessionCredentials; - private final Date sessionCredentialsExpiration; - - SessionCredentialsHolder(Credentials credentials) { - this.sessionCredentials = AwsSessionCredentials.create(credentials.accessKeyId(), - credentials.secretAccessKey(), - credentials.sessionToken()); - this.sessionCredentialsExpiration = Date.from(credentials.expiration()); - } - - public AwsSessionCredentials getSessionCredentials() { - return sessionCredentials; - } - - public Date getSessionCredentialsExpiration() { - return sessionCredentialsExpiration; - } -} diff --git a/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsAssumeRoleCredentialsProvider.java b/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsAssumeRoleCredentialsProvider.java index a67ed53b5766..6a7a774b7c5f 100644 --- a/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsAssumeRoleCredentialsProvider.java +++ b/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsAssumeRoleCredentialsProvider.java @@ -15,15 +15,17 @@ package software.amazon.awssdk.services.sts.auth; +import static software.amazon.awssdk.services.sts.internal.StsAuthUtils.toAwsSessionCredentials; + import java.util.function.Consumer; import java.util.function.Supplier; import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.ThreadSafe; import 
software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; import software.amazon.awssdk.services.sts.StsClient; import software.amazon.awssdk.services.sts.model.AssumeRoleRequest; -import software.amazon.awssdk.services.sts.model.Credentials; import software.amazon.awssdk.utils.ToString; import software.amazon.awssdk.utils.Validate; import software.amazon.awssdk.utils.builder.ToCopyableBuilder; @@ -65,10 +67,10 @@ public static Builder builder() { } @Override - protected Credentials getUpdatedCredentials(StsClient stsClient) { + protected AwsSessionCredentials getUpdatedCredentials(StsClient stsClient) { AssumeRoleRequest assumeRoleRequest = assumeRoleRequestSupplier.get(); Validate.notNull(assumeRoleRequest, "Assume role request must not be null."); - return stsClient.assumeRole(assumeRoleRequest).credentials(); + return toAwsSessionCredentials(stsClient.assumeRole(assumeRoleRequest).credentials()); } @Override diff --git a/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsAssumeRoleWithSamlCredentialsProvider.java b/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsAssumeRoleWithSamlCredentialsProvider.java index 5b80af1b14d6..eac7a0f1a30e 100644 --- a/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsAssumeRoleWithSamlCredentialsProvider.java +++ b/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsAssumeRoleWithSamlCredentialsProvider.java @@ -15,15 +15,17 @@ package software.amazon.awssdk.services.sts.auth; +import static software.amazon.awssdk.services.sts.internal.StsAuthUtils.toAwsSessionCredentials; + import java.util.function.Consumer; import java.util.function.Supplier; import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.ThreadSafe; import 
software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; import software.amazon.awssdk.services.sts.StsClient; import software.amazon.awssdk.services.sts.model.AssumeRoleWithSamlRequest; -import software.amazon.awssdk.services.sts.model.Credentials; import software.amazon.awssdk.utils.ToString; import software.amazon.awssdk.utils.Validate; import software.amazon.awssdk.utils.builder.ToCopyableBuilder; @@ -66,10 +68,10 @@ public static Builder builder() { } @Override - protected Credentials getUpdatedCredentials(StsClient stsClient) { + protected AwsSessionCredentials getUpdatedCredentials(StsClient stsClient) { AssumeRoleWithSamlRequest assumeRoleWithSamlRequest = assumeRoleWithSamlRequestSupplier.get(); Validate.notNull(assumeRoleWithSamlRequest, "Assume role with saml request must not be null."); - return stsClient.assumeRoleWithSAML(assumeRoleWithSamlRequest).credentials(); + return toAwsSessionCredentials(stsClient.assumeRoleWithSAML(assumeRoleWithSamlRequest).credentials()); } @Override diff --git a/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsAssumeRoleWithWebIdentityCredentialsProvider.java b/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsAssumeRoleWithWebIdentityCredentialsProvider.java index de467c27e5a0..f29d7b3e772f 100644 --- a/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsAssumeRoleWithWebIdentityCredentialsProvider.java +++ b/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsAssumeRoleWithWebIdentityCredentialsProvider.java @@ -15,6 +15,7 @@ package software.amazon.awssdk.services.sts.auth; +import static software.amazon.awssdk.services.sts.internal.StsAuthUtils.toAwsSessionCredentials; import static software.amazon.awssdk.utils.Validate.notNull; import java.util.function.Consumer; @@ -23,9 +24,9 @@ import software.amazon.awssdk.annotations.SdkPublicApi; import 
software.amazon.awssdk.annotations.ThreadSafe; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; import software.amazon.awssdk.services.sts.StsClient; import software.amazon.awssdk.services.sts.model.AssumeRoleWithWebIdentityRequest; -import software.amazon.awssdk.services.sts.model.Credentials; import software.amazon.awssdk.utils.ToString; import software.amazon.awssdk.utils.builder.ToCopyableBuilder; @@ -67,10 +68,10 @@ public static Builder builder() { } @Override - protected Credentials getUpdatedCredentials(StsClient stsClient) { + protected AwsSessionCredentials getUpdatedCredentials(StsClient stsClient) { AssumeRoleWithWebIdentityRequest request = assumeRoleWithWebIdentityRequest.get(); notNull(request, "AssumeRoleWithWebIdentityRequest can't be null"); - return stsClient.assumeRoleWithWebIdentity(request).credentials(); + return toAwsSessionCredentials(stsClient.assumeRoleWithWebIdentity(request).credentials()); } @Override diff --git a/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsCredentialsProvider.java b/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsCredentialsProvider.java index c09aa0c4dc38..3162bbb62de4 100644 --- a/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsCredentialsProvider.java +++ b/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsCredentialsProvider.java @@ -20,12 +20,12 @@ import java.util.Optional; import java.util.function.Function; import software.amazon.awssdk.annotations.NotThreadSafe; -import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.ThreadSafe; import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import 
software.amazon.awssdk.auth.credentials.AwsSessionCredentials; import software.amazon.awssdk.services.sts.StsClient; -import software.amazon.awssdk.services.sts.model.Credentials; import software.amazon.awssdk.utils.SdkAutoCloseable; import software.amazon.awssdk.utils.Validate; import software.amazon.awssdk.utils.builder.CopyableBuilder; @@ -46,8 +46,8 @@ * Users of this provider must {@link #close()} it when they are finished using it. */ @ThreadSafe -@SdkInternalApi -abstract class StsCredentialsProvider implements AwsCredentialsProvider, SdkAutoCloseable { +@SdkPublicApi +public abstract class StsCredentialsProvider implements AwsCredentialsProvider, SdkAutoCloseable { private static final Duration DEFAULT_STALE_TIME = Duration.ofMinutes(1); private static final Duration DEFAULT_PREFETCH_TIME = Duration.ofMinutes(5); @@ -60,20 +60,20 @@ abstract class StsCredentialsProvider implements AwsCredentialsProvider, SdkAuto /** * The session cache that handles automatically updating the credentials when they get close to expiring. 
*/ - private final CachedSupplier sessionCache; + private final CachedSupplier sessionCache; private final Duration staleTime; private final Duration prefetchTime; private final Boolean asyncCredentialUpdateEnabled; - protected StsCredentialsProvider(BaseBuilder builder, String asyncThreadName) { + StsCredentialsProvider(BaseBuilder builder, String asyncThreadName) { this.stsClient = Validate.notNull(builder.stsClient, "STS client must not be null."); this.staleTime = Optional.ofNullable(builder.staleTime).orElse(DEFAULT_STALE_TIME); this.prefetchTime = Optional.ofNullable(builder.prefetchTime).orElse(DEFAULT_PREFETCH_TIME); this.asyncCredentialUpdateEnabled = builder.asyncCredentialUpdateEnabled; - CachedSupplier.Builder cacheBuilder = CachedSupplier.builder(this::updateSessionCredentials); + CachedSupplier.Builder cacheBuilder = CachedSupplier.builder(this::updateSessionCredentials); if (builder.asyncCredentialUpdateEnabled) { cacheBuilder.prefetchStrategy(new NonBlocking(asyncThreadName)); } @@ -84,9 +84,11 @@ protected StsCredentialsProvider(BaseBuilder builder, String asyncThreadNa * Update the expiring session credentials by calling STS. Invoked by {@link CachedSupplier} when the credentials * are close to expiring. 
*/ - private RefreshResult updateSessionCredentials() { - SessionCredentialsHolder credentials = new SessionCredentialsHolder(getUpdatedCredentials(stsClient)); - Instant actualTokenExpiration = credentials.getSessionCredentialsExpiration().toInstant(); + private RefreshResult updateSessionCredentials() { + AwsSessionCredentials credentials = getUpdatedCredentials(stsClient); + Instant actualTokenExpiration = + credentials.expirationTime() + .orElseThrow(() -> new IllegalStateException("Sourced credentials have no expiration value")); return RefreshResult.builder(credentials) .staleTime(actualTokenExpiration.minus(staleTime)) @@ -96,7 +98,7 @@ private RefreshResult updateSessionCredentials() { @Override public AwsCredentials resolveCredentials() { - return sessionCache.get().getSessionCredentials(); + return sessionCache.get(); } @Override @@ -123,13 +125,14 @@ public Duration prefetchTime() { /** * Implemented by a child class to call STS and get a new set of credentials to be used by this provider. */ - protected abstract Credentials getUpdatedCredentials(StsClient stsClient); + abstract AwsSessionCredentials getUpdatedCredentials(StsClient stsClient); /** * Extended by child class's builders to share configuration across credential providers. 
*/ @NotThreadSafe - protected abstract static class BaseBuilder, T extends ToCopyableBuilder> + @SdkPublicApi + public abstract static class BaseBuilder, T extends ToCopyableBuilder> implements CopyableBuilder { private final Function providerConstructor; @@ -138,11 +141,11 @@ protected abstract static class BaseBuilder, T exten private Duration staleTime; private Duration prefetchTime; - protected BaseBuilder(Function providerConstructor) { + BaseBuilder(Function providerConstructor) { this.providerConstructor = providerConstructor; } - public BaseBuilder(Function providerConstructor, StsCredentialsProvider provider) { + BaseBuilder(Function providerConstructor, StsCredentialsProvider provider) { this.providerConstructor = providerConstructor; this.asyncCredentialUpdateEnabled = provider.asyncCredentialUpdateEnabled; this.stsClient = provider.stsClient; diff --git a/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsGetFederationTokenCredentialsProvider.java b/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsGetFederationTokenCredentialsProvider.java index 4406b0ae47f6..cfa57e2ce047 100644 --- a/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsGetFederationTokenCredentialsProvider.java +++ b/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsGetFederationTokenCredentialsProvider.java @@ -15,13 +15,15 @@ package software.amazon.awssdk.services.sts.auth; +import static software.amazon.awssdk.services.sts.internal.StsAuthUtils.toAwsSessionCredentials; + import java.util.function.Consumer; import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.ThreadSafe; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; import software.amazon.awssdk.services.sts.StsClient; -import 
software.amazon.awssdk.services.sts.model.Credentials; import software.amazon.awssdk.services.sts.model.GetFederationTokenRequest; import software.amazon.awssdk.utils.ToString; import software.amazon.awssdk.utils.Validate; @@ -64,8 +66,8 @@ public static Builder builder() { } @Override - protected Credentials getUpdatedCredentials(StsClient stsClient) { - return stsClient.getFederationToken(getFederationTokenRequest).credentials(); + protected AwsSessionCredentials getUpdatedCredentials(StsClient stsClient) { + return toAwsSessionCredentials(stsClient.getFederationToken(getFederationTokenRequest).credentials()); } @Override diff --git a/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsGetSessionTokenCredentialsProvider.java b/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsGetSessionTokenCredentialsProvider.java index ac56c560bef5..1bd62b7cedcf 100644 --- a/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsGetSessionTokenCredentialsProvider.java +++ b/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsGetSessionTokenCredentialsProvider.java @@ -15,13 +15,15 @@ package software.amazon.awssdk.services.sts.auth; +import static software.amazon.awssdk.services.sts.internal.StsAuthUtils.toAwsSessionCredentials; + import java.util.function.Consumer; import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.ThreadSafe; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; import software.amazon.awssdk.services.sts.StsClient; -import software.amazon.awssdk.services.sts.model.Credentials; import software.amazon.awssdk.services.sts.model.GetSessionTokenRequest; import software.amazon.awssdk.utils.ToString; import software.amazon.awssdk.utils.Validate; @@ -64,8 +66,8 @@ public static Builder builder() { } 
@Override - protected Credentials getUpdatedCredentials(StsClient stsClient) { - return stsClient.getSessionToken(getSessionTokenRequest).credentials(); + protected AwsSessionCredentials getUpdatedCredentials(StsClient stsClient) { + return toAwsSessionCredentials(stsClient.getSessionToken(getSessionTokenRequest).credentials()); } @Override diff --git a/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsWebIdentityTokenFileCredentialsProvider.java b/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsWebIdentityTokenFileCredentialsProvider.java index ce98d97f59d0..23eb51a4dfe8 100644 --- a/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsWebIdentityTokenFileCredentialsProvider.java +++ b/services/sts/src/main/java/software/amazon/awssdk/services/sts/auth/StsWebIdentityTokenFileCredentialsProvider.java @@ -15,6 +15,7 @@ package software.amazon.awssdk.services.sts.auth; +import static software.amazon.awssdk.services.sts.internal.StsAuthUtils.toAwsSessionCredentials; import static software.amazon.awssdk.utils.StringUtils.trim; import static software.amazon.awssdk.utils.Validate.notNull; @@ -25,12 +26,12 @@ import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; import software.amazon.awssdk.auth.credentials.internal.WebIdentityTokenCredentialProperties; import software.amazon.awssdk.core.SdkSystemSetting; import software.amazon.awssdk.services.sts.StsClient; import software.amazon.awssdk.services.sts.internal.AssumeRoleWithWebIdentityRequestSupplier; import software.amazon.awssdk.services.sts.model.AssumeRoleWithWebIdentityRequest; -import software.amazon.awssdk.services.sts.model.Credentials; import software.amazon.awssdk.utils.ToString; import software.amazon.awssdk.utils.builder.ToCopyableBuilder; @@ 
-140,10 +141,10 @@ public String toString() { } @Override - protected Credentials getUpdatedCredentials(StsClient stsClient) { + protected AwsSessionCredentials getUpdatedCredentials(StsClient stsClient) { AssumeRoleWithWebIdentityRequest request = assumeRoleWithWebIdentityRequest.get(); notNull(request, "AssumeRoleWithWebIdentityRequest can't be null"); - return stsClient.assumeRoleWithWebIdentity(request).credentials(); + return toAwsSessionCredentials(stsClient.assumeRoleWithWebIdentity(request).credentials()); } @Override diff --git a/services/sts/src/main/java/software/amazon/awssdk/services/sts/internal/StsAuthUtils.java b/services/sts/src/main/java/software/amazon/awssdk/services/sts/internal/StsAuthUtils.java new file mode 100644 index 000000000000..1f02431e3a64 --- /dev/null +++ b/services/sts/src/main/java/software/amazon/awssdk/services/sts/internal/StsAuthUtils.java @@ -0,0 +1,35 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.sts.internal; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; +import software.amazon.awssdk.services.sts.model.Credentials; + +@SdkInternalApi +public final class StsAuthUtils { + private StsAuthUtils() { + } + + public static AwsSessionCredentials toAwsSessionCredentials(Credentials credentials) { + return AwsSessionCredentials.builder() + .accessKeyId(credentials.accessKeyId()) + .secretAccessKey(credentials.secretAccessKey()) + .sessionToken(credentials.sessionToken()) + .expirationTime(credentials.expiration()) + .build(); + } +} diff --git a/services/sts/src/main/resources/codegen-resources/endpoint-tests.json b/services/sts/src/main/resources/codegen-resources/endpoint-tests.json index 390976f40c39..b566f4aac4a6 100644 --- a/services/sts/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/sts/src/main/resources/codegen-resources/endpoint-tests.json @@ -702,8 +702,8 @@ "properties": { "authSchemes": [ { - "signingName": "sts", "signingRegion": "us-east-1", + "signingName": "sts", "name": "sigv4" } ] @@ -721,10 +721,10 @@ } ], "params": { - "Region": "ap-northeast-1", - "UseFIPS": false, + "UseGlobalEndpoint": true, "UseDualStack": false, - "UseGlobalEndpoint": true + "UseFIPS": false, + "Region": "ap-northeast-1" } }, { @@ -734,8 +734,8 @@ "properties": { "authSchemes": [ { - "signingName": "sts", "signingRegion": "us-east-1", + "signingName": "sts", "name": "sigv4" } ] @@ -753,10 +753,10 @@ } ], "params": { - "Region": "ap-south-1", - "UseFIPS": false, + "UseGlobalEndpoint": true, "UseDualStack": false, - "UseGlobalEndpoint": true + "UseFIPS": false, + "Region": "ap-south-1" } }, { @@ -766,8 +766,8 @@ "properties": { "authSchemes": [ { - "signingName": "sts", "signingRegion": "us-east-1", + "signingName": "sts", "name": "sigv4" } ] @@ -785,10 +785,10 @@ } ], "params": { - "Region": "ap-southeast-1", - 
"UseFIPS": false, + "UseGlobalEndpoint": true, "UseDualStack": false, - "UseGlobalEndpoint": true + "UseFIPS": false, + "Region": "ap-southeast-1" } }, { @@ -798,8 +798,8 @@ "properties": { "authSchemes": [ { - "signingName": "sts", "signingRegion": "us-east-1", + "signingName": "sts", "name": "sigv4" } ] @@ -817,10 +817,10 @@ } ], "params": { - "Region": "ap-southeast-2", - "UseFIPS": false, + "UseGlobalEndpoint": true, "UseDualStack": false, - "UseGlobalEndpoint": true + "UseFIPS": false, + "Region": "ap-southeast-2" } }, { @@ -830,8 +830,8 @@ "properties": { "authSchemes": [ { - "signingName": "sts", "signingRegion": "us-east-1", + "signingName": "sts", "name": "sigv4" } ] @@ -849,10 +849,10 @@ } ], "params": { - "Region": "aws-global", - "UseFIPS": false, + "UseGlobalEndpoint": true, "UseDualStack": false, - "UseGlobalEndpoint": true + "UseFIPS": false, + "Region": "aws-global" } }, { @@ -862,8 +862,8 @@ "properties": { "authSchemes": [ { - "signingName": "sts", "signingRegion": "us-east-1", + "signingName": "sts", "name": "sigv4" } ] @@ -881,10 +881,10 @@ } ], "params": { - "Region": "ca-central-1", - "UseFIPS": false, + "UseGlobalEndpoint": true, "UseDualStack": false, - "UseGlobalEndpoint": true + "UseFIPS": false, + "Region": "ca-central-1" } }, { @@ -894,8 +894,8 @@ "properties": { "authSchemes": [ { - "signingName": "sts", "signingRegion": "us-east-1", + "signingName": "sts", "name": "sigv4" } ] @@ -913,10 +913,10 @@ } ], "params": { - "Region": "eu-central-1", - "UseFIPS": false, + "UseGlobalEndpoint": true, "UseDualStack": false, - "UseGlobalEndpoint": true + "UseFIPS": false, + "Region": "eu-central-1" } }, { @@ -926,8 +926,8 @@ "properties": { "authSchemes": [ { - "signingName": "sts", "signingRegion": "us-east-1", + "signingName": "sts", "name": "sigv4" } ] @@ -945,10 +945,10 @@ } ], "params": { - "Region": "eu-north-1", - "UseFIPS": false, + "UseGlobalEndpoint": true, "UseDualStack": false, - "UseGlobalEndpoint": true + "UseFIPS": false, + "Region": 
"eu-north-1" } }, { @@ -958,8 +958,8 @@ "properties": { "authSchemes": [ { - "signingName": "sts", "signingRegion": "us-east-1", + "signingName": "sts", "name": "sigv4" } ] @@ -977,10 +977,10 @@ } ], "params": { - "Region": "eu-west-1", - "UseFIPS": false, + "UseGlobalEndpoint": true, "UseDualStack": false, - "UseGlobalEndpoint": true + "UseFIPS": false, + "Region": "eu-west-1" } }, { @@ -990,8 +990,8 @@ "properties": { "authSchemes": [ { - "signingName": "sts", "signingRegion": "us-east-1", + "signingName": "sts", "name": "sigv4" } ] @@ -1009,10 +1009,10 @@ } ], "params": { - "Region": "eu-west-2", - "UseFIPS": false, + "UseGlobalEndpoint": true, "UseDualStack": false, - "UseGlobalEndpoint": true + "UseFIPS": false, + "Region": "eu-west-2" } }, { @@ -1022,8 +1022,8 @@ "properties": { "authSchemes": [ { - "signingName": "sts", "signingRegion": "us-east-1", + "signingName": "sts", "name": "sigv4" } ] @@ -1041,10 +1041,10 @@ } ], "params": { - "Region": "eu-west-3", - "UseFIPS": false, + "UseGlobalEndpoint": true, "UseDualStack": false, - "UseGlobalEndpoint": true + "UseFIPS": false, + "Region": "eu-west-3" } }, { @@ -1054,8 +1054,8 @@ "properties": { "authSchemes": [ { - "signingName": "sts", "signingRegion": "us-east-1", + "signingName": "sts", "name": "sigv4" } ] @@ -1073,10 +1073,10 @@ } ], "params": { - "Region": "sa-east-1", - "UseFIPS": false, + "UseGlobalEndpoint": true, "UseDualStack": false, - "UseGlobalEndpoint": true + "UseFIPS": false, + "Region": "sa-east-1" } }, { @@ -1086,8 +1086,8 @@ "properties": { "authSchemes": [ { - "signingName": "sts", "signingRegion": "us-east-1", + "signingName": "sts", "name": "sigv4" } ] @@ -1105,10 +1105,10 @@ } ], "params": { - "Region": "us-east-1", - "UseFIPS": false, + "UseGlobalEndpoint": true, "UseDualStack": false, - "UseGlobalEndpoint": true + "UseFIPS": false, + "Region": "us-east-1" } }, { @@ -1118,8 +1118,8 @@ "properties": { "authSchemes": [ { - "signingName": "sts", "signingRegion": "us-east-1", + 
"signingName": "sts", "name": "sigv4" } ] @@ -1137,10 +1137,10 @@ } ], "params": { - "Region": "us-east-2", - "UseFIPS": false, + "UseGlobalEndpoint": true, "UseDualStack": false, - "UseGlobalEndpoint": true + "UseFIPS": false, + "Region": "us-east-2" } }, { @@ -1150,8 +1150,8 @@ "properties": { "authSchemes": [ { - "signingName": "sts", "signingRegion": "us-east-1", + "signingName": "sts", "name": "sigv4" } ] @@ -1169,10 +1169,10 @@ } ], "params": { - "Region": "us-west-1", - "UseFIPS": false, + "UseGlobalEndpoint": true, "UseDualStack": false, - "UseGlobalEndpoint": true + "UseFIPS": false, + "Region": "us-west-1" } }, { @@ -1182,8 +1182,8 @@ "properties": { "authSchemes": [ { - "signingName": "sts", "signingRegion": "us-east-1", + "signingName": "sts", "name": "sigv4" } ] @@ -1201,10 +1201,10 @@ } ], "params": { - "Region": "us-west-2", - "UseFIPS": false, + "UseGlobalEndpoint": true, "UseDualStack": false, - "UseGlobalEndpoint": true + "UseFIPS": false, + "Region": "us-west-2" } }, { @@ -1214,8 +1214,8 @@ "properties": { "authSchemes": [ { - "signingName": "sts", "signingRegion": "us-east-3", + "signingName": "sts", "name": "sigv4" } ] @@ -1233,10 +1233,10 @@ } ], "params": { - "Region": "us-east-3", - "UseFIPS": false, + "UseGlobalEndpoint": true, "UseDualStack": false, - "UseGlobalEndpoint": true + "UseFIPS": false, + "Region": "us-east-3" } }, { @@ -1257,10 +1257,10 @@ } ], "params": { - "Region": "us-west-1", - "UseFIPS": false, - "UseDualStack": false, "UseGlobalEndpoint": true, + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-west-1", "Endpoint": "https://example.com" } }, @@ -1273,9 +1273,9 @@ }, "params": { "Endpoint": "https://example.com", - "UseFIPS": false, + "UseGlobalEndpoint": false, "UseDualStack": false, - "UseGlobalEndpoint": false + "UseFIPS": false } } ], diff --git a/services/sts/src/main/resources/codegen-resources/service-2.json b/services/sts/src/main/resources/codegen-resources/service-2.json index f230c9296e12..cb44d617c967 
100644 --- a/services/sts/src/main/resources/codegen-resources/service-2.json +++ b/services/sts/src/main/resources/codegen-resources/service-2.json @@ -693,7 +693,8 @@ "SAMLAssertionType":{ "type":"string", "max":100000, - "min":4 + "min":4, + "sensitive":true }, "Subject":{"type":"string"}, "SubjectType":{"type":"string"}, @@ -721,7 +722,10 @@ "min":16, "pattern":"[\\w]*" }, - "accessKeySecretType":{"type":"string"}, + "accessKeySecretType":{ + "type":"string", + "sensitive":true + }, "accountType":{"type":"string"}, "arnType":{ "type":"string", @@ -738,7 +742,8 @@ "clientTokenType":{ "type":"string", "max":20000, - "min":4 + "min":4, + "sensitive":true }, "dateType":{"type":"timestamp"}, "decodedMessageType":{"type":"string"}, diff --git a/services/support/pom.xml b/services/support/pom.xml index 26f72dd6f7eb..cdb6a1655e5c 100644 --- a/services/support/pom.xml +++ b/services/support/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT support AWS Java SDK :: Services :: AWS Support diff --git a/services/supportapp/pom.xml b/services/supportapp/pom.xml index c9fcd01b11cb..c686ea4b9163 100644 --- a/services/supportapp/pom.xml +++ b/services/supportapp/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT supportapp AWS Java SDK :: Services :: Support App diff --git a/services/swf/pom.xml b/services/swf/pom.xml index d66289d64029..897bc89cfcf2 100644 --- a/services/swf/pom.xml +++ b/services/swf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT swf AWS Java SDK :: Services :: Amazon SWF diff --git a/services/synthetics/pom.xml b/services/synthetics/pom.xml index c6aad262efc8..a9f9dfdc556e 100644 --- a/services/synthetics/pom.xml +++ b/services/synthetics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT synthetics AWS Java SDK :: Services :: Synthetics diff --git a/services/textract/pom.xml 
b/services/textract/pom.xml index b3deb9995520..bd1133fda932 100644 --- a/services/textract/pom.xml +++ b/services/textract/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT textract AWS Java SDK :: Services :: Textract diff --git a/services/timestreamquery/pom.xml b/services/timestreamquery/pom.xml index 92dd177141bf..167f6d64e4eb 100644 --- a/services/timestreamquery/pom.xml +++ b/services/timestreamquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT timestreamquery AWS Java SDK :: Services :: Timestream Query diff --git a/services/timestreamwrite/pom.xml b/services/timestreamwrite/pom.xml index e7d9c7c50b25..0f13c9b41855 100644 --- a/services/timestreamwrite/pom.xml +++ b/services/timestreamwrite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT timestreamwrite AWS Java SDK :: Services :: Timestream Write diff --git a/services/timestreamwrite/src/main/resources/codegen-resources/endpoint-tests.json b/services/timestreamwrite/src/main/resources/codegen-resources/endpoint-tests.json index 21efa51ffd86..37439a64a27a 100644 --- a/services/timestreamwrite/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/timestreamwrite/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -21,9 +21,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -47,9 +47,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ 
} }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -99,9 +99,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -112,9 +112,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -125,9 +125,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -138,9 +138,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -151,9 +151,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -164,9 +175,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": 
false, + "UseDualStack": true } }, { @@ -177,9 +199,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -190,9 +223,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -203,9 +247,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -216,9 +260,9 @@ } }, "params": { - "UseDualStack": false, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -230,8 +274,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -241,9 +285,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -253,11 +297,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, 
+ { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/timestreamwrite/src/main/resources/codegen-resources/service-2.json b/services/timestreamwrite/src/main/resources/codegen-resources/service-2.json index 86bfc0adae31..b415b7b1b695 100644 --- a/services/timestreamwrite/src/main/resources/codegen-resources/service-2.json +++ b/services/timestreamwrite/src/main/resources/codegen-resources/service-2.json @@ -32,7 +32,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"InvalidEndpointException"} ], - "documentation":"

      Creates a new Timestream batch load task. A batch load task processes data from a CSV source in an S3 location and writes to a Timestream table. A mapping from source to target is defined in a batch load task. Errors and events are written to a report at an S3 location. For the report, if the KMS key is not specified, the batch load task will be encrypted with a Timestream managed KMS key located in your account. For more information, see Amazon Web Services managed keys. Service quotas apply. For details, see code sample.

      ", + "documentation":"

      Creates a new Timestream batch load task. A batch load task processes data from a CSV source in an S3 location and writes to a Timestream table. A mapping from source to target is defined in a batch load task. Errors and events are written to a report at an S3 location. For the report, if the KMS key is not specified, the report will be encrypted with an S3 managed key when SSE_S3 is the option. Otherwise an error is thrown. For more information, see Amazon Web Services managed keys. Service quotas apply. For details, see code sample.

      ", "endpointdiscovery":{"required":true} }, "CreateDatabase":{ @@ -649,6 +649,10 @@ "MagneticStoreWriteProperties":{ "shape":"MagneticStoreWriteProperties", "documentation":"

      Contains properties to set on the table when enabling magnetic store writes.

      " + }, + "Schema":{ + "shape":"Schema", + "documentation":"

      The schema of the table.

      " } } }, @@ -1164,7 +1168,7 @@ }, "Value":{ "shape":"StringValue2048", - "documentation":"

      The value for the MeasureValue.

      " + "documentation":"

      The value for the MeasureValue. For information, see Data types.

      " }, "Type":{ "shape":"MeasureValueType", @@ -1276,6 +1280,44 @@ "max":20, "min":1 }, + "PartitionKey":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{ + "shape":"PartitionKeyType", + "documentation":"

      The type of the partition key. Options are DIMENSION (dimension key) and MEASURE (measure key).

      " + }, + "Name":{ + "shape":"SchemaName", + "documentation":"

      The name of the attribute used for a dimension key.

      " + }, + "EnforcementInRecord":{ + "shape":"PartitionKeyEnforcementLevel", + "documentation":"

      The level of enforcement for the specification of a dimension key in ingested records. Options are REQUIRED (dimension key must be specified) and OPTIONAL (dimension key does not have to be specified).

      " + } + }, + "documentation":"

      An attribute used in partitioning data in a table. A dimension key partitions data using the values of the dimension specified by the dimension-name as partition key, while a measure key partitions data using measure names (values of the 'measure_name' column).

      " + }, + "PartitionKeyEnforcementLevel":{ + "type":"string", + "enum":[ + "REQUIRED", + "OPTIONAL" + ] + }, + "PartitionKeyList":{ + "type":"list", + "member":{"shape":"PartitionKey"}, + "min":1 + }, + "PartitionKeyType":{ + "type":"string", + "enum":[ + "DIMENSION", + "MEASURE" + ] + }, "Record":{ "type":"structure", "members":{ @@ -1293,7 +1335,7 @@ }, "MeasureValueType":{ "shape":"MeasureValueType", - "documentation":"

      Contains the data type of the measure value for the time-series data point. Default type is DOUBLE.

      " + "documentation":"

      Contains the data type of the measure value for the time-series data point. Default type is DOUBLE. For more information, see Data types.

      " }, "Time":{ "shape":"StringValue256", @@ -1512,6 +1554,16 @@ "TIMESTAMP" ] }, + "Schema":{ + "type":"structure", + "members":{ + "CompositePartitionKey":{ + "shape":"PartitionKeyList", + "documentation":"

      A non-empty list of partition keys defining the attributes used to partition the table data. The order of the list determines the partition hierarchy. The name and type of each partition key as well as the partition key order cannot be changed after the table is created. However, the enforcement level of each partition key can be changed.

      " + } + }, + "documentation":"

      A Schema specifies the expected data model of the table.

      " + }, "SchemaName":{ "type":"string", "min":1 @@ -1575,6 +1627,10 @@ "MagneticStoreWriteProperties":{ "shape":"MagneticStoreWriteProperties", "documentation":"

      Contains properties to set on the table when enabling magnetic store writes.

      " + }, + "Schema":{ + "shape":"Schema", + "documentation":"

      The schema of the table.

      " } }, "documentation":"

      Represents a database table in Timestream. Tables contain one or more related time series. You can modify the retention duration of the memory store and the magnetic store for a table.

      " @@ -1738,6 +1794,10 @@ "MagneticStoreWriteProperties":{ "shape":"MagneticStoreWriteProperties", "documentation":"

      Contains properties to set on the table when enabling magnetic store writes.

      " + }, + "Schema":{ + "shape":"Schema", + "documentation":"

      The schema of the table.

      " } } }, diff --git a/services/tnb/pom.xml b/services/tnb/pom.xml index 5dad691c7309..e65ae53e6fd0 100644 --- a/services/tnb/pom.xml +++ b/services/tnb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT tnb AWS Java SDK :: Services :: Tnb diff --git a/services/transcribe/pom.xml b/services/transcribe/pom.xml index bf711bc19b50..0aacc1a1aa6e 100644 --- a/services/transcribe/pom.xml +++ b/services/transcribe/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT transcribe AWS Java SDK :: Services :: Transcribe diff --git a/services/transcribestreaming/pom.xml b/services/transcribestreaming/pom.xml index 52ee40e47a57..12311efb9cfc 100644 --- a/services/transcribestreaming/pom.xml +++ b/services/transcribestreaming/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT transcribestreaming AWS Java SDK :: Services :: AWS Transcribe Streaming diff --git a/services/transfer/pom.xml b/services/transfer/pom.xml index 199cde4489f9..ed3a1d16b74e 100644 --- a/services/transfer/pom.xml +++ b/services/transfer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT transfer AWS Java SDK :: Services :: Transfer diff --git a/services/transfer/src/main/resources/codegen-resources/service-2.json b/services/transfer/src/main/resources/codegen-resources/service-2.json index 5baaf6a3d700..cf2b8f4ac6c6 100644 --- a/services/transfer/src/main/resources/codegen-resources/service-2.json +++ b/services/transfer/src/main/resources/codegen-resources/service-2.json @@ -1418,6 +1418,10 @@ "WorkflowDetails":{ "shape":"WorkflowDetails", "documentation":"

      Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.

      In addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when the server session disconnects while the file is still being uploaded.

      " + }, + "StructuredLogDestinations":{ + "shape":"StructuredLogDestinations", + "documentation":"

      Specifies the log groups to which your server logs are sent.

      To specify a log group, you must provide the ARN for an existing log group. In this case, the format of the log group is as follows:

      arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:*

      For example, arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:*

      If you have previously specified a log group for a server, you can clear it, and in effect turn off structured logging, by providing an empty value for this parameter in an update-server call. For example:

      update-server --server-id s-1234567890abcdef0 --structured-log-destinations

      " } } }, @@ -2417,6 +2421,10 @@ "WorkflowDetails":{ "shape":"WorkflowDetails", "documentation":"

      Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.

      In addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when the server session disconnects while the file is still being uploaded.

      " + }, + "StructuredLogDestinations":{ + "shape":"StructuredLogDestinations", + "documentation":"

      Specifies the log groups to which your server logs are sent.

      To specify a log group, you must provide the ARN for an existing log group. In this case, the format of the log group is as follows:

      arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:*

      For example, arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:*

      If you have previously specified a log group for a server, you can clear it, and in effect turn off structured logging, by providing an empty value for this parameter in an update-server call. For example:

      update-server --server-id s-1234567890abcdef0 --structured-log-destinations

      " } }, "documentation":"

      Describes the properties of a file transfer protocol-enabled server that was specified.

      " @@ -4250,6 +4258,12 @@ } } }, + "StructuredLogDestinations":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":1, + "min":0 + }, "SubnetId":{"type":"string"}, "SubnetIds":{ "type":"list", @@ -4713,6 +4727,10 @@ "WorkflowDetails":{ "shape":"WorkflowDetails", "documentation":"

      Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.

      In addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when the server session disconnects while the file is still being uploaded.

      To remove an associated workflow from a server, you can provide an empty OnUpload object, as in the following example.

      aws transfer update-server --server-id s-01234567890abcdef --workflow-details '{\"OnUpload\":[]}'

      " + }, + "StructuredLogDestinations":{ + "shape":"StructuredLogDestinations", + "documentation":"

      Specifies the log groups to which your server logs are sent.

      To specify a log group, you must provide the ARN for an existing log group. In this case, the format of the log group is as follows:

      arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:*

      For example, arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:*

      If you have previously specified a log group for a server, you can clear it, and in effect turn off structured logging, by providing an empty value for this parameter in an update-server call. For example:

      update-server --server-id s-1234567890abcdef0 --structured-log-destinations

      " } } }, diff --git a/services/translate/pom.xml b/services/translate/pom.xml index e84980095862..285094ab5dd1 100644 --- a/services/translate/pom.xml +++ b/services/translate/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 translate diff --git a/services/translate/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/translate/src/main/resources/codegen-resources/endpoint-rule-set.json index 3d315dcea2f4..44b476b069c4 100644 --- a/services/translate/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/translate/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { 
- "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] }, - "supportsFIPS" + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://translate-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true ] } + ], + "type": 
"tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://translate-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://translate-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://translate.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - 
"type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -222,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://translate-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://translate.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -231,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://translate.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://translate.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/translate/src/main/resources/codegen-resources/endpoint-tests.json b/services/translate/src/main/resources/codegen-resources/endpoint-tests.json index 6b9d67a5fddb..2984854f21c9 100644 --- a/services/translate/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/translate/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,42 +1,42 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://translate-fips.ap-south-1.api.aws" + "url": "https://translate.ap-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-south-1" + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://translate-fips.ap-south-1.amazonaws.com" + "url": "https://translate.ap-northeast-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-south-1" + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://translate.ap-south-1.api.aws" + "url": "https://translate.ap-northeast-2.amazonaws.com" } }, "params": { + "Region": "ap-northeast-2", "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-south-1" + "UseDualStack": false } }, { @@ -47,48 +47,35 @@ } }, "params": { + "Region": "ap-south-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-south-1" + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://translate-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://translate-fips.ca-central-1.amazonaws.com" + "url": "https://translate.ap-southeast-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - 
"UseDualStack": false, - "Region": "ca-central-1" + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://translate.ca-central-1.api.aws" + "url": "https://translate.ap-southeast-2.amazonaws.com" } }, "params": { + "Region": "ap-southeast-2", "UseFIPS": false, - "UseDualStack": true, - "Region": "ca-central-1" + "UseDualStack": false } }, { @@ -99,48 +86,9 @@ } }, "params": { + "Region": "ca-central-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://translate-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://translate-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://translate.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-central-1" + "UseDualStack": false } }, { @@ -151,152 +99,9 @@ } }, "params": { + "Region": "eu-central-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://translate-fips.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "us-west-1" - } - 
}, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://translate-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "us-west-1" - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://translate.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "us-west-1" - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://translate.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "us-west-1" - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://translate-fips.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "us-west-2" - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://translate-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "us-west-2" - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://translate.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "us-west-2" - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://translate.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "us-west-2" - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": 
{ - "endpoint": { - "url": "https://translate-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-north-1" - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://translate-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-north-1" - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://translate.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-north-1" + "UseDualStack": false } }, { @@ -307,48 +112,35 @@ } }, "params": { + "Region": "eu-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-north-1" + "UseDualStack": false } }, { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://translate-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://translate-fips.eu-west-3.amazonaws.com" + "url": "https://translate.eu-west-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-west-3" + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://translate.eu-west-3.api.aws" + "url": "https://translate.eu-west-2.amazonaws.com" } }, "params": { + "Region": "eu-west-2", "UseFIPS": false, - 
"UseDualStack": true, - "Region": "eu-west-3" + "UseDualStack": false } }, { @@ -359,308 +151,178 @@ } }, "params": { + "Region": "eu-west-3", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://translate-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://translate-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://translate.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-west-2" + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://translate.eu-west-2.amazonaws.com" + "url": "https://translate.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-2" + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://translate-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://translate-fips.eu-west-1.amazonaws.com" + "url": "https://translate-fips.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://translate.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-west-1" + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://translate.eu-west-1.amazonaws.com" + "url": "https://translate.us-east-2.amazonaws.com" } }, "params": { + "Region": "us-east-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://translate-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-northeast-2" - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://translate-fips.ap-northeast-2.amazonaws.com" + "url": "https://translate-fips.us-east-2.amazonaws.com" } }, "params": { + "Region": "us-east-2", "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://translate.ap-northeast-2.api.aws" + "url": 
"https://translate.us-west-1.amazonaws.com" } }, "params": { + "Region": "us-west-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://translate.ap-northeast-2.amazonaws.com" + "url": "https://translate.us-west-2.amazonaws.com" } }, "params": { + "Region": "us-west-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://translate-fips.ap-northeast-1.api.aws" + "url": "https://translate-fips.us-west-2.amazonaws.com" } }, "params": { + "Region": "us-west-2", "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://translate-fips.ap-northeast-1.amazonaws.com" + "url": "https://translate-fips.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-northeast-1" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://translate.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-northeast-1" - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and 
DualStack enabled", "expect": { "endpoint": { - "url": "https://translate.ap-northeast-1.amazonaws.com" + "url": "https://translate.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-1" + "UseDualStack": true } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://translate-fips.ap-east-1.api.aws" + "url": "https://translate-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-east-1" + "UseDualStack": true } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://translate-fips.ap-east-1.amazonaws.com" + "url": "https://translate-fips.cn-north-1.amazonaws.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-east-1" - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://translate.ap-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-east-1" + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://translate.ap-east-1.amazonaws.com" + "url": "https://translate.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and 
DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://translate-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-west-1" - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://translate-fips.us-gov-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-west-1" - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://translate.us-gov-west-1.api.aws" + "url": "https://translate.cn-north-1.amazonaws.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { @@ -671,113 +333,87 @@ } }, "params": { + "Region": "us-gov-west-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://translate-fips.ap-southeast-1.api.aws" + "url": "https://translate-fips.us-gov-west-1.amazonaws.com" } }, "params": { + "Region": "us-gov-west-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://translate-fips.ap-southeast-1.amazonaws.com" + "url": "https://translate-fips.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": 
false, - "Region": "ap-southeast-1" + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://translate.ap-southeast-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-southeast-1" - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://translate.ap-southeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-1" - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://translate-fips.ap-southeast-2.api.aws" + "url": "https://translate-fips.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://translate-fips.ap-southeast-2.amazonaws.com" + "url": "https://translate.us-gov-east-1.api.aws" } }, "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-southeast-2" + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://translate.ap-southeast-2.api.aws" + "url": "https://translate.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": true, - 
"Region": "ap-southeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://translate.ap-southeast-2.amazonaws.com" + "url": "https://translate.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { @@ -786,9 +422,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { @@ -799,9 +435,9 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -810,130 +446,75 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://translate.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-east-1" - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://translate-fips.us-east-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region 
us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://translate-fips.us-east-1.amazonaws.com" + "url": "https://translate-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://translate.us-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://translate.us-east-1.amazonaws.com" + "url": "https://translate.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://translate-fips.us-east-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-2" - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://translate-fips.us-east-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-2" - } - }, - 
{ - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://translate.us-east-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-2" - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://translate.us-east-2.amazonaws.com" + "url": "https://example.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-2" + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -942,7 +523,6 @@ "params": { "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -952,9 +532,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -964,11 +544,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/translate/src/main/resources/codegen-resources/service-2.json b/services/translate/src/main/resources/codegen-resources/service-2.json index 01a5e9a7789f..68fa90d5c4cb 100644 --- a/services/translate/src/main/resources/codegen-resources/service-2.json +++ b/services/translate/src/main/resources/codegen-resources/service-2.json @@ -256,6 +256,25 @@ ], 
"documentation":"

      Associates a specific tag with a resource. A tag is a key-value pair that adds as a metadata to a resource. For more information, see Tagging your resources.

      " }, + "TranslateDocument":{ + "name":"TranslateDocument", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TranslateDocumentRequest"}, + "output":{"shape":"TranslateDocumentResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedLanguagePairException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

      Translates the input document from the source language to the target language. This synchronous operation supports plain text or HTML for the input document. TranslateDocument supports translations from English to any supported language, and from any supported language to English. Therefore, specify either the source language code or the target language code as “en” (English).

      TranslateDocument does not support language auto-detection.

      If you set the Formality parameter, the request will fail if the target language does not support formality. For a list of target languages that support formality, see Setting formality.

      " + }, "TranslateText":{ "name":"TranslateText", "http":{ @@ -501,6 +520,29 @@ "zh-TW" ] }, + "Document":{ + "type":"structure", + "required":[ + "Content", + "ContentType" + ], + "members":{ + "Content":{ + "shape":"DocumentContent", + "documentation":"

      The Contentfield type is Binary large object (blob). This object contains the document content converted into base64-encoded binary data. If you use one of the AWS SDKs, the SDK performs the Base64-encoding on this field before sending the request.

      " + }, + "ContentType":{ + "shape":"ContentType", + "documentation":"

      Describes the format of the document. You can specify one of the following:

      • text/html - The input data consists of HTML content. Amazon Translate translates only the text in the HTML element.

      • text/plain - The input data consists of unformatted text. Amazon Translate translates every character in the content.

      " + } + }, + "documentation":"

      The content and content type of a document.

      " + }, + "DocumentContent":{ + "type":"blob", + "max":102400, + "sensitive":true + }, "EncryptionKey":{ "type":"structure", "required":[ @@ -1521,7 +1563,7 @@ }, "Settings":{ "shape":"TranslationSettings", - "documentation":"

      Settings that configure the translation output.

      " + "documentation":"

      Settings that modify the translation output.

      " } }, "documentation":"

      Provides information about a translation job.

      " @@ -1548,6 +1590,60 @@ "documentation":"

      You have added too many tags to this resource. The maximum is 50 tags.

      ", "exception":true }, + "TranslateDocumentRequest":{ + "type":"structure", + "required":[ + "Document", + "SourceLanguageCode", + "TargetLanguageCode" + ], + "members":{ + "Document":{ + "shape":"Document", + "documentation":"

      The content and content type for the document to be translated. The document size must not exceed 100 KB.

      " + }, + "TerminologyNames":{ + "shape":"ResourceNameList", + "documentation":"

      The name of a terminology list file to add to the translation job. This file provides source terms and the desired translation for each term. A terminology list can contain a maximum of 256 terms. You can use one custom terminology resource in your translation request.

      Use the ListTerminologies operation to get the available terminology lists.

      For more information about custom terminology lists, see Custom terminology.

      " + }, + "SourceLanguageCode":{ + "shape":"LanguageCodeString", + "documentation":"

      The language code for the language of the source text. Do not use auto, because TranslateDocument does not support language auto-detection. For a list of supported language codes, see Supported languages.

      " + }, + "TargetLanguageCode":{ + "shape":"LanguageCodeString", + "documentation":"

      The language code requested for the translated document. For a list of supported language codes, see Supported languages.

      " + }, + "Settings":{"shape":"TranslationSettings"} + } + }, + "TranslateDocumentResponse":{ + "type":"structure", + "required":[ + "TranslatedDocument", + "SourceLanguageCode", + "TargetLanguageCode" + ], + "members":{ + "TranslatedDocument":{ + "shape":"TranslatedDocument", + "documentation":"

      The document containing the translated content. The document format matches the source document format.

      " + }, + "SourceLanguageCode":{ + "shape":"LanguageCodeString", + "documentation":"

      The language code of the source document.

      " + }, + "TargetLanguageCode":{ + "shape":"LanguageCodeString", + "documentation":"

      The language code of the translated document.

      " + }, + "AppliedTerminologies":{ + "shape":"AppliedTerminologyList", + "documentation":"

      The names of the custom terminologies applied to the input text by Amazon Translate to produce the translated text document.

      " + }, + "AppliedSettings":{"shape":"TranslationSettings"} + } + }, "TranslateTextRequest":{ "type":"structure", "required":[ @@ -1562,15 +1658,15 @@ }, "TerminologyNames":{ "shape":"ResourceNameList", - "documentation":"

      The name of the terminology list file to be used in the TranslateText request. You can use 1 terminology list at most in a TranslateText request. Terminology lists can contain a maximum of 256 terms.

      " + "documentation":"

      The name of a terminology list file to add to the translation job. This file provides source terms and the desired translation for each term. A terminology list can contain a maximum of 256 terms. You can use one custom terminology resource in your translation request.

      Use the ListTerminologies operation to get the available terminology lists.

      For more information about custom terminology lists, see Custom terminology.

      " }, "SourceLanguageCode":{ "shape":"LanguageCodeString", - "documentation":"

      The language code for the language of the source text. The language must be a language supported by Amazon Translate. For a list of language codes, see Supported languages.

      To have Amazon Translate determine the source language of your text, you can specify auto in the SourceLanguageCode field. If you specify auto, Amazon Translate will call Amazon Comprehend to determine the source language.

      If you specify auto, you must send the TranslateText request in a region that supports Amazon Comprehend. Otherwise, the request returns an error indicating that autodetect is not supported.

      " + "documentation":"

      The language code for the language of the source text. For a list of language codes, see Supported languages.

      To have Amazon Translate determine the source language of your text, you can specify auto in the SourceLanguageCode field. If you specify auto, Amazon Translate will call Amazon Comprehend to determine the source language.

      If you specify auto, you must send the TranslateText request in a region that supports Amazon Comprehend. Otherwise, the request returns an error indicating that autodetect is not supported.

      " }, "TargetLanguageCode":{ "shape":"LanguageCodeString", - "documentation":"

      The language code requested for the language of the target text. The language must be a language supported by Amazon Translate.

      " + "documentation":"

      The language code requested for the language of the target text. For a list of language codes, see Supported languages.

      " }, "Settings":{ "shape":"TranslationSettings", @@ -1604,10 +1700,25 @@ }, "AppliedSettings":{ "shape":"TranslationSettings", - "documentation":"

      Settings that configure the translation output.

      " + "documentation":"

      Optional settings that modify the translation output.

      " } } }, + "TranslatedDocument":{ + "type":"structure", + "required":["Content"], + "members":{ + "Content":{ + "shape":"TranslatedDocumentContent", + "documentation":"

      The document containing the translated content.

      " + } + }, + "documentation":"

      The translated content.

      " + }, + "TranslatedDocumentContent":{ + "type":"blob", + "sensitive":true + }, "TranslatedTextString":{ "type":"string", "max":20000, @@ -1625,7 +1736,7 @@ "documentation":"

      Enable the profanity setting if you want Amazon Translate to mask profane words and phrases in your translation output.

      To mask profane words and phrases, Amazon Translate replaces them with the grawlix string “?$#@$“. This 5-character sequence is used for each profane word or phrase, regardless of the length or number of words.

      Amazon Translate doesn't detect profanity in all of its supported languages. For languages that don't support profanity detection, see Unsupported languages in the Amazon Translate Developer Guide.

      If you specify multiple target languages for the job, all the target languages must support profanity masking. If any of the target languages don't support profanity masking, the translation job won't mask profanity for any target language.

      " } }, - "documentation":"

      Optional settings that configure the translation output. Use these settings for real time translations and asynchronous translation jobs.

      " + "documentation":"

      Settings to configure your translation output, including the option to set the formality level of the output text and the option to mask profane words and phrases.

      " }, "UnboundedLengthString":{"type":"string"}, "UnsupportedDisplayLanguageCodeException":{ @@ -1653,7 +1764,7 @@ "documentation":"

      The language code for the language of the translated text.

      " } }, - "documentation":"

      Amazon Translate does not support translation from the language of the source text into the requested target language. For more information, see Error messages.

      ", + "documentation":"

      Amazon Translate does not support translation from the language of the source text into the requested target language. For more information, see Supported languages.

      ", "exception":true }, "UntagResourceRequest":{ diff --git a/services/verifiedpermissions/pom.xml b/services/verifiedpermissions/pom.xml new file mode 100644 index 000000000000..6b2251d81e26 --- /dev/null +++ b/services/verifiedpermissions/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.20.93-SNAPSHOT + + verifiedpermissions + AWS Java SDK :: Services :: Verified Permissions + The AWS Java SDK for Verified Permissions module holds the client classes that are used for + communicating with Verified Permissions. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.verifiedpermissions + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/verifiedpermissions/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/verifiedpermissions/src/main/resources/codegen-resources/endpoint-rule-set.json new file mode 100644 index 000000000000..17b9ad0a91f5 --- /dev/null +++ b/services/verifiedpermissions/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } 
+ ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://verifiedpermissions-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://verifiedpermissions-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://verifiedpermissions.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "type": 
"tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://verifiedpermissions.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] + } + ] +} \ No newline at end of file diff --git a/services/verifiedpermissions/src/main/resources/codegen-resources/endpoint-tests.json b/services/verifiedpermissions/src/main/resources/codegen-resources/endpoint-tests.json new file mode 100644 index 000000000000..722b079cbf01 --- /dev/null +++ b/services/verifiedpermissions/src/main/resources/codegen-resources/endpoint-tests.json @@ -0,0 +1,295 @@ +{ + "testCases": [ + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-gov-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-gov-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions.us-gov-east-1.api.aws" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-gov-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-gov-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and 
DualStack enabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "UseFIPS": true, + "Region": "cn-north-1", + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseFIPS": true, + "Region": "cn-north-1", + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "UseFIPS": false, + "Region": "cn-north-1", + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseFIPS": false, + "Region": "cn-north-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseFIPS": true, + "Region": "us-iso-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-iso-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseFIPS": false, + "Region": "us-iso-east-1", + "UseDualStack": true + } + }, + { + 
"documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-iso-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions-fips.us-east-1.api.aws" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions.us-east-1.api.aws" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions.us-east-1.amazonaws.com" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseFIPS": true, + "Region": "us-isob-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseFIPS": true, + "Region": "us-isob-east-1", + "UseDualStack": false 
+ } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseFIPS": false, + "Region": "us-isob-east-1", + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-isob-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "UseFIPS": true, + "Region": "us-east-1", + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "UseFIPS": false, + "Region": "us-east-1", + "UseDualStack": true, + "Endpoint": "https://example.com" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/services/verifiedpermissions/src/main/resources/codegen-resources/paginators-1.json b/services/verifiedpermissions/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..4314d715de41 --- /dev/null +++ b/services/verifiedpermissions/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,28 @@ +{ + "pagination": { + "ListIdentitySources": { + "input_token": 
"nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "identitySources" + }, + "ListPolicies": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "policies" + }, + "ListPolicyStores": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "policyStores" + }, + "ListPolicyTemplates": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "policyTemplates" + } + } +} diff --git a/services/verifiedpermissions/src/main/resources/codegen-resources/service-2.json b/services/verifiedpermissions/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..8b272e444fee --- /dev/null +++ b/services/verifiedpermissions/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2507 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2021-12-01", + "endpointPrefix":"verifiedpermissions", + "jsonVersion":"1.0", + "protocol":"json", + "serviceFullName":"Amazon Verified Permissions", + "serviceId":"VerifiedPermissions", + "signatureVersion":"v4", + "signingName":"verifiedpermissions", + "targetPrefix":"VerifiedPermissions", + "uid":"verifiedpermissions-2021-12-01" + }, + "operations":{ + "CreateIdentitySource":{ + "name":"CreateIdentitySource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateIdentitySourceInput"}, + "output":{"shape":"CreateIdentitySourceOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Creates a reference to an Amazon Cognito user pool as an external identity provider (IdP).

      After you create an identity source, you can use the identities provided by the IdP as proxies for the principal in authorization queries that use the IsAuthorizedWithToken operation. These identities take the form of tokens that contain claims about the user, such as IDs, attributes and group memberships. Amazon Cognito provides both identity tokens and access tokens, and Verified Permissions can use either or both. Any combination of identity and access tokens results in the same Cedar principal. Verified Permissions automatically translates the information about the identities into the standard Cedar attributes that can be evaluated by your policies. Because the Amazon Cognito identity and access tokens can contain different information, the tokens you choose to use determine which principal attributes are available to access when evaluating Cedar policies.

      If you delete a Amazon Cognito user pool or user, tokens from that deleted pool or that deleted user continue to be usable until they expire.

      To reference a user from this identity source in your Cedar policies, use the following syntax.

      IdentityType::\"<CognitoUserPoolIdentifier>|<CognitoClientId>

      Where IdentityType is the string that you provide to the PrincipalEntityType parameter for this operation. The CognitoUserPoolId and CognitoClientId are defined by the Amazon Cognito user pool.

      ", + "idempotent":true + }, + "CreatePolicy":{ + "name":"CreatePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePolicyInput"}, + "output":{"shape":"CreatePolicyOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Creates a Cedar policy and saves it in the specified policy store. You can create either a static policy or a policy linked to a policy template.

      • To create a static policy, provide the Cedar policy text in the StaticPolicy section of the PolicyDefinition.

      • To create a policy that is dynamically linked to a policy template, specify the policy template ID and the principal and resource to associate with this policy in the templateLinked section of the PolicyDefinition. If the policy template is ever updated, any policies linked to the policy template automatically use the updated template.

      Creating a policy causes it to be validated against the schema in the policy store. If the policy doesn't pass validation, the operation fails and the policy isn't stored.

      ", + "idempotent":true + }, + "CreatePolicyStore":{ + "name":"CreatePolicyStore", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePolicyStoreInput"}, + "output":{"shape":"CreatePolicyStoreOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Creates a policy store. A policy store is a container for policy resources.

      Although Cedar supports multiple namespaces, Verified Permissions currently supports only one namespace per policy store.

      ", + "idempotent":true + }, + "CreatePolicyTemplate":{ + "name":"CreatePolicyTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePolicyTemplateInput"}, + "output":{"shape":"CreatePolicyTemplateOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Creates a policy template. A template can use placeholders for the principal and resource. A template must be instantiated into a policy by associating it with specific principals and resources to use for the placeholders. That instantiated policy can then be considered in authorization decisions. The instantiated policy works identically to any other policy, except that it is dynamically linked to the template. If the template changes, then any policies that are linked to that template are immediately updated as well.

      ", + "idempotent":true + }, + "DeleteIdentitySource":{ + "name":"DeleteIdentitySource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteIdentitySourceInput"}, + "output":{"shape":"DeleteIdentitySourceOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Deletes an identity source that references an identity provider (IdP) such as Amazon Cognito. After you delete the identity source, you can no longer use tokens for identities from that identity source to represent principals in authorization queries made using IsAuthorizedWithToken. operations.

      ", + "idempotent":true + }, + "DeletePolicy":{ + "name":"DeletePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePolicyInput"}, + "output":{"shape":"DeletePolicyOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Deletes the specified policy from the policy store.

      This operation is idempotent; if you specify a policy that doesn't exist, the request response returns a successful HTTP 200 status code.

      ", + "idempotent":true + }, + "DeletePolicyStore":{ + "name":"DeletePolicyStore", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePolicyStoreInput"}, + "output":{"shape":"DeletePolicyStoreOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Deletes the specified policy store.

      This operation is idempotent. If you specify a policy store that does not exist, the request response will still return a successful HTTP 200 status code.

      ", + "idempotent":true + }, + "DeletePolicyTemplate":{ + "name":"DeletePolicyTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePolicyTemplateInput"}, + "output":{"shape":"DeletePolicyTemplateOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Deletes the specified policy template from the policy store.

      This operation also deletes any policies that were created from the specified policy template. Those policies are immediately removed from all future API responses, and are asynchronously deleted from the policy store.

      ", + "idempotent":true + }, + "GetIdentitySource":{ + "name":"GetIdentitySource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetIdentitySourceInput"}, + "output":{"shape":"GetIdentitySourceOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Retrieves the details about the specified identity source.

      " + }, + "GetPolicy":{ + "name":"GetPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPolicyInput"}, + "output":{"shape":"GetPolicyOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Retrieves information about the specified policy.

      " + }, + "GetPolicyStore":{ + "name":"GetPolicyStore", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPolicyStoreInput"}, + "output":{"shape":"GetPolicyStoreOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Retrieves details about a policy store.

      " + }, + "GetPolicyTemplate":{ + "name":"GetPolicyTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPolicyTemplateInput"}, + "output":{"shape":"GetPolicyTemplateOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Retrieve the details for the specified policy template in the specified policy store.

      " + }, + "GetSchema":{ + "name":"GetSchema", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSchemaInput"}, + "output":{"shape":"GetSchemaOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Retrieve the details for the specified schema in the specified policy store.

      " + }, + "IsAuthorized":{ + "name":"IsAuthorized", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"IsAuthorizedInput"}, + "output":{"shape":"IsAuthorizedOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Makes an authorization decision about a service request described in the parameters. The information in the parameters can also define additional context that Verified Permissions can include in the evaluation. The request is evaluated against all matching policies in the specified policy store. The result of the decision is either Allow or Deny, along with a list of the policies that resulted in the decision.

      " + }, + "IsAuthorizedWithToken":{ + "name":"IsAuthorizedWithToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"IsAuthorizedWithTokenInput"}, + "output":{"shape":"IsAuthorizedWithTokenOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Makes an authorization decision about a service request described in the parameters. The principal in this request comes from an external identity source. The information in the parameters can also define additional context that Verified Permissions can include in the evaluation. The request is evaluated against all matching policies in the specified policy store. The result of the decision is either Allow or Deny, along with a list of the policies that resulted in the decision.

      If you delete an Amazon Cognito user pool or user, tokens from that deleted pool or that deleted user continue to be usable until they expire.

      " + }, + "ListIdentitySources":{ + "name":"ListIdentitySources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListIdentitySourcesInput"}, + "output":{"shape":"ListIdentitySourcesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Returns a paginated list of all of the identity sources defined in the specified policy store.

      " + }, + "ListPolicies":{ + "name":"ListPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPoliciesInput"}, + "output":{"shape":"ListPoliciesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Returns a paginated list of all policies stored in the specified policy store.

      " + }, + "ListPolicyStores":{ + "name":"ListPolicyStores", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPolicyStoresInput"}, + "output":{"shape":"ListPolicyStoresOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Returns a paginated list of all policy stores in the calling Amazon Web Services account.

      " + }, + "ListPolicyTemplates":{ + "name":"ListPolicyTemplates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPolicyTemplatesInput"}, + "output":{"shape":"ListPolicyTemplatesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Returns a paginated list of all policy templates in the specified policy store.

      " + }, + "PutSchema":{ + "name":"PutSchema", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutSchemaInput"}, + "output":{"shape":"PutSchemaOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Creates or updates the policy schema in the specified policy store. The schema is used to validate any Cedar policies and policy templates submitted to the policy store. Any changes to the schema validate only policies and templates submitted after the schema change. Existing policies and templates are not re-evaluated against the changed schema. If you later update a policy, then it is evaluated against the new schema at that time.

      ", + "idempotent":true + }, + "UpdateIdentitySource":{ + "name":"UpdateIdentitySource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateIdentitySourceInput"}, + "output":{"shape":"UpdateIdentitySourceOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Updates the specified identity source to use a new identity provider (IdP) source, or to change the mapping of identities from the IdP to a different principal entity type.

      ", + "idempotent":true + }, + "UpdatePolicy":{ + "name":"UpdatePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdatePolicyInput"}, + "output":{"shape":"UpdatePolicyOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Modifies a Cedar static policy in the specified policy store. You can change only certain elements of the UpdatePolicyDefinition parameter. You can directly update only static policies. To change a template-linked policy, you must update the template instead, using UpdatePolicyTemplate.

      If policy validation is enabled in the policy store, then updating a static policy causes Verified Permissions to validate the policy against the schema in the policy store. If the updated static policy doesn't pass validation, the operation fails and the update isn't stored.

      ", + "idempotent":true + }, + "UpdatePolicyStore":{ + "name":"UpdatePolicyStore", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdatePolicyStoreInput"}, + "output":{"shape":"UpdatePolicyStoreOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Modifies the validation setting for a policy store.

      ", + "idempotent":true + }, + "UpdatePolicyTemplate":{ + "name":"UpdatePolicyTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdatePolicyTemplateInput"}, + "output":{"shape":"UpdatePolicyTemplateOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

      Updates the specified policy template. You can update only the description and some elements of the policyBody.

      Changes you make to the policy template content are immediately reflected in authorization decisions that involve all template-linked policies instantiated from this template.

      ", + "idempotent":true + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

      You don't have sufficient access to perform this action.

      ", + "exception":true + }, + "ActionId":{ + "type":"string", + "max":200, + "min":1, + "pattern":".*" + }, + "ActionIdentifier":{ + "type":"structure", + "required":[ + "actionType", + "actionId" + ], + "members":{ + "actionType":{ + "shape":"ActionType", + "documentation":"

      The type of an action.

      " + }, + "actionId":{ + "shape":"ActionId", + "documentation":"

      The ID of an action.

      " + } + }, + "documentation":"

      Contains information about an action for a request for which an authorization decision is made.

      This data type is used as a request parameter to the IsAuthorized and IsAuthorizedWithToken operations.

      Example: { \"actionId\": \"<action name>\", \"actionType\": \"Action\" }

      " + }, + "ActionType":{ + "type":"string", + "max":200, + "min":1, + "pattern":"Action$|^.+::Action" + }, + "AttributeValue":{ + "type":"structure", + "members":{ + "boolean":{ + "shape":"BooleanAttribute", + "documentation":"

      An attribute value of Boolean type.

      Example: {\"boolean\": true}

      " + }, + "entityIdentifier":{ + "shape":"EntityIdentifier", + "documentation":"

      An attribute value of type EntityIdentifier.

      Example: \"entityIdentifier\": { \"entityId\": \"<id>\", \"entityType\": \"<entity type>\"}

      " + }, + "long":{ + "shape":"LongAttribute", + "documentation":"

      An attribute value of Long type.

      Example: {\"long\": 0}

      " + }, + "string":{ + "shape":"StringAttribute", + "documentation":"

      An attribute value of String type.

      Example: {\"string\": \"abc\"}

      " + }, + "set":{ + "shape":"SetAttribute", + "documentation":"

      An attribute value of Set type.

      Example: {\"set\": [ {} ] }

      " + }, + "record":{ + "shape":"RecordAttribute", + "documentation":"

      An attribute value of Record type.

      Example: {\"record\": { \"keyName\": {} } }

      " + } + }, + "documentation":"

      The value of an attribute.

      Contains information about the runtime context for a request for which an authorization decision is made.

      This data type is used as a member of the ContextDefinition structure which is used as a request parameter for the IsAuthorized and IsAuthorizedWithToken operations.

      ", + "union":true + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "BooleanAttribute":{ + "type":"boolean", + "box":true + }, + "ClientId":{ + "type":"string", + "max":255, + "min":1, + "pattern":".*" + }, + "ClientIds":{ + "type":"list", + "member":{"shape":"ClientId"}, + "max":1000, + "min":0 + }, + "CognitoUserPoolConfiguration":{ + "type":"structure", + "required":["userPoolArn"], + "members":{ + "userPoolArn":{ + "shape":"UserPoolArn", + "documentation":"

      The Amazon Resource Name (ARN) of the Amazon Cognito user pool that contains the identities to be authorized.

      Example: \"UserPoolArn\": \"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\"

      " + }, + "clientIds":{ + "shape":"ClientIds", + "documentation":"

      The unique application client IDs that are associated with the specified Amazon Cognito user pool.

      Example: \"ClientIds\": [\"&ExampleCogClientId;\"]

      " + } + }, + "documentation":"

      The configuration for an identity source that represents a connection to an Amazon Cognito user pool used as an identity provider for Verified Permissions.

      This data type is used as a field that is part of a Configuration structure that is used as a parameter to the Configuration.

      Example:\"CognitoUserPoolConfiguration\":{\"UserPoolArn\":\"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\",\"ClientIds\": [\"a1b2c3d4e5f6g7h8i9j0kalbmc\"]}

      " + }, + "Configuration":{ + "type":"structure", + "members":{ + "cognitoUserPoolConfiguration":{ + "shape":"CognitoUserPoolConfiguration", + "documentation":"

      Contains configuration details of an Amazon Cognito user pool that Verified Permissions can use as a source of authenticated identities as entities. It specifies the Amazon Resource Name (ARN) of an Amazon Cognito user pool and one or more application client IDs.

      Example: \"configuration\":{\"cognitoUserPoolConfiguration\":{\"userPoolArn\":\"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\",\"clientIds\": [\"a1b2c3d4e5f6g7h8i9j0kalbmc\"]}}

      " + } + }, + "documentation":"

      Contains configuration information used when creating a new identity source.

      At this time, the only valid member of this structure is an Amazon Cognito user pool configuration.

      You must specify a userPoolArn, and optionally, a ClientId.

      This data type is used as a request parameter for the CreateIdentitySource operation.

      ", + "union":true + }, + "ConflictException":{ + "type":"structure", + "required":[ + "message", + "resources" + ], + "members":{ + "message":{"shape":"String"}, + "resources":{ + "shape":"ResourceConflictList", + "documentation":"

      The list of resources referenced with this failed request.

      " + } + }, + "documentation":"

      The request failed because another request to modify a resource occurred at the same time.

      ", + "exception":true + }, + "ContextDefinition":{ + "type":"structure", + "members":{ + "contextMap":{ + "shape":"ContextMap", + "documentation":"

      A list of attributes that are needed to successfully evaluate an authorization request. Each attribute in this array must include a map of a data type and its value.

      Example: \"Context\":{\"<KeyName1>\":{\"boolean\":true},\"<KeyName2>\":{\"long\":1234}}

      " + } + }, + "documentation":"

      Contains additional details about the context of the request. Verified Permissions evaluates this information in an authorization request as part of the when and unless clauses in a policy.

      This data type is used as a request parameter for the IsAuthorized and IsAuthorizedWithToken operations.

      Example: \"context\":{\"Context\":{\"<KeyName1>\":{\"boolean\":true},\"<KeyName2>\":{\"long\":1234}}}

      ", + "union":true + }, + "ContextMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"AttributeValue"}, + "min":0 + }, + "CreateIdentitySourceInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "configuration" + ], + "members":{ + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

      Specifies a unique, case-sensitive ID that you provide to ensure the idempotency of the request. This lets you safely retry the request without accidentally performing the same operation a second time. Passing the same value to a later call to an operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value.

      If you don't provide this value, then Amazon Web Services generates a random one for you.

      If you retry the operation with the same ClientToken, but with different parameters, the retry fails with an IdempotentParameterMismatch error.

      ", + "idempotencyToken":true + }, + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      Specifies the ID of the policy store in which you want to store this identity source. Only policies and requests made using this policy store can reference identities from the identity provider configured in the new identity source.

      " + }, + "configuration":{ + "shape":"Configuration", + "documentation":"

      Specifies the details required to communicate with the identity provider (IdP) associated with this identity source.

      At this time, the only valid member of this structure is an Amazon Cognito user pool configuration.

      You must specify a UserPoolArn, and optionally, a ClientId.

      " + }, + "principalEntityType":{ + "shape":"PrincipalEntityType", + "documentation":"

      Specifies the namespace and data type of the principals generated for identities authenticated by the new identity source.

      " + } + } + }, + "CreateIdentitySourceOutput":{ + "type":"structure", + "required":[ + "createdDate", + "identitySourceId", + "lastUpdatedDate", + "policyStoreId" + ], + "members":{ + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time the identity source was originally created.

      " + }, + "identitySourceId":{ + "shape":"IdentitySourceId", + "documentation":"

      The unique ID of the new identity source.

      " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time the identity source was most recently updated.

      " + }, + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      The ID of the policy store that contains the identity source.

      " + } + } + }, + "CreatePolicyInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "definition" + ], + "members":{ + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

      Specifies a unique, case-sensitive ID that you provide to ensure the idempotency of the request. This lets you safely retry the request without accidentally performing the same operation a second time. Passing the same value to a later call to an operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value.

      If you don't provide this value, then Amazon Web Services generates a random one for you.

      If you retry the operation with the same ClientToken, but with different parameters, the retry fails with an IdempotentParameterMismatch error.

      ", + "idempotencyToken":true + }, + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      Specifies the PolicyStoreId of the policy store you want to store the policy in.

      " + }, + "definition":{ + "shape":"PolicyDefinition", + "documentation":"

      A structure that specifies the policy type and content to use for the new policy. You must include either a static or a templateLinked element. The policy content must be written in the Cedar policy language.

      " + } + } + }, + "CreatePolicyOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyId", + "policyType", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      The ID of the policy store that contains the new policy.

      " + }, + "policyId":{ + "shape":"PolicyId", + "documentation":"

      The unique ID of the new policy.

      " + }, + "policyType":{ + "shape":"PolicyType", + "documentation":"

      The policy type of the new policy.

      " + }, + "principal":{ + "shape":"EntityIdentifier", + "documentation":"

      The principal specified in the new policy's scope. This response element isn't present when principal isn't specified in the policy content.

      " + }, + "resource":{ + "shape":"EntityIdentifier", + "documentation":"

      The resource specified in the new policy's scope. This response element isn't present when the resource isn't specified in the policy content.

      " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time the policy was originally created.

      " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time the policy was last updated.

      " + } + } + }, + "CreatePolicyStoreInput":{ + "type":"structure", + "required":["validationSettings"], + "members":{ + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

      Specifies a unique, case-sensitive ID that you provide to ensure the idempotency of the request. This lets you safely retry the request without accidentally performing the same operation a second time. Passing the same value to a later call to an operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value.

      If you don't provide this value, then Amazon Web Services generates a random one for you.

      If you retry the operation with the same ClientToken, but with different parameters, the retry fails with an IdempotentParameterMismatch error.

      ", + "idempotencyToken":true + }, + "validationSettings":{ + "shape":"ValidationSettings", + "documentation":"

      Specifies the validation setting for this policy store.

      Currently, the only valid and required value is Mode.

      We recommend that you turn on STRICT mode only after you define a schema. If a schema doesn't exist, then STRICT mode causes any policy to fail validation, and Verified Permissions rejects the policy. You can turn off validation by using the UpdatePolicyStore. Then, when you have a schema defined, use UpdatePolicyStore again to turn validation back on.

      " + } + } + }, + "CreatePolicyStoreOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "arn", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      The unique ID of the new policy store.

      " + }, + "arn":{ + "shape":"ResourceArn", + "documentation":"

      The Amazon Resource Name (ARN) of the new policy store.

      " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time the policy store was originally created.

      " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time the policy store was last updated.

      " + } + } + }, + "CreatePolicyTemplateInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "statement" + ], + "members":{ + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

      Specifies a unique, case-sensitive ID that you provide to ensure the idempotency of the request. This lets you safely retry the request without accidentally performing the same operation a second time. Passing the same value to a later call to an operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value.

      If you don't provide this value, then Amazon Web Services generates a random one for you.

      If you retry the operation with the same ClientToken, but with different parameters, the retry fails with an IdempotentParameterMismatch error.

      ", + "idempotencyToken":true + }, + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      The ID of the policy store in which to create the policy template.

      " + }, + "description":{ + "shape":"PolicyTemplateDescription", + "documentation":"

      Specifies a description for the policy template.

      " + }, + "statement":{ + "shape":"PolicyStatement", + "documentation":"

      Specifies the content that you want to use for the new policy template, written in the Cedar policy language.

      " + } + } + }, + "CreatePolicyTemplateOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyTemplateId", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      The ID of the policy store that contains the policy template.

      " + }, + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

      The unique ID of the new policy template.

      " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time the policy template was originally created.

      " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time the policy template was most recently updated.

      " + } + } + }, + "Decision":{ + "type":"string", + "enum":[ + "ALLOW", + "DENY" + ] + }, + "DeleteIdentitySourceInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "identitySourceId" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      Specifies the ID of the policy store that contains the identity source that you want to delete.

      " + }, + "identitySourceId":{ + "shape":"IdentitySourceId", + "documentation":"

      Specifies the ID of the identity source that you want to delete.

      " + } + } + }, + "DeleteIdentitySourceOutput":{ + "type":"structure", + "members":{ + } + }, + "DeletePolicyInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyId" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      Specifies the ID of the policy store that contains the policy that you want to delete.

      " + }, + "policyId":{ + "shape":"PolicyId", + "documentation":"

      Specifies the ID of the policy that you want to delete.

      " + } + } + }, + "DeletePolicyOutput":{ + "type":"structure", + "members":{ + } + }, + "DeletePolicyStoreInput":{ + "type":"structure", + "required":["policyStoreId"], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      Specifies the ID of the policy store that you want to delete.

      " + } + } + }, + "DeletePolicyStoreOutput":{ + "type":"structure", + "members":{ + } + }, + "DeletePolicyTemplateInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyTemplateId" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      Specifies the ID of the policy store that contains the policy template that you want to delete.

      " + }, + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

      Specifies the ID of the policy template that you want to delete.

      " + } + } + }, + "DeletePolicyTemplateOutput":{ + "type":"structure", + "members":{ + } + }, + "DeterminingPolicyItem":{ + "type":"structure", + "required":["policyId"], + "members":{ + "policyId":{ + "shape":"PolicyId", + "documentation":"

      The Id of a policy that determined an authorization decision.

      Example: \"policyId\":\"SPEXAMPLEabcdefg111111\"

      " + } + }, + "documentation":"

      Contains information about one of the policies that determined an authorization decision.

      This data type is used as an element in a response parameter for the IsAuthorized and IsAuthorizedWithToken operations.

      Example: \"determiningPolicies\":[{\"policyId\":\"SPEXAMPLEabcdefg111111\"}]

      " + }, + "DeterminingPolicyList":{ + "type":"list", + "member":{"shape":"DeterminingPolicyItem"} + }, + "DiscoveryUrl":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"https://.*" + }, + "EntitiesDefinition":{ + "type":"structure", + "members":{ + "entityList":{ + "shape":"EntityList", + "documentation":"

      An array of entities that are needed to successfully evaluate an authorization request. Each entity in this array must include an identifier for the entity, the attributes of the entity, and a list of any parent entities.

      " + } + }, + "documentation":"

      Contains the list of entities to be considered during an authorization request. This includes all principals, resources, and actions required to successfully evaluate the request.

      This data type is used as a field in the response parameter for the IsAuthorized and IsAuthorizedWithToken operations.

      ", + "union":true + }, + "EntityAttributes":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"AttributeValue"}, + "min":0 + }, + "EntityId":{ + "type":"string", + "max":200, + "min":1, + "pattern":".*" + }, + "EntityIdentifier":{ + "type":"structure", + "required":[ + "entityType", + "entityId" + ], + "members":{ + "entityType":{ + "shape":"EntityType", + "documentation":"

      The type of an entity.

      Example: \"entityType\":\"typeName\"

      " + }, + "entityId":{ + "shape":"EntityId", + "documentation":"

      The identifier of an entity.

      \"entityId\":\"identifier\"

      " + } + }, + "documentation":"

      Contains the identifier of an entity, including its ID and type.

      This data type is used as a request parameter for IsAuthorized operation, and as a response parameter for the CreatePolicy, GetPolicy, and UpdatePolicy operations.

      Example: {\"entityId\":\"string\",\"entityType\":\"string\"}

      " + }, + "EntityItem":{ + "type":"structure", + "required":["identifier"], + "members":{ + "identifier":{ + "shape":"EntityIdentifier", + "documentation":"

      The identifier of the entity.

      " + }, + "attributes":{ + "shape":"EntityAttributes", + "documentation":"

      A list of attributes for the entity.

      " + }, + "parents":{ + "shape":"ParentList", + "documentation":"

      The parents in the hierarchy that contains the entity.

      " + } + }, + "documentation":"

      Contains information about an entity that can be referenced in a Cedar policy.

      This data type is used as one of the fields in the EntitiesDefinition structure.

      { \"id\": { \"entityType\": \"Photo\", \"entityId\": \"VacationPhoto94.jpg\" }, \"Attributes\": {}, \"Parents\": [ { \"entityType\": \"Album\", \"entityId\": \"alice_folder\" } ] }

      " + }, + "EntityList":{ + "type":"list", + "member":{"shape":"EntityItem"}, + "min":0 + }, + "EntityReference":{ + "type":"structure", + "members":{ + "unspecified":{ + "shape":"Boolean", + "documentation":"

      Used to indicate that a principal or resource is not specified. This can be used to search for policies that are not associated with a specific principal or resource.

      " + }, + "identifier":{ + "shape":"EntityIdentifier", + "documentation":"

      The identifier of the entity. It can consist of either an EntityType and EntityId, a principal, or a resource.

      " + } + }, + "documentation":"

      Contains information about a principal or resource that can be referenced in a Cedar policy.

      This data type is used as part of the PolicyFilter structure that is used as a request parameter for the ListPolicies operation.

      ", + "union":true + }, + "EntityType":{ + "type":"string", + "max":200, + "min":1, + "pattern":".*" + }, + "EvaluationErrorItem":{ + "type":"structure", + "required":["errorDescription"], + "members":{ + "errorDescription":{ + "shape":"String", + "documentation":"

      The error description.

      " + } + }, + "documentation":"

      Contains a description of an evaluation error.

      This data type is used as a response parameter in the IsAuthorized and IsAuthorizedWithToken operations.

      " + }, + "EvaluationErrorList":{ + "type":"list", + "member":{"shape":"EvaluationErrorItem"} + }, + "GetIdentitySourceInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "identitySourceId" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      Specifies the ID of the policy store that contains the identity source you want information about.

      " + }, + "identitySourceId":{ + "shape":"IdentitySourceId", + "documentation":"

      Specifies the ID of the identity source you want information about.

      " + } + } + }, + "GetIdentitySourceOutput":{ + "type":"structure", + "required":[ + "createdDate", + "details", + "identitySourceId", + "lastUpdatedDate", + "policyStoreId", + "principalEntityType" + ], + "members":{ + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the identity source was originally created.

      " + }, + "details":{ + "shape":"IdentitySourceDetails", + "documentation":"

      A structure that describes the configuration of the identity source.

      " + }, + "identitySourceId":{ + "shape":"IdentitySourceId", + "documentation":"

      The ID of the identity source.

      " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the identity source was most recently updated.

      " + }, + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      The ID of the policy store that contains the identity source.

      " + }, + "principalEntityType":{ + "shape":"PrincipalEntityType", + "documentation":"

      The data type of principals generated for identities authenticated by this identity source.

      " + } + } + }, + "GetPolicyInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyId" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      Specifies the ID of the policy store that contains the policy that you want information about.

      " + }, + "policyId":{ + "shape":"PolicyId", + "documentation":"

      Specifies the ID of the policy you want information about.

      " + } + } + }, + "GetPolicyOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyId", + "policyType", + "definition", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      The ID of the policy store that contains the policy that you want information about.

      " + }, + "policyId":{ + "shape":"PolicyId", + "documentation":"

      The unique ID of the policy that you want information about.

      " + }, + "policyType":{ + "shape":"PolicyType", + "documentation":"

      The type of the policy.

      " + }, + "principal":{ + "shape":"EntityIdentifier", + "documentation":"

      The principal specified in the policy's scope. This element isn't included in the response when Principal isn't present in the policy content.

      " + }, + "resource":{ + "shape":"EntityIdentifier", + "documentation":"

      The resource specified in the policy's scope. This element isn't included in the response when Resource isn't present in the policy content.

      " + }, + "definition":{ + "shape":"PolicyDefinitionDetail", + "documentation":"

      The definition of the requested policy.

      " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the policy was originally created.

      " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the policy was last updated.

      " + } + } + }, + "GetPolicyStoreInput":{ + "type":"structure", + "required":["policyStoreId"], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      Specifies the ID of the policy store that you want information about.

      " + } + } + }, + "GetPolicyStoreOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "arn", + "validationSettings", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      The ID of the policy store.

      " + }, + "arn":{ + "shape":"ResourceArn", + "documentation":"

      The Amazon Resource Name (ARN) of the policy store.

      " + }, + "validationSettings":{ + "shape":"ValidationSettings", + "documentation":"

      The current validation settings for the policy store.

      " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the policy store was originally created.

      " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the policy store was last updated.

      " + } + } + }, + "GetPolicyTemplateInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyTemplateId" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      Specifies the ID of the policy store that contains the policy template that you want information about.

      " + }, + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

      Specifies the ID of the policy template that you want information about.

      " + } + } + }, + "GetPolicyTemplateOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyTemplateId", + "statement", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      The ID of the policy store that contains the policy template.

      " + }, + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

      The ID of the policy template.

      " + }, + "description":{ + "shape":"PolicyTemplateDescription", + "documentation":"

      The description of the policy template.

      " + }, + "statement":{ + "shape":"PolicyStatement", + "documentation":"

      The content of the body of the policy template written in the Cedar policy language.

      " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the policy template was originally created.

      " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the policy template was most recently updated.

      " + } + } + }, + "GetSchemaInput":{ + "type":"structure", + "required":["policyStoreId"], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      Specifies the ID of the policy store that contains the schema.

      " + } + } + }, + "GetSchemaOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "schema", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      The ID of the policy store that contains the schema.

      " + }, + "schema":{ + "shape":"SchemaJson", + "documentation":"

      The body of the schema, written in Cedar schema JSON.

      " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the schema was originally created.

      " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the schema was most recently updated.

      " + } + } + }, + "IdempotencyToken":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9-]*" + }, + "IdentitySourceDetails":{ + "type":"structure", + "members":{ + "clientIds":{ + "shape":"ClientIds", + "documentation":"

      The application client IDs associated with the specified Amazon Cognito user pool that are enabled for this identity source.

      " + }, + "userPoolArn":{ + "shape":"UserPoolArn", + "documentation":"

      The Amazon Resource Name (ARN) of the Amazon Cognito user pool whose identities are accessible to this Verified Permissions policy store.

      " + }, + "discoveryUrl":{ + "shape":"DiscoveryUrl", + "documentation":"

      The well-known URL that points to this user pool's OIDC discovery endpoint. This is a URL string in the following format. This URL replaces the placeholders for both the Amazon Web Services Region and the user pool identifier with those appropriate for this user pool.

      https://cognito-idp.<region>.amazonaws.com/<user-pool-id>/.well-known/openid-configuration

      " + }, + "openIdIssuer":{ + "shape":"OpenIdIssuer", + "documentation":"

      A string that identifies the type of OIDC service represented by this identity source.

      At this time, the only valid value is cognito.

      " + } + }, + "documentation":"

      A structure that contains configuration of the identity source.

      This data type is used as a response parameter for the CreateIdentitySource operation.

      " + }, + "IdentitySourceFilter":{ + "type":"structure", + "members":{ + "principalEntityType":{ + "shape":"PrincipalEntityType", + "documentation":"

      The Cedar entity type of the principals returned by the identity provider (IdP) associated with this identity source.

      " + } + }, + "documentation":"

      A structure that defines characteristics of an identity source that you can use to filter.

      This data type is used as a request parameter for the ListIdentitySources operation.

      " + }, + "IdentitySourceFilters":{ + "type":"list", + "member":{"shape":"IdentitySourceFilter"}, + "max":10, + "min":0 + }, + "IdentitySourceId":{ + "type":"string", + "max":200, + "min":1, + "pattern":"[a-zA-Z0-9-]*" + }, + "IdentitySourceItem":{ + "type":"structure", + "required":[ + "createdDate", + "details", + "identitySourceId", + "lastUpdatedDate", + "policyStoreId", + "principalEntityType" + ], + "members":{ + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time the identity source was originally created.

      " + }, + "details":{ + "shape":"IdentitySourceItemDetails", + "documentation":"

      A structure that contains the details of the associated identity provider (IdP).

      " + }, + "identitySourceId":{ + "shape":"IdentitySourceId", + "documentation":"

      The unique identifier of the identity source.

      " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time the identity source was most recently updated.

      " + }, + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      The identifier of the policy store that contains the identity source.

      " + }, + "principalEntityType":{ + "shape":"PrincipalEntityType", + "documentation":"

      The Cedar entity type of the principals returned from the IdP associated with this identity source.

      " + } + }, + "documentation":"

      A structure that defines an identity source.

      This data type is used as a response parameter for the ListIdentitySources operation.

      " + }, + "IdentitySourceItemDetails":{ + "type":"structure", + "members":{ + "clientIds":{ + "shape":"ClientIds", + "documentation":"

      The application client IDs associated with the specified Amazon Cognito user pool that are enabled for this identity source.

      " + }, + "userPoolArn":{ + "shape":"UserPoolArn", + "documentation":"

      The Amazon Cognito user pool whose identities are accessible to this Verified Permissions policy store.

      " + }, + "discoveryUrl":{ + "shape":"DiscoveryUrl", + "documentation":"

      The well-known URL that points to this user pool's OIDC discovery endpoint. This is a URL string in the following format. This URL replaces the placeholders for both the Amazon Web Services Region and the user pool identifier with those appropriate for this user pool.

      https://cognito-idp.<region>.amazonaws.com/<user-pool-id>/.well-known/openid-configuration

      " + }, + "openIdIssuer":{ + "shape":"OpenIdIssuer", + "documentation":"

      A string that identifies the type of OIDC service represented by this identity source.

      At this time, the only valid value is cognito.

      " + } + }, + "documentation":"

      A structure that contains configuration of the identity source.

      This data type is used as a response parameter for the CreateIdentitySource operation.

      " + }, + "IdentitySources":{ + "type":"list", + "member":{"shape":"IdentitySourceItem"} + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

      The request failed because of an internal error. Try your request again later.

      ", + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "IsAuthorizedInput":{ + "type":"structure", + "required":["policyStoreId"], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      Specifies the ID of the policy store. Policies in this policy store will be used to make an authorization decision for the input.

      " + }, + "principal":{ + "shape":"EntityIdentifier", + "documentation":"

      Specifies the principal for which the authorization decision is to be made.

      " + }, + "action":{ + "shape":"ActionIdentifier", + "documentation":"

      Specifies the requested action to be authorized. For example, is the principal authorized to perform this action on the resource?

      " + }, + "resource":{ + "shape":"EntityIdentifier", + "documentation":"

      Specifies the resource for which the authorization decision is to be made.

      " + }, + "context":{ + "shape":"ContextDefinition", + "documentation":"

      Specifies additional context that can be used to make more granular authorization decisions.

      " + }, + "entities":{ + "shape":"EntitiesDefinition", + "documentation":"

      Specifies the list of resources and principals and their associated attributes that Verified Permissions can examine when evaluating the policies.

      You can include only principal and resource entities in this parameter; you can't include actions. You must specify actions in the schema.

      " + } + } + }, + "IsAuthorizedOutput":{ + "type":"structure", + "required":[ + "decision", + "determiningPolicies", + "errors" + ], + "members":{ + "decision":{ + "shape":"Decision", + "documentation":"

      An authorization decision that indicates if the authorization request should be allowed or denied.

      " + }, + "determiningPolicies":{ + "shape":"DeterminingPolicyList", + "documentation":"

      The list of determining policies used to make the authorization decision. For example, if there are two matching policies, where one is a forbid and the other is a permit, then the forbid policy will be the determining policy. In the case of multiple matching permit policies then there would be multiple determining policies. In the case that no policies match, and hence the response is DENY, there would be no determining policies.

      " + }, + "errors":{ + "shape":"EvaluationErrorList", + "documentation":"

      Errors that occurred while making an authorization decision, for example, a policy references an entity or entity attribute that does not exist in the slice.

      " + } + } + }, + "IsAuthorizedWithTokenInput":{ + "type":"structure", + "required":["policyStoreId"], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      Specifies the ID of the policy store. Policies in this policy store will be used to make an authorization decision for the input.

      " + }, + "identityToken":{ + "shape":"Token", + "documentation":"

      Specifies an identity token for the principal to be authorized. This token is provided to you by the identity provider (IdP) associated with the specified identity source. You must specify either an AccessToken or an IdentityToken, but not both.

      " + }, + "accessToken":{ + "shape":"Token", + "documentation":"

      Specifies an access token for the principal to be authorized. This token is provided to you by the identity provider (IdP) associated with the specified identity source. You must specify either an AccessToken or an IdentityToken, but not both.

      " + }, + "action":{ + "shape":"ActionIdentifier", + "documentation":"

      Specifies the requested action to be authorized. Is the specified principal authorized to perform this action on the specified resource.

      " + }, + "resource":{ + "shape":"EntityIdentifier", + "documentation":"

      Specifies the resource for which the authorization decision is made. For example, is the principal allowed to perform the action on the resource?

      " + }, + "context":{ + "shape":"ContextDefinition", + "documentation":"

      Specifies additional context that can be used to make more granular authorization decisions.

      " + }, + "entities":{ + "shape":"EntitiesDefinition", + "documentation":"

      Specifies the list of resources and principals and their associated attributes that Verified Permissions can examine when evaluating the policies.

      You can include only principal and resource entities in this parameter; you can't include actions. You must specify actions in the schema.

      " + } + } + }, + "IsAuthorizedWithTokenOutput":{ + "type":"structure", + "required":[ + "decision", + "determiningPolicies", + "errors" + ], + "members":{ + "decision":{ + "shape":"Decision", + "documentation":"

      An authorization decision that indicates if the authorization request should be allowed or denied.

      " + }, + "determiningPolicies":{ + "shape":"DeterminingPolicyList", + "documentation":"

      The list of determining policies used to make the authorization decision. For example, if there are multiple matching policies, where at least one is a forbid policy, then because forbid always overrides permit the forbid policies are the determining policies. If all matching policies are permit policies, then those policies are the determining policies. When no policies match and the response is the default DENY, there are no determining policies.

      " + }, + "errors":{ + "shape":"EvaluationErrorList", + "documentation":"

      Errors that occurred while making an authorization decision. For example, a policy references an entity or entity attribute that does not exist in the slice.

      " + } + } + }, + "ListIdentitySourcesInput":{ + "type":"structure", + "required":["policyStoreId"], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      Specifies the ID of the policy store that contains the identity sources that you want to list.

      " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

      Specifies that you want to receive the next page of results. Valid only if you received a NextToken response in the previous request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous call's NextToken response to request the next page of results.

      " + }, + "maxResults":{ + "shape":"ListIdentitySourcesMaxResults", + "documentation":"

      Specifies the total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

      " + }, + "filters":{ + "shape":"IdentitySourceFilters", + "documentation":"

      Specifies characteristics of an identity source that you can use to limit the output to matching identity sources.

      " + } + } + }, + "ListIdentitySourcesMaxResults":{ + "type":"integer", + "box":true, + "max":200, + "min":1 + }, + "ListIdentitySourcesOutput":{ + "type":"structure", + "required":["identitySources"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

      If present, this value indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null. This indicates that this is the last page of results.

      " + }, + "identitySources":{ + "shape":"IdentitySources", + "documentation":"

      The list of identity sources stored in the specified policy store.

      " + } + } + }, + "ListPoliciesInput":{ + "type":"structure", + "required":["policyStoreId"], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      Specifies the ID of the policy store you want to list policies from.

      " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

      Specifies that you want to receive the next page of results. Valid only if you received a NextToken response in the previous request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous call's NextToken response to request the next page of results.

      " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

      Specifies the total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

      " + }, + "filter":{ + "shape":"PolicyFilter", + "documentation":"

      Specifies a filter that limits the response to only policies that match the specified criteria. For example, you list only the policies that reference a specified principal.

      " + } + } + }, + "ListPoliciesOutput":{ + "type":"structure", + "required":["policies"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

      If present, this value indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null. This indicates that this is the last page of results.

      " + }, + "policies":{ + "shape":"PolicyList", + "documentation":"

      Lists all policies that are available in the specified policy store.

      " + } + } + }, + "ListPolicyStoresInput":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

      Specifies that you want to receive the next page of results. Valid only if you received a NextToken response in the previous request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous call's NextToken response to request the next page of results.

      " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

      Specifies the total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

      " + } + } + }, + "ListPolicyStoresOutput":{ + "type":"structure", + "required":["policyStores"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

      If present, this value indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null. This indicates that this is the last page of results.

      " + }, + "policyStores":{ + "shape":"PolicyStoreList", + "documentation":"

      The list of policy stores in the account.

      " + } + } + }, + "ListPolicyTemplatesInput":{ + "type":"structure", + "required":["policyStoreId"], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      Specifies the ID of the policy store that contains the policy templates you want to list.

      " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

      Specifies that you want to receive the next page of results. Valid only if you received a NextToken response in the previous request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous call's NextToken response to request the next page of results.

      " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

      Specifies the total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

      " + } + } + }, + "ListPolicyTemplatesOutput":{ + "type":"structure", + "required":["policyTemplates"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

      If present, this value indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null. This indicates that this is the last page of results.

      " + }, + "policyTemplates":{ + "shape":"PolicyTemplatesList", + "documentation":"

      The list of the policy templates in the specified policy store.

      " + } + } + }, + "LongAttribute":{ + "type":"long", + "box":true + }, + "MaxResults":{ + "type":"integer", + "box":true, + "max":20, + "min":1 + }, + "Namespace":{ + "type":"string", + "max":100, + "min":1, + "pattern":".*" + }, + "NamespaceList":{ + "type":"list", + "member":{"shape":"Namespace"} + }, + "NextToken":{ + "type":"string", + "max":8000, + "min":1, + "pattern":"[A-Za-z0-9-_=+/\\.]*" + }, + "OpenIdIssuer":{ + "type":"string", + "enum":["COGNITO"] + }, + "ParentList":{ + "type":"list", + "member":{"shape":"EntityIdentifier"}, + "max":100, + "min":0 + }, + "PolicyDefinition":{ + "type":"structure", + "members":{ + "static":{ + "shape":"StaticPolicyDefinition", + "documentation":"

      A structure that describes a static policy. A static policy doesn't use a template or allow placeholders for entities.

      " + }, + "templateLinked":{ + "shape":"TemplateLinkedPolicyDefinition", + "documentation":"

      A structure that describes a policy that was instantiated from a template. The template can specify placeholders for principal and resource. When you use CreatePolicy to create a policy from a template, you specify the exact principal and resource to use for the instantiated policy.

      " + } + }, + "documentation":"

      A structure that contains the details for a Cedar policy definition. It includes the policy type, a description, and a policy body. This is a top level data type used to create a policy.

      This data type is used as a request parameter for the CreatePolicy operation. This structure must always have either a static or a templateLinked element.

      ", + "union":true + }, + "PolicyDefinitionDetail":{ + "type":"structure", + "members":{ + "static":{ + "shape":"StaticPolicyDefinitionDetail", + "documentation":"

      Information about a static policy that wasn't created with a policy template.

      " + }, + "templateLinked":{ + "shape":"TemplateLinkedPolicyDefinitionDetail", + "documentation":"

      Information about a template-linked policy that was created by instantiating a policy template.

      " + } + }, + "documentation":"

      A structure that describes a policy definition. It must always have either a static or a templateLinked element.

      This data type is used as a response parameter for the GetPolicy operation.

      ", + "union":true + }, + "PolicyDefinitionItem":{ + "type":"structure", + "members":{ + "static":{ + "shape":"StaticPolicyDefinitionItem", + "documentation":"

      Information about a static policy that wasn't created with a policy template.

      " + }, + "templateLinked":{ + "shape":"TemplateLinkedPolicyDefinitionItem", + "documentation":"

      Information about a template-linked policy that was created by instantiating a policy template.

      " + } + }, + "documentation":"

      A structure that describes a PolicyDefinition. It will always have either a StaticPolicy or a TemplateLinkedPolicy element.

      This data type is used as a response parameter for the CreatePolicy and ListPolicies operations.

      ", + "union":true + }, + "PolicyFilter":{ + "type":"structure", + "members":{ + "principal":{ + "shape":"EntityReference", + "documentation":"

      Filters the output to only policies that reference the specified principal.

      " + }, + "resource":{ + "shape":"EntityReference", + "documentation":"

      Filters the output to only policies that reference the specified resource.

      " + }, + "policyType":{ + "shape":"PolicyType", + "documentation":"

      Filters the output to only policies of the specified type.

      " + }, + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

      Filters the output to only template-linked policies that were instantiated from the specified policy template.

      " + } + }, + "documentation":"

      Contains information about a filter to refine policies returned in a query.

      This data type is used as a request parameter for the ListPolicies operation.

      " + }, + "PolicyId":{ + "type":"string", + "max":200, + "min":1, + "pattern":"[a-zA-Z0-9-]*" + }, + "PolicyItem":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyId", + "policyType", + "definition", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      The identifier of the PolicyStore where the policy you want information about is stored.

      " + }, + "policyId":{ + "shape":"PolicyId", + "documentation":"

      The identifier of the policy you want information about.

      " + }, + "policyType":{ + "shape":"PolicyType", + "documentation":"

      The type of the policy. This is one of the following values:

      • static

      • templateLinked

      " + }, + "principal":{ + "shape":"EntityIdentifier", + "documentation":"

      The principal associated with the policy.

      " + }, + "resource":{ + "shape":"EntityIdentifier", + "documentation":"

      The resource associated with the policy.

      " + }, + "definition":{ + "shape":"PolicyDefinitionItem", + "documentation":"

      The policy definition of an item in the list of policies returned.

      " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time the policy was created.

      " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time the policy was most recently updated.

      " + } + }, + "documentation":"

      Contains information about a policy.

      This data type is used as a response parameter for the ListPolicies operation.

      " + }, + "PolicyList":{ + "type":"list", + "member":{"shape":"PolicyItem"} + }, + "PolicyStatement":{ + "type":"string", + "max":10000, + "min":1 + }, + "PolicyStoreId":{ + "type":"string", + "max":200, + "min":1, + "pattern":"[a-zA-Z0-9-]*" + }, + "PolicyStoreItem":{ + "type":"structure", + "required":[ + "policyStoreId", + "arn", + "createdDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      The unique identifier of the policy store.

      " + }, + "arn":{ + "shape":"ResourceArn", + "documentation":"

      The Amazon Resource Name (ARN) of the policy store.

      " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time the policy was created.

      " + } + }, + "documentation":"

      Contains information about a policy store.

      This data type is used as a response parameter for the ListPolicyStores operation.

      " + }, + "PolicyStoreList":{ + "type":"list", + "member":{"shape":"PolicyStoreItem"} + }, + "PolicyTemplateDescription":{ + "type":"string", + "max":150, + "min":0 + }, + "PolicyTemplateId":{ + "type":"string", + "max":200, + "min":1, + "pattern":"[a-zA-Z0-9-]*" + }, + "PolicyTemplateItem":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyTemplateId", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      The unique identifier of the policy store that contains the template.

      " + }, + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

      The unique identifier of the policy template.

      " + }, + "description":{ + "shape":"PolicyTemplateDescription", + "documentation":"

      The description attached to the policy template.

      " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the policy template was created.

      " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the policy template was most recently updated.

      " + } + }, + "documentation":"

      Contains details about a policy template.

      This data type is used as a response parameter for the ListPolicyTemplates operation.

      " + }, + "PolicyTemplatesList":{ + "type":"list", + "member":{"shape":"PolicyTemplateItem"} + }, + "PolicyType":{ + "type":"string", + "enum":[ + "STATIC", + "TEMPLATE_LINKED" + ] + }, + "PrincipalEntityType":{ + "type":"string", + "max":200, + "min":1, + "pattern":".*" + }, + "PutSchemaInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "definition" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      Specifies the ID of the policy store in which to place the schema.

      " + }, + "definition":{ + "shape":"SchemaDefinition", + "documentation":"

      Specifies the definition of the schema to be stored. The schema definition must be written in Cedar schema JSON.

      " + } + } + }, + "PutSchemaOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "namespaces", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      The unique ID of the policy store that contains the schema.

      " + }, + "namespaces":{ + "shape":"NamespaceList", + "documentation":"

      Identifies the namespaces of the entities referenced by this schema.

      " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the schema was originally created.

      " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the schema was last updated.

      " + } + } + }, + "RecordAttribute":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"AttributeValue"} + }, + "ResourceArn":{ + "type":"string", + "max":2500, + "min":1, + "pattern":"arn:[^:]*:[^:]*:[^:]*:[^:]*:.*" + }, + "ResourceConflict":{ + "type":"structure", + "required":[ + "resourceId", + "resourceType" + ], + "members":{ + "resourceId":{ + "shape":"String", + "documentation":"

      The unique identifier of the resource involved in a conflict.

      " + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

      The type of the resource involved in a conflict.

      " + } + }, + "documentation":"

      Contains information about a resource conflict.

      " + }, + "ResourceConflictList":{ + "type":"list", + "member":{"shape":"ResourceConflict"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

      The unique ID of the resource referenced in the failed request.

      " + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

      The resource type of the resource referenced in the failed request.

      " + } + }, + "documentation":"

      The request failed because it references a resource that doesn't exist.

      ", + "exception":true + }, + "ResourceType":{ + "type":"string", + "enum":[ + "IDENTITY_SOURCE", + "POLICY_STORE", + "POLICY", + "POLICY_TEMPLATE", + "SCHEMA" + ] + }, + "SchemaDefinition":{ + "type":"structure", + "members":{ + "cedarJson":{ + "shape":"SchemaJson", + "documentation":"

      A JSON string representation of the schema supported by applications that use this policy store. For more information, see Policy store schema in the Amazon Verified Permissions User Guide.

      " + } + }, + "documentation":"

      Contains a list of principal types, resource types, and actions that can be specified in policies stored in the same policy store. If the validation mode for the policy store is set to STRICT, then policies that can't be validated by this schema are rejected by Verified Permissions and can't be stored in the policy store.

      ", + "union":true + }, + "SchemaJson":{ + "type":"string", + "max":10000, + "min":1 + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "message", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

      The unique ID of the resource referenced in the failed request.

      " + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

      The resource type of the resource referenced in the failed request.

      " + }, + "serviceCode":{ + "shape":"String", + "documentation":"

      The code for the Amazon Web Service that owns the quota.

      " + }, + "quotaCode":{ + "shape":"String", + "documentation":"

      The quota code recognized by the Amazon Web Services Service Quotas service.

      " + } + }, + "documentation":"

      The request failed because it would cause a service quota to be exceeded.

      ", + "exception":true + }, + "SetAttribute":{ + "type":"list", + "member":{"shape":"AttributeValue"} + }, + "StaticPolicyDefinition":{ + "type":"structure", + "required":["statement"], + "members":{ + "description":{ + "shape":"StaticPolicyDescription", + "documentation":"

      The description of the static policy.

      " + }, + "statement":{ + "shape":"PolicyStatement", + "documentation":"

      The policy content of the static policy, written in the Cedar policy language.

      " + } + }, + "documentation":"

      Contains information about a static policy.

      This data type is used as a field that is part of the PolicyDefinitionDetail type.

      " + }, + "StaticPolicyDefinitionDetail":{ + "type":"structure", + "required":["statement"], + "members":{ + "description":{ + "shape":"StaticPolicyDescription", + "documentation":"

      A description of the static policy.

      " + }, + "statement":{ + "shape":"PolicyStatement", + "documentation":"

      The content of the static policy written in the Cedar policy language.

      " + } + }, + "documentation":"

      A structure that contains details about a static policy. It includes the description and policy body.

      This data type is used within a PolicyDefinition structure as part of a request parameter for the CreatePolicy operation.

      " + }, + "StaticPolicyDefinitionItem":{ + "type":"structure", + "members":{ + "description":{ + "shape":"StaticPolicyDescription", + "documentation":"

      A description of the static policy.

      " + } + }, + "documentation":"

      A structure that contains details about a static policy. It includes the description and policy statement.

      This data type is used within a PolicyDefinition structure as part of a request parameter for the CreatePolicy operation.

      " + }, + "StaticPolicyDescription":{ + "type":"string", + "max":150, + "min":0 + }, + "String":{"type":"string"}, + "StringAttribute":{"type":"string"}, + "TemplateLinkedPolicyDefinition":{ + "type":"structure", + "required":["policyTemplateId"], + "members":{ + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

      The unique identifier of the policy template used to create this policy.

      " + }, + "principal":{ + "shape":"EntityIdentifier", + "documentation":"

      The principal associated with this template-linked policy. Verified Permissions substitutes this principal for the ?principal placeholder in the policy template when it evaluates an authorization request.

      " + }, + "resource":{ + "shape":"EntityIdentifier", + "documentation":"

      The resource associated with this template-linked policy. Verified Permissions substitutes this resource for the ?resource placeholder in the policy template when it evaluates an authorization request.

      " + } + }, + "documentation":"

      Contains information about a policy created by instantiating a policy template.

      " + }, + "TemplateLinkedPolicyDefinitionDetail":{ + "type":"structure", + "required":["policyTemplateId"], + "members":{ + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

      The unique identifier of the policy template used to create this policy.

      " + }, + "principal":{ + "shape":"EntityIdentifier", + "documentation":"

      The principal associated with this template-linked policy. Verified Permissions substitutes this principal for the ?principal placeholder in the policy template when it evaluates an authorization request.

      " + }, + "resource":{ + "shape":"EntityIdentifier", + "documentation":"

      The resource associated with this template-linked policy. Verified Permissions substitutes this resource for the ?resource placeholder in the policy template when it evaluates an authorization request.

      " + } + }, + "documentation":"

      Contains information about a policy that was created by instantiating a policy template.

      This data type is used as a response parameter for the GetPolicy operation.

      " + }, + "TemplateLinkedPolicyDefinitionItem":{ + "type":"structure", + "required":["policyTemplateId"], + "members":{ + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

      The unique identifier of the policy template used to create this policy.

      " + }, + "principal":{ + "shape":"EntityIdentifier", + "documentation":"

      The principal associated with this template-linked policy. Verified Permissions substitutes this principal for the ?principal placeholder in the policy template when it evaluates an authorization request.

      " + }, + "resource":{ + "shape":"EntityIdentifier", + "documentation":"

      The resource associated with this template-linked policy. Verified Permissions substitutes this resource for the ?resource placeholder in the policy template when it evaluates an authorization request.

      " + } + }, + "documentation":"

      Contains information about a policy created by instantiating a policy template.

      This data type is used as a response parameter for the ListPolicies operation.

      " + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "serviceCode":{ + "shape":"String", + "documentation":"

      The code for the Amazon Web Service that owns the quota.

      " + }, + "quotaCode":{ + "shape":"String", + "documentation":"

      The quota code recognized by the Amazon Web Services Service Quotas service.

      " + } + }, + "documentation":"

      The request failed because it exceeded a throttling quota.

      ", + "exception":true, + "retryable":{"throttling":true} + }, + "TimestampFormat":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "Token":{ + "type":"string", + "max":131072, + "min":1, + "pattern":"[A-Za-z0-9-_=]+.[A-Za-z0-9-_=]+.[A-Za-z0-9-_=]+" + }, + "UpdateCognitoUserPoolConfiguration":{ + "type":"structure", + "required":["userPoolArn"], + "members":{ + "userPoolArn":{ + "shape":"UserPoolArn", + "documentation":"

      The Amazon Resource Name (ARN) of the Amazon Cognito user pool associated with this identity source.

      " + }, + "clientIds":{ + "shape":"ClientIds", + "documentation":"

      The client ID of an app client that is configured for the specified Amazon Cognito user pool.

      " + } + }, + "documentation":"

      Contains configuration details of an Amazon Cognito user pool for use with an identity source.

      " + }, + "UpdateConfiguration":{ + "type":"structure", + "members":{ + "cognitoUserPoolConfiguration":{ + "shape":"UpdateCognitoUserPoolConfiguration", + "documentation":"

      Contains configuration details of an Amazon Cognito user pool.

      " + } + }, + "documentation":"

      Contains an updated configuration to replace the configuration in an existing identity source.

      At this time, the only valid member of this structure is an Amazon Cognito user pool configuration.

      You must specify a userPoolArn, and optionally, a ClientId.

      ", + "union":true + }, + "UpdateIdentitySourceInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "identitySourceId", + "updateConfiguration" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      Specifies the ID of the policy store that contains the identity source that you want to update.

      " + }, + "identitySourceId":{ + "shape":"IdentitySourceId", + "documentation":"

      Specifies the ID of the identity source that you want to update.

      " + }, + "updateConfiguration":{ + "shape":"UpdateConfiguration", + "documentation":"

      Specifies the details required to communicate with the identity provider (IdP) associated with this identity source.

      At this time, the only valid member of this structure is an Amazon Cognito user pool configuration.

      You must specify a userPoolArn, and optionally, a ClientId.

      " + }, + "principalEntityType":{ + "shape":"PrincipalEntityType", + "documentation":"

      Specifies the data type of principals generated for identities authenticated by the identity source.

      " + } + } + }, + "UpdateIdentitySourceOutput":{ + "type":"structure", + "required":[ + "createdDate", + "identitySourceId", + "lastUpdatedDate", + "policyStoreId" + ], + "members":{ + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the updated identity source was originally created.

      " + }, + "identitySourceId":{ + "shape":"IdentitySourceId", + "documentation":"

      The ID of the updated identity source.

      " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the identity source was most recently updated.

      " + }, + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      The ID of the policy store that contains the updated identity source.

      " + } + } + }, + "UpdatePolicyDefinition":{ + "type":"structure", + "members":{ + "static":{ + "shape":"UpdateStaticPolicyDefinition", + "documentation":"

      Contains details about the updates to be applied to a static policy.

      " + } + }, + "documentation":"

      Contains information about updates to be applied to a policy.

      This data type is used as a request parameter in the UpdatePolicy operation.

      ", + "union":true + }, + "UpdatePolicyInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyId", + "definition" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      Specifies the ID of the policy store that contains the policy that you want to update.

      " + }, + "policyId":{ + "shape":"PolicyId", + "documentation":"

      Specifies the ID of the policy that you want to update. To find this value, you can use ListPolicies.

      " + }, + "definition":{ + "shape":"UpdatePolicyDefinition", + "documentation":"

      Specifies the updated policy content that you want to replace on the specified policy. The content must be valid Cedar policy language text.

      You can change only the following elements from the policy definition:

      • The action referenced by the policy.

      • Any conditional clauses, such as when or unless clauses.

      You can't change the following elements:

      • Changing from static to templateLinked.

      • Changing the effect of the policy from permit or forbid.

      • The principal referenced by the policy.

      • The resource referenced by the policy.

      " + } + } + }, + "UpdatePolicyOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyId", + "policyType", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      The ID of the policy store that contains the policy that was updated.

      " + }, + "policyId":{ + "shape":"PolicyId", + "documentation":"

      The ID of the policy that was updated.

      " + }, + "policyType":{ + "shape":"PolicyType", + "documentation":"

      The type of the policy that was updated.

      " + }, + "principal":{ + "shape":"EntityIdentifier", + "documentation":"

      The principal specified in the policy's scope. This element isn't included in the response when Principal isn't present in the policy content.

      " + }, + "resource":{ + "shape":"EntityIdentifier", + "documentation":"

      The resource specified in the policy's scope. This element isn't included in the response when Resource isn't present in the policy content.

      " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the policy was originally created.

      " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the policy was most recently updated.

      " + } + } + }, + "UpdatePolicyStoreInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "validationSettings" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      Specifies the ID of the policy store that you want to update.

      " + }, + "validationSettings":{ + "shape":"ValidationSettings", + "documentation":"

      A structure that defines the validation settings that you want to enable for the policy store.

      " + } + } + }, + "UpdatePolicyStoreOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "arn", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      The ID of the updated policy store.

      " + }, + "arn":{ + "shape":"ResourceArn", + "documentation":"

      The Amazon Resource Name (ARN) of the updated policy store.

      " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the policy store was originally created.

      " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the policy store was most recently updated.

      " + } + } + }, + "UpdatePolicyTemplateInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyTemplateId", + "statement" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      Specifies the ID of the policy store that contains the policy template that you want to update.

      " + }, + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

      Specifies the ID of the policy template that you want to update.

      " + }, + "description":{ + "shape":"PolicyTemplateDescription", + "documentation":"

      Specifies a new description to apply to the policy template.

      " + }, + "statement":{ + "shape":"PolicyStatement", + "documentation":"

      Specifies new statement content written in Cedar policy language to replace the current body of the policy template.

      You can change only the following elements of the policy body:

      • The action referenced by the policy template.

      • Any conditional clauses, such as when or unless clauses.

      You can't change the following elements:

      • The effect (permit or forbid) of the policy template.

      • The principal referenced by the policy template.

      • The resource referenced by the policy template.

      " + } + } + }, + "UpdatePolicyTemplateOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyTemplateId", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

      The ID of the policy store that contains the updated policy template.

      " + }, + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

      The ID of the updated policy template.

      " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the policy template was originally created.

      " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

      The date and time that the policy template was most recently updated.

      " + } + } + }, + "UpdateStaticPolicyDefinition":{ + "type":"structure", + "required":["statement"], + "members":{ + "description":{ + "shape":"StaticPolicyDescription", + "documentation":"

      Specifies the description to be added to or replaced on the static policy.

      " + }, + "statement":{ + "shape":"PolicyStatement", + "documentation":"

      Specifies the Cedar policy language text to be added to or replaced on the static policy.

      You can change only the following elements from the original content:

      • The action referenced by the policy.

      • Any conditional clauses, such as when or unless clauses.

      You can't change the following elements:

      • Changing from StaticPolicy to TemplateLinkedPolicy.

      • The effect (permit or forbid) of the policy.

      • The principal referenced by the policy.

      • The resource referenced by the policy.

      " + } + }, + "documentation":"

      Contains information about an update to a static policy.

      " + }, + "UserPoolArn":{ + "type":"string", + "max":255, + "min":1, + "pattern":"arn:[a-zA-Z0-9-]+:cognito-idp:(([a-zA-Z0-9-]+:\\d{12}:userpool/[\\w-]+_[0-9a-zA-Z]+))" + }, + "ValidationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

      The list of fields that aren't valid.

      " + } + }, + "documentation":"

      The request failed because one or more input parameters don't satisfy their constraint requirements. The output is provided as a list of fields and a reason for each field that isn't valid.

      The possible reasons include the following:

      • UnrecognizedEntityType

        The policy includes an entity type that isn't found in the schema.

      • UnrecognizedActionId

        The policy includes an action id that isn't found in the schema.

      • InvalidActionApplication

        The policy includes an action that, according to the schema, doesn't support the specified principal and resource.

      • UnexpectedType

        The policy included an operand that isn't a valid type for the specified operation.

      • IncompatibleTypes

        The types of elements included in a set, or the types of expressions used in an if...then...else clause aren't compatible in this context.

      • MissingAttribute

        The policy attempts to access a record or entity attribute that isn't specified in the schema. Test for the existence of the attribute first before attempting to access its value. For more information, see the has (presence of attribute test) operator in the Cedar Policy Language Guide.

      • UnsafeOptionalAttributeAccess

        The policy attempts to access a record or entity attribute that is optional and isn't guaranteed to be present. Test for the existence of the attribute first before attempting to access its value. For more information, see the has (presence of attribute test) operator in the Cedar Policy Language Guide.

      • ImpossiblePolicy

        Cedar has determined that a policy condition always evaluates to false. If the policy is always false, it can never apply to any query, and so it can never affect an authorization decision.

      • WrongNumberArguments

        The policy references an extension type with the wrong number of arguments.

      • FunctionArgumentValidationError

        Cedar couldn't parse the argument passed to an extension type. For example, a string that is to be parsed as an IPv4 address can contain only digits and the period character.

      ", + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "path", + "message" + ], + "members":{ + "path":{ + "shape":"String", + "documentation":"

      The path to the specific element that Verified Permissions found to be not valid.

      " + }, + "message":{ + "shape":"String", + "documentation":"

      Describes the policy validation error.

      " + } + }, + "documentation":"

      Details about a field that failed policy validation.

      " + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationMode":{ + "type":"string", + "enum":[ + "OFF", + "STRICT" + ] + }, + "ValidationSettings":{ + "type":"structure", + "required":["mode"], + "members":{ + "mode":{ + "shape":"ValidationMode", + "documentation":"

      The validation mode currently configured for this policy store. The valid values are:

      • OFF – Neither Verified Permissions nor Cedar perform any validation on policies. No validation errors are reported by either service.

      • STRICT – Requires a schema to be present in the policy store. Cedar performs validation on all submitted new or updated static policies and policy templates. Any that fail validation are rejected and Cedar doesn't store them in the policy store.

      If Mode=STRICT and the policy store doesn't contain a schema, Verified Permissions rejects all static policies and policy templates because there is no schema to validate against.

      To submit a static policy or policy template without a schema, you must turn off validation.

      " + } + }, + "documentation":"

      A structure that contains Cedar policy validation settings for the policy store. The validation mode determines which validation failures that Cedar considers serious enough to block acceptance of a new or edited static policy or policy template.

      This data type is used as a request parameter in the CreatePolicyStore and UpdatePolicyStore operations.

      " + } + }, + "documentation":"

      Amazon Verified Permissions is a permissions management service from Amazon Web Services. You can use Verified Permissions to manage permissions for your application, and authorize user access based on those permissions. Using Verified Permissions, application developers can grant access based on information about the users, resources, and requested actions. You can also evaluate additional information like group membership, attributes of the resources, and session context, such as time of request and IP addresses. Verified Permissions manages these permissions by letting you create and store authorization policies for your applications, such as consumer-facing web sites and enterprise business systems.

      Verified Permissions uses Cedar as the policy language to express your permission requirements. Cedar supports both role-based access control (RBAC) and attribute-based access control (ABAC) authorization models.

      For more information about configuring, administering, and using Amazon Verified Permissions in your applications, see the Amazon Verified Permissions User Guide.

      For more information about the Cedar policy language, see the Cedar Policy Language Guide.

      When you write Cedar policies that reference principals, resources and actions, you can define the unique identifiers used for each of those elements. We strongly recommend that you follow these best practices:

      • Use values like universally unique identifiers (UUIDs) for all principal and resource identifiers.

        For example, if user jane leaves the company, and you later let someone else use the name jane, then that new user automatically gets access to everything granted by policies that still reference User::\"jane\". Cedar can’t distinguish between the new user and the old. This applies to both principal and resource identifiers. Always use identifiers that are guaranteed unique and never reused to ensure that you don’t unintentionally grant access because of the presence of an old identifier in a policy.

        Where you use a UUID for an entity, we recommend that you follow it with the // comment specifier and the ‘friendly’ name of your entity. This helps to make your policies easier to understand. For example: principal == User::\"a1b2c3d4-e5f6-a1b2-c3d4-EXAMPLE11111\", // alice

      • Do not include personally identifying, confidential, or sensitive information as part of the unique identifier for your principals or resources. These identifiers are included in log entries shared in CloudTrail trails.

      Several operations return structures that appear similar, but have different purposes. As new functionality is added to the product, the structure used in a parameter of one operation might need to change in a way that wouldn't make sense for the same parameter in a different operation. To help you understand the purpose of each, the following naming convention is used for the structures:

      • Parameter type structures that end in Detail are used in Get operations.

      • Parameter type structures that end in Item are used in List operations.

      • Parameter type structures that use neither suffix are used in the mutating (create and update) operations.

      " +} diff --git a/services/verifiedpermissions/src/main/resources/codegen-resources/waiters-2.json b/services/verifiedpermissions/src/main/resources/codegen-resources/waiters-2.json new file mode 100644 index 000000000000..13f60ee66be6 --- /dev/null +++ b/services/verifiedpermissions/src/main/resources/codegen-resources/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/services/voiceid/pom.xml b/services/voiceid/pom.xml index 40d0d048d9f4..7fe0277df9c1 100644 --- a/services/voiceid/pom.xml +++ b/services/voiceid/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT voiceid AWS Java SDK :: Services :: Voice ID diff --git a/services/vpclattice/pom.xml b/services/vpclattice/pom.xml index 0997609296ea..667331ae77d9 100644 --- a/services/vpclattice/pom.xml +++ b/services/vpclattice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT vpclattice AWS Java SDK :: Services :: VPC Lattice diff --git a/services/waf/pom.xml b/services/waf/pom.xml index 309640e40511..4ccc751d5211 100644 --- a/services/waf/pom.xml +++ b/services/waf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT waf AWS Java SDK :: Services :: AWS WAF diff --git a/services/wafv2/pom.xml b/services/wafv2/pom.xml index 3607b9065bbb..e0526640888a 100644 --- a/services/wafv2/pom.xml +++ b/services/wafv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT wafv2 AWS Java SDK :: Services :: WAFV2 diff --git a/services/wafv2/src/main/resources/codegen-resources/service-2.json b/services/wafv2/src/main/resources/codegen-resources/service-2.json index b80004be22dd..d251b2c42e75 100644 --- a/services/wafv2/src/main/resources/codegen-resources/service-2.json +++ b/services/wafv2/src/main/resources/codegen-resources/service-2.json @@ -285,6 +285,35 @@ ], "documentation":"

      Deletes the specified WebACL.

      You can only use this if ManagedByFirewallManager is false in the specified WebACL.

      Before deleting any web ACL, first disassociate it from all resources.

      • To retrieve a list of the resources that are associated with a web ACL, use the following calls:

      • To disassociate a resource from a web ACL, use the following calls:

        • For regional resources, call DisassociateWebACL.

        • For Amazon CloudFront distributions, provide an empty web ACL ID in the CloudFront call UpdateDistribution. For information, see UpdateDistribution in the Amazon CloudFront API Reference.

      " }, + "DescribeAllManagedProducts":{ + "name":"DescribeAllManagedProducts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAllManagedProductsRequest"}, + "output":{"shape":"DescribeAllManagedProductsResponse"}, + "errors":[ + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFInternalErrorException"} + ], + "documentation":"

      Provides high-level information for the Amazon Web Services Managed Rules rule groups and Amazon Web Services Marketplace managed rule groups.

      " + }, + "DescribeManagedProductsByVendor":{ + "name":"DescribeManagedProductsByVendor", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeManagedProductsByVendorRequest"}, + "output":{"shape":"DescribeManagedProductsByVendorResponse"}, + "errors":[ + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"} + ], + "documentation":"

      Provides high-level information for the managed rule groups owned by a specific vendor.

      " + }, "DescribeManagedRuleGroup":{ "name":"DescribeManagedRuleGroup", "http":{ @@ -947,6 +976,37 @@ "type":"integer", "min":0 }, + "AWSManagedRulesACFPRuleSet":{ + "type":"structure", + "required":[ + "CreationPath", + "RegistrationPagePath", + "RequestInspection" + ], + "members":{ + "CreationPath":{ + "shape":"CreationPathString", + "documentation":"

      The path of the account creation endpoint for your application. This is the page on your website that accepts the completed registration form for a new user. This page must accept POST requests.

      For example, for the URL https://example.com/web/signup, you would provide the path /web/signup.

      " + }, + "RegistrationPagePath":{ + "shape":"RegistrationPagePathString", + "documentation":"

      The path of the account registration endpoint for your application. This is the page on your website that presents the registration form to new users.

      This page must accept GET text/html requests.

      For example, for the URL https://example.com/web/register, you would provide the path /web/register.

      " + }, + "RequestInspection":{ + "shape":"RequestInspectionACFP", + "documentation":"

      The criteria for inspecting account creation requests, used by the ACFP rule group to validate and track account creation attempts.

      " + }, + "ResponseInspection":{ + "shape":"ResponseInspection", + "documentation":"

      The criteria for inspecting responses to account creation requests, used by the ACFP rule group to track account creation success rates.

      Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

      The ACFP rule group evaluates the responses that your protected resources send back to client account creation attempts, keeping count of successful and failed attempts from each IP address and client session. Using this information, the rule group labels and mitigates requests from client sessions and IP addresses that have had too many successful account creation attempts in a short amount of time.

      " + }, + "EnableRegexInPath":{ + "shape":"Boolean", + "documentation":"

      Allow the use of regular expressions in the registration page path and the account creation path.

      " + } + }, + "documentation":"

      Details for your use of the account creation fraud prevention managed rule group, AWSManagedRulesACFPRuleSet. This configuration is used in ManagedRuleGroupConfig.

      " + }, "AWSManagedRulesATPRuleSet":{ "type":"structure", "required":["LoginPath"], @@ -961,7 +1021,11 @@ }, "ResponseInspection":{ "shape":"ResponseInspection", - "documentation":"

      The criteria for inspecting responses to login requests, used by the ATP rule group to track login failure rates.

      The ATP rule group evaluates the responses that your protected resources send back to client login attempts, keeping count of successful and failed attempts from each IP address and client session. Using this information, the rule group labels and mitigates requests from client sessions and IP addresses that submit too many failed login attempts in a short amount of time.

      Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

      " + "documentation":"

      The criteria for inspecting responses to login requests, used by the ATP rule group to track login failure rates.

      Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

      The ATP rule group evaluates the responses that your protected resources send back to client login attempts, keeping count of successful and failed attempts for each IP address and client session. Using this information, the rule group labels and mitigates requests from client sessions and IP addresses that have had too many failed login attempts in a short amount of time.

      " + }, + "EnableRegexInPath":{ + "shape":"Boolean", + "documentation":"

      Allow the use of regular expressions in the login page path.

      " } }, "documentation":"

      Details for your use of the account takeover prevention managed rule group, AWSManagedRulesATPRuleSet. This configuration is used in ManagedRuleGroupConfig.

      " @@ -1000,17 +1064,32 @@ "EXCLUDED_AS_COUNT" ] }, + "AddressField":{ + "type":"structure", + "required":["Identifier"], + "members":{ + "Identifier":{ + "shape":"FieldIdentifier", + "documentation":"

      The name of a single primary address field.

      How you specify the address fields depends on the request inspection payload type.

      • For JSON payloads, specify the field identifiers in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

        For example, for the JSON payload { \"form\": { \"primaryaddressline1\": \"THE_ADDRESS1\", \"primaryaddressline2\": \"THE_ADDRESS2\", \"primaryaddressline3\": \"THE_ADDRESS3\" } }, the address field identifiers are /form/primaryaddressline1, /form/primaryaddressline2, and /form/primaryaddressline3.

      • For form encoded payload types, use the HTML form names.

        For example, for an HTML form with input elements named primaryaddressline1, primaryaddressline2, and primaryaddressline3, the address field identifiers are primaryaddressline1, primaryaddressline2, and primaryaddressline3.

      " + } + }, + "documentation":"

      The name of a field in the request payload that contains part or all of your customer's primary physical address.

      This data type is used in the RequestInspectionACFP data type.

      " + }, + "AddressFields":{ + "type":"list", + "member":{"shape":"AddressField"} + }, "All":{ "type":"structure", "members":{ }, - "documentation":"

      Inspect all of the elements that WAF has parsed and extracted from the web request component that you've identified in your FieldToMatch specifications.

      This is used only in the FieldToMatch specification for some web request component types.

      JSON specification: \"All\": {}

      " + "documentation":"

      Inspect all of the elements that WAF has parsed and extracted from the web request component that you've identified in your FieldToMatch specifications.

      This is used in the FieldToMatch specification for some web request component types.

      JSON specification: \"All\": {}

      " }, "AllQueryArguments":{ "type":"structure", "members":{ }, - "documentation":"

      Inspect all query arguments of the web request.

      This is used only in the FieldToMatch specification for some web request component types.

      JSON specification: \"AllQueryArguments\": {}

      " + "documentation":"

      Inspect all query arguments of the web request.

      This is used in the FieldToMatch specification for some web request component types.

      JSON specification: \"AllQueryArguments\": {}

      " }, "AllowAction":{ "type":"structure", @@ -1084,7 +1163,7 @@ "members":{ "OversizeHandling":{ "shape":"OversizeHandling", - "documentation":"

      What WAF should do if the body is larger than WAF can inspect. WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to WAF for inspection.

      The default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL AssociationConfig, for additional processing fees.

      The options for oversize handling are the following:

      • CONTINUE - Inspect the body normally, according to the rule inspection criteria.

      • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

      • NO_MATCH - Treat the web request as not matching the rule statement.

      You can combine the MATCH or NO_MATCH settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.

      Default: CONTINUE

      " + "documentation":"

      What WAF should do if the body is larger than WAF can inspect. WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to WAF for inspection.

      The default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL AssociationConfig, for additional processing fees.

      The options for oversize handling are the following:

      • CONTINUE - Inspect the available body contents normally, according to the rule inspection criteria.

      • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

      • NO_MATCH - Treat the web request as not matching the rule statement.

      You can combine the MATCH or NO_MATCH settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.

      Default: CONTINUE

      " } }, "documentation":"

      Inspect the body of the web request. The body immediately follows the request headers.

      This is used to indicate the web request component to inspect, in the FieldToMatch specification.

      " @@ -1109,7 +1188,7 @@ "members":{ "SearchString":{ "shape":"SearchString", - "documentation":"

      A string value that you want WAF to search for. WAF searches only in the part of web requests that you designate for inspection in FieldToMatch. The maximum length of the value is 200 bytes.

      Valid values depend on the component that you specify for inspection in FieldToMatch:

      • Method: The HTTP method that you want WAF to search for. This indicates the type of operation specified in the request.

      • UriPath: The value that you want WAF to search for in the URI path, for example, /images/daily-ad.jpg.

      If SearchString includes alphabetic characters A-Z and a-z, note that the value is case sensitive.

      If you're using the WAF API

      Specify a base64-encoded version of the value. The maximum length of the value before you base64-encode it is 200 bytes.

      For example, suppose the value of Type is HEADER and the value of Data is User-Agent. If you want to search the User-Agent header for the value BadBot, you base64-encode BadBot using MIME base64-encoding and include the resulting value, QmFkQm90, in the value of SearchString.

      If you're using the CLI or one of the Amazon Web Services SDKs

      The value that you want WAF to search for. The SDK automatically base64 encodes the value.

      " + "documentation":"

      A string value that you want WAF to search for. WAF searches only in the part of web requests that you designate for inspection in FieldToMatch. The maximum length of the value is 200 bytes.

      Valid values depend on the component that you specify for inspection in FieldToMatch:

      • Method: The HTTP method that you want WAF to search for. This indicates the type of operation specified in the request.

      • UriPath: The value that you want WAF to search for in the URI path, for example, /images/daily-ad.jpg.

      • HeaderOrder: The comma-separated list of header names to match for. WAF creates a string that contains the ordered list of header names, from the headers in the web request, and then matches against that string.

      If SearchString includes alphabetic characters A-Z and a-z, note that the value is case sensitive.

      If you're using the WAF API

      Specify a base64-encoded version of the value. The maximum length of the value before you base64-encode it is 200 bytes.

      For example, suppose the value of Type is HEADER and the value of Data is User-Agent. If you want to search the User-Agent header for the value BadBot, you base64-encode BadBot using MIME base64-encoding and include the resulting value, QmFkQm90, in the value of SearchString.

      If you're using the CLI or one of the Amazon Web Services SDKs

      The value that you want WAF to search for. The SDK automatically base64 encodes the value.

      " }, "FieldToMatch":{ "shape":"FieldToMatch", @@ -1308,7 +1387,7 @@ }, "OversizeHandling":{ "shape":"OversizeHandling", - "documentation":"

      What WAF should do if the cookies of the request are larger than WAF can inspect. WAF does not support inspecting the entire contents of request cookies when they exceed 8 KB (8192 bytes) or 200 total cookies. The underlying host service forwards a maximum of 200 cookies and at most 8 KB of cookie contents to WAF.

      The options for oversize handling are the following:

      • CONTINUE - Inspect the cookies normally, according to the rule inspection criteria.

      • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

      • NO_MATCH - Treat the web request as not matching the rule statement.

      " + "documentation":"

      What WAF should do if the cookies of the request are more numerous or larger than WAF can inspect. WAF does not support inspecting the entire contents of request cookies when they exceed 8 KB (8192 bytes) or 200 total cookies. The underlying host service forwards a maximum of 200 cookies and at most 8 KB of cookie contents to WAF.

      The options for oversize handling are the following:

      • CONTINUE - Inspect the available cookies normally, according to the rule inspection criteria.

      • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

      • NO_MATCH - Treat the web request as not matching the rule statement.

      " } }, "documentation":"

      Inspect the cookies in the web request. You can specify the parts of the cookies to inspect and you can narrow the set of cookies to inspect by including or excluding specific keys.

      This is used to indicate the web request component to inspect, in the FieldToMatch specification.

      Example JSON: \"Cookies\": { \"MatchPattern\": { \"All\": {} }, \"MatchScope\": \"KEY\", \"OversizeHandling\": \"MATCH\" }

      " @@ -1813,6 +1892,12 @@ } } }, + "CreationPathString":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*\\S.*" + }, "CustomHTTPHeader":{ "type":"structure", "required":[ @@ -2100,6 +2185,51 @@ "members":{ } }, + "DescribeAllManagedProductsRequest":{ + "type":"structure", + "required":["Scope"], + "members":{ + "Scope":{ + "shape":"Scope", + "documentation":"

      Specifies whether this is for an Amazon CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an Amazon API Gateway REST API, an AppSync GraphQL API, an Amazon Cognito user pool, an App Runner service, or an Amazon Web Services Verified Access instance.

      To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

      • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

      • API and SDKs - For all calls, use the Region endpoint us-east-1.

      " + } + } + }, + "DescribeAllManagedProductsResponse":{ + "type":"structure", + "members":{ + "ManagedProducts":{ + "shape":"ManagedProductDescriptors", + "documentation":"

      High-level information for the Amazon Web Services Managed Rules rule groups and Amazon Web Services Marketplace managed rule groups.

      " + } + } + }, + "DescribeManagedProductsByVendorRequest":{ + "type":"structure", + "required":[ + "VendorName", + "Scope" + ], + "members":{ + "VendorName":{ + "shape":"VendorName", + "documentation":"

      The name of the managed rule group vendor. You use this, along with the rule group name, to identify a rule group.

      " + }, + "Scope":{ + "shape":"Scope", + "documentation":"

      Specifies whether this is for an Amazon CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an Amazon API Gateway REST API, an AppSync GraphQL API, an Amazon Cognito user pool, an App Runner service, or an Amazon Web Services Verified Access instance.

      To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

      • CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1.

      • API and SDKs - For all calls, use the Region endpoint us-east-1.

      " + } + } + }, + "DescribeManagedProductsByVendorResponse":{ + "type":"structure", + "members":{ + "ManagedProducts":{ + "shape":"ManagedProductDescriptors", + "documentation":"

      High-level information for the managed rule groups owned by the specified vendor.

      " + } + } + }, "DescribeManagedRuleGroupRequest":{ "type":"structure", "required":[ @@ -2110,7 +2240,7 @@ "members":{ "VendorName":{ "shape":"VendorName", - "documentation":"

      The name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.

      " + "documentation":"

      The name of the managed rule group vendor. You use this, along with the rule group name, to identify a rule group.

      " }, "Name":{ "shape":"EntityName", @@ -2135,7 +2265,7 @@ }, "SnsTopicArn":{ "shape":"ResourceArn", - "documentation":"

      The Amazon resource name (ARN) of the Amazon Simple Notification Service SNS topic that's used to record changes to the managed rule group. You can subscribe to the SNS topic to receive notifications when the managed rule group is modified, such as for new versions and for version expiration. For more information, see the Amazon Simple Notification Service Developer Guide.

      " + "documentation":"

      The Amazon Resource Name (ARN) of the Amazon Simple Notification Service SNS topic that's used to provide notification of changes to the managed rule group. You can subscribe to the SNS topic to receive notifications when the managed rule group is modified, such as for new versions and for version expiration. For more information, see the Amazon Simple Notification Service Developer Guide.

      " }, "Capacity":{ "shape":"CapacityUnit", @@ -2175,6 +2305,17 @@ } }, "DownloadUrl":{"type":"string"}, + "EmailField":{ + "type":"structure", + "required":["Identifier"], + "members":{ + "Identifier":{ + "shape":"FieldIdentifier", + "documentation":"

      The name of the email field.

      How you specify this depends on the request inspection payload type.

      • For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

        For example, for the JSON payload { \"form\": { \"email\": \"THE_EMAIL\" } }, the email field specification is /form/email.

      • For form encoded payload types, use the HTML form names.

        For example, for an HTML form with the input element named email1, the email field specification is email1.

      " + } + }, + "documentation":"

      The name of the field in the request payload that contains your customer's email.

      This data type is used in the RequestInspectionACFP data type.

      " + }, "EntityDescription":{ "type":"string", "max":256, @@ -2286,6 +2427,10 @@ "Cookies":{ "shape":"Cookies", "documentation":"

      Inspect the request cookies. You must configure scope and pattern matching filters in the Cookies object, to define the set of cookies and the parts of the cookies that WAF inspects.

      Only the first 8 KB (8192 bytes) of a request's cookies and only the first 200 cookies are forwarded to WAF for inspection by the underlying host service. You must configure how to handle any oversize cookie content in the Cookies object. WAF applies the pattern matching filters to the cookies that it receives from the underlying host service.

      " + }, + "HeaderOrder":{ + "shape":"HeaderOrder", + "documentation":"

      Inspect a string containing the list of the request's header names, ordered as they appear in the web request that WAF receives for inspection. WAF generates the string and then uses that as the field to match component in its inspection. WAF separates the header names in the string using colons and no added spaces, for example host:user-agent:accept:authorization:referer.

      " } }, "documentation":"

      The part of the web request that you want WAF to inspect. Include the single FieldToMatch type that you want to inspect, with additional specifications as needed, according to the type. You specify a single request component in FieldToMatch for each rule statement that requires it. To inspect more than one component of the web request, create a separate rule statement for each component.

      Example JSON for a QueryString field to match:

      \"FieldToMatch\": { \"QueryString\": {} }

      Example JSON for a Method field to match specification:

      \"FieldToMatch\": { \"Method\": { \"Name\": \"DELETE\" } }

      " @@ -2841,7 +2986,7 @@ }, "ApplicationIntegrationURL":{ "shape":"OutputUrl", - "documentation":"

      The URL to use in SDK integrations with Amazon Web Services managed rule groups. For example, you can use the integration SDKs with the account takeover prevention managed rule group AWSManagedRulesATPRuleSet. This is only populated if you are using a rule group in your web ACL that integrates with your applications in this way. For more information, see WAF client application integration in the WAF Developer Guide.

      " + "documentation":"

      The URL to use in SDK integrations with Amazon Web Services managed rule groups. For example, you can use the integration SDKs with the account takeover prevention managed rule group AWSManagedRulesATPRuleSet and the account creation fraud prevention managed rule group AWSManagedRulesACFPRuleSet. This is only populated if you are using a rule group in your web ACL that integrates with your applications in this way. For more information, see WAF client application integration in the WAF Developer Guide.

      " } } }, @@ -2920,6 +3065,17 @@ "max":199, "min":1 }, + "HeaderOrder":{ + "type":"structure", + "required":["OversizeHandling"], + "members":{ + "OversizeHandling":{ + "shape":"OversizeHandling", + "documentation":"

      What WAF should do if the headers of the request are more numerous or larger than WAF can inspect. WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to WAF.

      The options for oversize handling are the following:

      • CONTINUE - Inspect the available headers normally, according to the rule inspection criteria.

      • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

      • NO_MATCH - Treat the web request as not matching the rule statement.

      " + } + }, + "documentation":"

      Inspect a string containing the list of the request's header names, ordered as they appear in the web request that WAF receives for inspection. WAF generates the string and then uses that as the field to match component in its inspection. WAF separates the header names in the string using colons and no added spaces, for example host:user-agent:accept:authorization:referer.

      " + }, "HeaderValue":{"type":"string"}, "Headers":{ "type":"structure", @@ -2939,7 +3095,7 @@ }, "OversizeHandling":{ "shape":"OversizeHandling", - "documentation":"

      What WAF should do if the headers of the request are larger than WAF can inspect. WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to WAF.

      The options for oversize handling are the following:

      • CONTINUE - Inspect the headers normally, according to the rule inspection criteria.

      • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

      • NO_MATCH - Treat the web request as not matching the rule statement.

      " + "documentation":"

      What WAF should do if the headers of the request are more numerous or larger than WAF can inspect. WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to WAF.

      The options for oversize handling are the following:

      • CONTINUE - Inspect the available headers normally, according to the rule inspection criteria.

      • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

      • NO_MATCH - Treat the web request as not matching the rule statement.

      " } }, "documentation":"

      Inspect all headers in the web request. You can specify the parts of the headers to inspect and you can narrow the set of headers to inspect by including or excluding specific keys.

      This is used to indicate the web request component to inspect, in the FieldToMatch specification.

      If you want to inspect just the value of a single header, use the SingleHeader FieldToMatch setting instead.

      Example JSON: \"Headers\": { \"MatchPattern\": { \"All\": {} }, \"MatchScope\": \"KEY\", \"OversizeHandling\": \"MATCH\" }

      " @@ -3106,7 +3262,7 @@ }, "OversizeHandling":{ "shape":"OversizeHandling", - "documentation":"

      What WAF should do if the body is larger than WAF can inspect. WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to WAF for inspection.

      The default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL AssociationConfig, for additional processing fees.

      The options for oversize handling are the following:

      • CONTINUE - Inspect the body normally, according to the rule inspection criteria.

      • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

      • NO_MATCH - Treat the web request as not matching the rule statement.

      You can combine the MATCH or NO_MATCH settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.

      Default: CONTINUE

      " + "documentation":"

      What WAF should do if the body is larger than WAF can inspect. WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to WAF for inspection.

      The default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL AssociationConfig, for additional processing fees.

      The options for oversize handling are the following:

      • CONTINUE - Inspect the available body contents normally, according to the rule inspection criteria.

      • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

      • NO_MATCH - Treat the web request as not matching the rule statement.

      You can combine the MATCH or NO_MATCH settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.

      Default: CONTINUE

      " } }, "documentation":"

      Inspect the body of the web request as JSON. The body immediately follows the request headers.

      This is used to indicate the web request component to inspect, in the FieldToMatch specification.

      Use the specifications in this object to indicate which parts of the JSON body to inspect using the rule's inspection criteria. WAF inspects only the parts of the JSON that result from the matches that you indicate.

      Example JSON: \"JsonBody\": { \"MatchPattern\": { \"All\": {} }, \"MatchScope\": \"ALL\" }

      " @@ -3272,7 +3428,7 @@ "members":{ "VendorName":{ "shape":"VendorName", - "documentation":"

      The name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.

      " + "documentation":"

      The name of the managed rule group vendor. You use this, along with the rule group name, to identify a rule group.

      " }, "Name":{ "shape":"EntityName", @@ -3645,7 +3801,7 @@ }, "RedactedFields":{ "shape":"RedactedFields", - "documentation":"

      The parts of the request that you want to keep out of the logs. For example, if you redact the SingleHeader field, the HEADER field in the logs will be REDACTED.

      You can specify only the following fields for redaction: UriPath, QueryString, SingleHeader, Method, and JsonBody.

      " + "documentation":"

      The parts of the request that you want to keep out of the logs.

      For example, if you redact the SingleHeader field, the HEADER field in the logs will be REDACTED for all rules that use the SingleHeader FieldToMatch setting.

      Redaction applies only to the component that's specified in the rule's FieldToMatch setting, so the SingleHeader redaction doesn't apply to rules that use the Headers FieldToMatch.

      You can specify only the following fields for redaction: UriPath, QueryString, SingleHeader, and Method.

      " }, "ManagedByFirewallManager":{ "shape":"Boolean", @@ -3686,6 +3842,52 @@ "min":1, "pattern":".*\\S.*" }, + "ManagedProductDescriptor":{ + "type":"structure", + "members":{ + "VendorName":{ + "shape":"VendorName", + "documentation":"

      The name of the managed rule group vendor. You use this, along with the rule group name, to identify a rule group.

      " + }, + "ManagedRuleSetName":{ + "shape":"EntityName", + "documentation":"

      The name of the managed rule group. For example, AWSManagedRulesAnonymousIpList or AWSManagedRulesATPRuleSet.

      " + }, + "ProductId":{ + "shape":"ProductId", + "documentation":"

      A unique identifier for the rule group. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.

      " + }, + "ProductLink":{ + "shape":"ProductLink", + "documentation":"

      For Amazon Web Services Marketplace managed rule groups only, the link to the rule group product page.

      " + }, + "ProductTitle":{ + "shape":"ProductTitle", + "documentation":"

      The display name for the managed rule group. For example, Anonymous IP list or Account takeover prevention.

      " + }, + "ProductDescription":{ + "shape":"ProductDescription", + "documentation":"

      A short description of the managed rule group.

      " + }, + "SnsTopicArn":{ + "shape":"ResourceArn", + "documentation":"

      The Amazon Resource Name (ARN) of the Amazon Simple Notification Service SNS topic that's used to provide notification of changes to the managed rule group. You can subscribe to the SNS topic to receive notifications when the managed rule group is modified, such as for new versions and for version expiration. For more information, see the Amazon Simple Notification Service Developer Guide.

      " + }, + "IsVersioningSupported":{ + "shape":"Boolean", + "documentation":"

      Indicates whether the rule group is versioned.

      " + }, + "IsAdvancedManagedRuleSet":{ + "shape":"Boolean", + "documentation":"

      Indicates whether the rule group provides an advanced set of protections, such as the Amazon Web Services Managed Rules rule groups that are used for WAF intelligent threat mitigation.

      " + } + }, + "documentation":"

      The properties of a managed product, such as an Amazon Web Services Managed Rules rule group or an Amazon Web Services Marketplace managed rule group.

      " + }, + "ManagedProductDescriptors":{ + "type":"list", + "member":{"shape":"ManagedProductDescriptor"} + }, "ManagedRuleGroupConfig":{ "type":"structure", "members":{ @@ -3697,19 +3899,19 @@ }, "PayloadType":{ "shape":"PayloadType", - "documentation":"

      Instead of this setting, provide your configuration under AWSManagedRulesATPRuleSet RequestInspection.

      ", + "documentation":"

      Instead of this setting, provide your configuration under the request inspection configuration for AWSManagedRulesATPRuleSet or AWSManagedRulesACFPRuleSet.

      ", "deprecated":true, "deprecatedMessage":"Deprecated. Use AWSManagedRulesATPRuleSet RequestInspection PayloadType" }, "UsernameField":{ "shape":"UsernameField", - "documentation":"

      Instead of this setting, provide your configuration under AWSManagedRulesATPRuleSet RequestInspection.

      ", + "documentation":"

      Instead of this setting, provide your configuration under the request inspection configuration for AWSManagedRulesATPRuleSet or AWSManagedRulesACFPRuleSet.

      ", "deprecated":true, "deprecatedMessage":"Deprecated. Use AWSManagedRulesATPRuleSet RequestInspection UsernameField" }, "PasswordField":{ "shape":"PasswordField", - "documentation":"

      Instead of this setting, provide your configuration under AWSManagedRulesATPRuleSet RequestInspection.

      ", + "documentation":"

      Instead of this setting, provide your configuration under the request inspection configuration for AWSManagedRulesATPRuleSet or AWSManagedRulesACFPRuleSet.

      ", "deprecated":true, "deprecatedMessage":"Deprecated. Use AWSManagedRulesATPRuleSet RequestInspection PasswordField" }, @@ -3720,9 +3922,13 @@ "AWSManagedRulesATPRuleSet":{ "shape":"AWSManagedRulesATPRuleSet", "documentation":"

      Additional configuration for using the account takeover prevention (ATP) managed rule group, AWSManagedRulesATPRuleSet. Use this to provide login request information to the rule group. For web ACLs that protect CloudFront distributions, use this to also provide the information about how your distribution responds to login requests.

      This configuration replaces the individual configuration fields in ManagedRuleGroupConfig and provides additional feature configuration.

      For information about using the ATP managed rule group, see WAF Fraud Control account takeover prevention (ATP) rule group and WAF Fraud Control account takeover prevention (ATP) in the WAF Developer Guide.

      " + }, + "AWSManagedRulesACFPRuleSet":{ + "shape":"AWSManagedRulesACFPRuleSet", + "documentation":"

      Additional configuration for using the account creation fraud prevention (ACFP) managed rule group, AWSManagedRulesACFPRuleSet. Use this to provide account creation request information to the rule group. For web ACLs that protect CloudFront distributions, use this to also provide the information about how your distribution responds to account creation requests.

      For information about using the ACFP managed rule group, see WAF Fraud Control account creation fraud prevention (ACFP) rule group and WAF Fraud Control account creation fraud prevention (ACFP) in the WAF Developer Guide.

      " } }, - "documentation":"

      Additional information that's used by a managed rule group. Many managed rule groups don't require this.

      Use the AWSManagedRulesATPRuleSet configuration object for the account takeover prevention managed rule group, to provide information such as the sign-in page of your application and the type of content to accept or reject from the client.

      Use the AWSManagedRulesBotControlRuleSet configuration object to configure the protection level that you want the Bot Control rule group to use.

      For example specifications, see the examples section of CreateWebACL.

      " + "documentation":"

      Additional information that's used by a managed rule group. Many managed rule groups don't require this.

      The rule groups used for intelligent threat mitigation require additional configuration:

      • Use the AWSManagedRulesACFPRuleSet configuration object to configure the account creation fraud prevention managed rule group. The configuration includes the registration and sign-up pages of your application and the locations in the account creation request payload of data, such as the user email and phone number fields.

      • Use the AWSManagedRulesATPRuleSet configuration object to configure the account takeover prevention managed rule group. The configuration includes the sign-in page of your application and the locations in the login request payload of data such as the username and password.

      • Use the AWSManagedRulesBotControlRuleSet configuration object to configure the protection level that you want the Bot Control rule group to use.

      For example specifications, see the examples section of CreateWebACL.

      " }, "ManagedRuleGroupConfigs":{ "type":"list", @@ -3737,7 +3943,7 @@ "members":{ "VendorName":{ "shape":"VendorName", - "documentation":"

      The name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.

      " + "documentation":"

      The name of the managed rule group vendor. You use this, along with the rule group name, to identify a rule group.

      " }, "Name":{ "shape":"EntityName", @@ -3757,14 +3963,14 @@ }, "ManagedRuleGroupConfigs":{ "shape":"ManagedRuleGroupConfigs", - "documentation":"

      Additional information that's used by a managed rule group. Many managed rule groups don't require this.

      Use the AWSManagedRulesATPRuleSet configuration object for the account takeover prevention managed rule group, to provide information such as the sign-in page of your application and the type of content to accept or reject from the client.

      Use the AWSManagedRulesBotControlRuleSet configuration object to configure the protection level that you want the Bot Control rule group to use.

      " + "documentation":"

      Additional information that's used by a managed rule group. Many managed rule groups don't require this.

      The rule groups used for intelligent threat mitigation require additional configuration:

      • Use the AWSManagedRulesACFPRuleSet configuration object to configure the account creation fraud prevention managed rule group. The configuration includes the registration and sign-up pages of your application and the locations in the account creation request payload of data, such as the user email and phone number fields.

      • Use the AWSManagedRulesATPRuleSet configuration object to configure the account takeover prevention managed rule group. The configuration includes the sign-in page of your application and the locations in the login request payload of data such as the username and password.

      • Use the AWSManagedRulesBotControlRuleSet configuration object to configure the protection level that you want the Bot Control rule group to use.

      " }, "RuleActionOverrides":{ "shape":"RuleActionOverrides", "documentation":"

      Action settings to use in the place of the rule actions that are configured inside the rule group. You specify one override for each rule whose action you want to change.

      You can use overrides for testing, for example you can override all of rule actions to Count and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic.

      " } }, - "documentation":"

      A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups.

      You cannot nest a ManagedRuleGroupStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

      You are charged additional fees when you use the WAF Bot Control managed rule group AWSManagedRulesBotControlRuleSet or the WAF Fraud Control account takeover prevention (ATP) managed rule group AWSManagedRulesATPRuleSet. For more information, see WAF Pricing.

      " + "documentation":"

      A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups.

      You cannot nest a ManagedRuleGroupStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

      You are charged additional fees when you use the WAF Bot Control managed rule group AWSManagedRulesBotControlRuleSet, the WAF Fraud Control account takeover prevention (ATP) managed rule group AWSManagedRulesATPRuleSet, or the WAF Fraud Control account creation fraud prevention (ACFP) managed rule group AWSManagedRulesACFPRuleSet. For more information, see WAF Pricing.

      " }, "ManagedRuleGroupSummaries":{ "type":"list", @@ -3775,7 +3981,7 @@ "members":{ "VendorName":{ "shape":"VendorName", - "documentation":"

      The name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.

      " + "documentation":"

      The name of the managed rule group vendor. You use this, along with the rule group name, to identify a rule group.

      " }, "Name":{ "shape":"EntityName", @@ -3790,7 +3996,7 @@ "documentation":"

      The description of the managed rule group, provided by Amazon Web Services Managed Rules or the Amazon Web Services Marketplace seller who manages it.

      " } }, - "documentation":"

      High-level information about a managed rule group, returned by ListAvailableManagedRuleGroups. This provides information like the name and vendor name, that you provide when you add a ManagedRuleGroupStatement to a web ACL. Managed rule groups include Amazon Web Services Managed Rules rule groups, which are free of charge to WAF customers, and Amazon Web Services Marketplace managed rule groups, which you can subscribe to through Amazon Web Services Marketplace.

      " + "documentation":"

      High-level information about a managed rule group, returned by ListAvailableManagedRuleGroups. This provides information like the name and vendor name, that you provide when you add a ManagedRuleGroupStatement to a web ACL. Managed rule groups include Amazon Web Services Managed Rules rule groups and Amazon Web Services Marketplace managed rule groups. To use any Amazon Web Services Marketplace managed rule group, first subscribe to the rule group through Amazon Web Services Marketplace.

      " }, "ManagedRuleGroupVersion":{ "type":"structure", @@ -3925,7 +4131,7 @@ "type":"structure", "members":{ }, - "documentation":"

      Inspect the HTTP method of the web request. The method indicates the type of operation that the request is asking the origin to perform.

      This is used only in the FieldToMatch specification for some web request component types.

      JSON specification: \"Method\": {}

      " + "documentation":"

      Inspect the HTTP method of the web request. The method indicates the type of operation that the request is asking the origin to perform.

      This is used in the FieldToMatch specification for some web request component types.

      JSON specification: \"Method\": {}

      " }, "MetricName":{ "type":"string", @@ -4088,7 +4294,8 @@ "ATP_RULE_SET_RESPONSE_INSPECTION", "ASSOCIATED_RESOURCE_TYPE", "SCOPE_DOWN", - "CUSTOM_KEYS" + "CUSTOM_KEYS", + "ACP_RULE_SET_RESPONSE_INSPECTION" ] }, "ParameterExceptionParameter":{ @@ -4101,10 +4308,10 @@ "members":{ "Identifier":{ "shape":"FieldIdentifier", - "documentation":"

      The name of the password field. For example /form/password.

      " + "documentation":"

      The name of the password field.

      How you specify this depends on the request inspection payload type.

      • For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

        For example, for the JSON payload { \"form\": { \"password\": \"THE_PASSWORD\" } }, the password field specification is /form/password.

      • For form encoded payload types, use the HTML form names.

        For example, for an HTML form with the input element named password1, the password field specification is password1.

      " } }, - "documentation":"

      Details about your login page password field for request inspection, used in the AWSManagedRulesATPRuleSet RequestInspection configuration.

      " + "documentation":"

      The name of the field in the request payload that contains your customer's password.

      This data type is used in the RequestInspection and RequestInspectionACFP data types.

      " }, "PayloadType":{ "type":"string", @@ -4113,6 +4320,21 @@ "FORM_ENCODED" ] }, + "PhoneNumberField":{ + "type":"structure", + "required":["Identifier"], + "members":{ + "Identifier":{ + "shape":"FieldIdentifier", + "documentation":"

      The name of a single primary phone number field.

      How you specify the phone number fields depends on the request inspection payload type.

      • For JSON payloads, specify the field identifiers in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

        For example, for the JSON payload { \"form\": { \"primaryphoneline1\": \"THE_PHONE1\", \"primaryphoneline2\": \"THE_PHONE2\", \"primaryphoneline3\": \"THE_PHONE3\" } }, the phone number field identifiers are /form/primaryphoneline1, /form/primaryphoneline2, and /form/primaryphoneline3.

      • For form encoded payload types, use the HTML form names.

        For example, for an HTML form with input elements named primaryphoneline1, primaryphoneline2, and primaryphoneline3, the phone number field identifiers are primaryphoneline1, primaryphoneline2, and primaryphoneline3.

      " + } + }, + "documentation":"

      The name of a field in the request payload that contains part or all of your customer's primary phone number.

      This data type is used in the RequestInspectionACFP data type.

      " + }, + "PhoneNumberFields":{ + "type":"list", + "member":{"shape":"PhoneNumberField"} + }, "Platform":{ "type":"string", "enum":[ @@ -4137,6 +4359,28 @@ "CONTAINS_WORD" ] }, + "ProductDescription":{ + "type":"string", + "min":1, + "pattern":".*\\S.*" + }, + "ProductId":{ + "type":"string", + "max":128, + "min":1, + "pattern":".*\\S.*" + }, + "ProductLink":{ + "type":"string", + "max":2048, + "min":1, + "pattern":".*\\S.*" + }, + "ProductTitle":{ + "type":"string", + "min":1, + "pattern":".*\\S.*" + }, "PublishedVersions":{ "type":"map", "key":{"shape":"VersionKeyString"}, @@ -4231,7 +4475,7 @@ "type":"structure", "members":{ }, - "documentation":"

      Inspect the query string of the web request. This is the part of a URL that appears after a ? character, if any.

      This is used only in the FieldToMatch specification for some web request component types.

      JSON specification: \"QueryString\": {}

      " + "documentation":"

      Inspect the query string of the web request. This is the part of a URL that appears after a ? character, if any.

      This is used in the FieldToMatch specification for some web request component types.

      JSON specification: \"QueryString\": {}

      " }, "RateBasedStatement":{ "type":"structure", @@ -4552,6 +4796,12 @@ "min":1, "pattern":".*" }, + "RegistrationPagePathString":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*\\S.*" + }, "RegularExpressionList":{ "type":"list", "member":{"shape":"Regex"} @@ -4605,15 +4855,46 @@ }, "UsernameField":{ "shape":"UsernameField", - "documentation":"

      Details about your login page username field.

      How you specify this depends on the payload type.

      • For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

        For example, for the JSON payload { \"login\": { \"username\": \"THE_USERNAME\", \"password\": \"THE_PASSWORD\" } }, the username field specification is /login/username and the password field specification is /login/password.

      • For form encoded payload types, use the HTML form names.

        For example, for an HTML form with input elements named username1 and password1, the username field specification is username1 and the password field specification is password1.

      " + "documentation":"

      The name of the field in the request payload that contains your customer's username.

      How you specify this depends on the request inspection payload type.

      • For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

        For example, for the JSON payload { \"form\": { \"username\": \"THE_USERNAME\" } }, the username field specification is /form/username.

      • For form encoded payload types, use the HTML form names.

        For example, for an HTML form with the input element named username1, the username field specification is username1.

      " }, "PasswordField":{ "shape":"PasswordField", - "documentation":"

      Details about your login page password field.

      How you specify this depends on the payload type.

      • For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

        For example, for the JSON payload { \"login\": { \"username\": \"THE_USERNAME\", \"password\": \"THE_PASSWORD\" } }, the username field specification is /login/username and the password field specification is /login/password.

      • For form encoded payload types, use the HTML form names.

        For example, for an HTML form with input elements named username1 and password1, the username field specification is username1 and the password field specification is password1.

      " + "documentation":"

      The name of the field in the request payload that contains your customer's password.

      How you specify this depends on the request inspection payload type.

      • For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

        For example, for the JSON payload { \"form\": { \"password\": \"THE_PASSWORD\" } }, the password field specification is /form/password.

      • For form encoded payload types, use the HTML form names.

        For example, for an HTML form with the input element named password1, the password field specification is password1.

      " } }, "documentation":"

      The criteria for inspecting login requests, used by the ATP rule group to validate credentials usage.

      This is part of the AWSManagedRulesATPRuleSet configuration in ManagedRuleGroupConfig.

      In these settings, you specify how your application accepts login attempts by providing the request payload type and the names of the fields within the request body where the username and password are provided.

      " }, + "RequestInspectionACFP":{ + "type":"structure", + "required":["PayloadType"], + "members":{ + "PayloadType":{ + "shape":"PayloadType", + "documentation":"

      The payload type for your account creation endpoint, either JSON or form encoded.

      " + }, + "UsernameField":{ + "shape":"UsernameField", + "documentation":"

      The name of the field in the request payload that contains your customer's username.

      How you specify this depends on the request inspection payload type.

      • For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

        For example, for the JSON payload { \"form\": { \"username\": \"THE_USERNAME\" } }, the username field specification is /form/username.

      • For form encoded payload types, use the HTML form names.

        For example, for an HTML form with the input element named username1, the username field specification is username1.

      " + }, + "PasswordField":{ + "shape":"PasswordField", + "documentation":"

      The name of the field in the request payload that contains your customer's password.

      How you specify this depends on the request inspection payload type.

      • For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

        For example, for the JSON payload { \"form\": { \"password\": \"THE_PASSWORD\" } }, the password field specification is /form/password.

      • For form encoded payload types, use the HTML form names.

        For example, for an HTML form with the input element named password1, the password field specification is password1.

      " + }, + "EmailField":{ + "shape":"EmailField", + "documentation":"

      The name of the field in the request payload that contains your customer's email.

      How you specify this depends on the request inspection payload type.

      • For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

        For example, for the JSON payload { \"form\": { \"email\": \"THE_EMAIL\" } }, the email field specification is /form/email.

      • For form encoded payload types, use the HTML form names.

        For example, for an HTML form with the input element named email1, the email field specification is email1.

      " + }, + "PhoneNumberFields":{ + "shape":"PhoneNumberFields", + "documentation":"

      The names of the fields in the request payload that contain your customer's primary phone number.

      Order the phone number fields in the array exactly as they are ordered in the request payload.

      How you specify the phone number fields depends on the request inspection payload type.

      • For JSON payloads, specify the field identifiers in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

        For example, for the JSON payload { \"form\": { \"primaryphoneline1\": \"THE_PHONE1\", \"primaryphoneline2\": \"THE_PHONE2\", \"primaryphoneline3\": \"THE_PHONE3\" } }, the phone number field identifiers are /form/primaryphoneline1, /form/primaryphoneline2, and /form/primaryphoneline3.

      • For form encoded payload types, use the HTML form names.

        For example, for an HTML form with input elements named primaryphoneline1, primaryphoneline2, and primaryphoneline3, the phone number field identifiers are primaryphoneline1, primaryphoneline2, and primaryphoneline3.

      " + }, + "AddressFields":{ + "shape":"AddressFields", + "documentation":"

      The names of the fields in the request payload that contain your customer's primary physical address.

      Order the address fields in the array exactly as they are ordered in the request payload.

      How you specify the address fields depends on the request inspection payload type.

      • For JSON payloads, specify the field identifiers in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

        For example, for the JSON payload { \"form\": { \"primaryaddressline1\": \"THE_ADDRESS1\", \"primaryaddressline2\": \"THE_ADDRESS2\", \"primaryaddressline3\": \"THE_ADDRESS3\" } }, the address field idenfiers are /form/primaryaddressline1, /form/primaryaddressline2, and /form/primaryaddressline3.

      • For form encoded payload types, use the HTML form names.

        For example, for an HTML form with input elements named primaryaddressline1, primaryaddressline2, and primaryaddressline3, the address field identifiers are primaryaddressline1, primaryaddressline2, and primaryaddressline3.

      " + } + }, + "documentation":"

      The criteria for inspecting account creation requests, used by the ACFP rule group to validate and track account creation attempts.

      This is part of the AWSManagedRulesACFPRuleSet configuration in ManagedRuleGroupConfig.

      In these settings, you specify how your application accepts account creation attempts by providing the request payload type and the names of the fields within the request body where the username, password, email, and primary address and phone number fields are provided.

      " + }, "ResourceArn":{ "type":"string", "max":2048, @@ -4655,22 +4936,22 @@ "members":{ "StatusCode":{ "shape":"ResponseInspectionStatusCode", - "documentation":"

      Configures inspection of the response status code.

      " + "documentation":"

      Configures inspection of the response status code for success and failure indicators.

      " }, "Header":{ "shape":"ResponseInspectionHeader", - "documentation":"

      Configures inspection of the response header.

      " + "documentation":"

      Configures inspection of the response header for success and failure indicators.

      " }, "BodyContains":{ "shape":"ResponseInspectionBodyContains", - "documentation":"

      Configures inspection of the response body. WAF can inspect the first 65,536 bytes (64 KB) of the response body.

      " + "documentation":"

      Configures inspection of the response body for success and failure indicators. WAF can inspect the first 65,536 bytes (64 KB) of the response body.

      " }, "Json":{ "shape":"ResponseInspectionJson", - "documentation":"

      Configures inspection of the response JSON. WAF can inspect the first 65,536 bytes (64 KB) of the response JSON.

      " + "documentation":"

      Configures inspection of the response JSON for success and failure indicators. WAF can inspect the first 65,536 bytes (64 KB) of the response JSON.

      " } }, - "documentation":"

      The criteria for inspecting responses to login requests, used by the ATP rule group to track login failure rates.

      The ATP rule group evaluates the responses that your protected resources send back to client login attempts, keeping count of successful and failed attempts from each IP address and client session. Using this information, the rule group labels and mitigates requests from client sessions and IP addresses that submit too many failed login attempts in a short amount of time.

      Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

      This is part of the AWSManagedRulesATPRuleSet configuration in ManagedRuleGroupConfig.

      Enable login response inspection by configuring exactly one component of the response to inspect. You can't configure more than one. If you don't configure any of the response inspection options, response inspection is disabled.

      " + "documentation":"

      The criteria for inspecting responses to login requests and account creation requests, used by the ATP and ACFP rule groups to track login and account creation success and failure rates.

      Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

      The rule groups evaluate the responses that your protected resources send back to client login and account creation attempts, keeping count of successful and failed attempts from each IP address and client session. Using this information, the rule group labels and mitigates requests from client sessions and IP addresses with too much suspicious activity in a short amount of time.

      This is part of the AWSManagedRulesATPRuleSet and AWSManagedRulesACFPRuleSet configurations in ManagedRuleGroupConfig.

      Enable response inspection by configuring exactly one component of the response to inspect, for example, Header or StatusCode. You can't configure more than one component for inspection. If you don't configure any of the response inspection options, response inspection is disabled.

      " }, "ResponseInspectionBodyContains":{ "type":"structure", @@ -4681,14 +4962,14 @@ "members":{ "SuccessStrings":{ "shape":"ResponseInspectionBodyContainsSuccessStrings", - "documentation":"

      Strings in the body of the response that indicate a successful login attempt. To be counted as a successful login, the string can be anywhere in the body and must be an exact match, including case. Each string must be unique among the success and failure strings.

      JSON example: \"SuccessStrings\": [ \"Login successful\", \"Welcome to our site!\" ]

      " + "documentation":"

      Strings in the body of the response that indicate a successful login or account creation attempt. To be counted as a success, the string can be anywhere in the body and must be an exact match, including case. Each string must be unique among the success and failure strings.

      JSON examples: \"SuccessStrings\": [ \"Login successful\" ] and \"SuccessStrings\": [ \"Account creation successful\", \"Welcome to our site!\" ]

      " }, "FailureStrings":{ "shape":"ResponseInspectionBodyContainsFailureStrings", - "documentation":"

      Strings in the body of the response that indicate a failed login attempt. To be counted as a failed login, the string can be anywhere in the body and must be an exact match, including case. Each string must be unique among the success and failure strings.

      JSON example: \"FailureStrings\": [ \"Login failed\" ]

      " + "documentation":"

      Strings in the body of the response that indicate a failed login or account creation attempt. To be counted as a failure, the string can be anywhere in the body and must be an exact match, including case. Each string must be unique among the success and failure strings.

      JSON example: \"FailureStrings\": [ \"Request failed\" ]

      " } }, - "documentation":"

      Configures inspection of the response body. WAF can inspect the first 65,536 bytes (64 KB) of the response body. This is part of the ResponseInspection configuration for AWSManagedRulesATPRuleSet.

      Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

      " + "documentation":"

      Configures inspection of the response body. WAF can inspect the first 65,536 bytes (64 KB) of the response body. This is part of the ResponseInspection configuration for AWSManagedRulesATPRuleSet and AWSManagedRulesACFPRuleSet.

      Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

      " }, "ResponseInspectionBodyContainsFailureStrings":{ "type":"list", @@ -4712,18 +4993,18 @@ "members":{ "Name":{ "shape":"ResponseInspectionHeaderName", - "documentation":"

      The name of the header to match against. The name must be an exact match, including case.

      JSON example: \"Name\": [ \"LoginResult\" ]

      " + "documentation":"

      The name of the header to match against. The name must be an exact match, including case.

      JSON example: \"Name\": [ \"RequestResult\" ]

      " }, "SuccessValues":{ "shape":"ResponseInspectionHeaderSuccessValues", - "documentation":"

      Values in the response header with the specified name that indicate a successful login attempt. To be counted as a successful login, the value must be an exact match, including case. Each value must be unique among the success and failure values.

      JSON example: \"SuccessValues\": [ \"LoginPassed\", \"Successful login\" ]

      " + "documentation":"

      Values in the response header with the specified name that indicate a successful login or account creation attempt. To be counted as a success, the value must be an exact match, including case. Each value must be unique among the success and failure values.

      JSON examples: \"SuccessValues\": [ \"LoginPassed\", \"Successful login\" ] and \"SuccessValues\": [ \"AccountCreated\", \"Successful account creation\" ]

      " }, "FailureValues":{ "shape":"ResponseInspectionHeaderFailureValues", - "documentation":"

      Values in the response header with the specified name that indicate a failed login attempt. To be counted as a failed login, the value must be an exact match, including case. Each value must be unique among the success and failure values.

      JSON example: \"FailureValues\": [ \"LoginFailed\", \"Failed login\" ]

      " + "documentation":"

      Values in the response header with the specified name that indicate a failed login or account creation attempt. To be counted as a failure, the value must be an exact match, including case. Each value must be unique among the success and failure values.

      JSON examples: \"FailureValues\": [ \"LoginFailed\", \"Failed login\" ] and \"FailureValues\": [ \"AccountCreationFailed\" ]

      " } }, - "documentation":"

      Configures inspection of the response header. This is part of the ResponseInspection configuration for AWSManagedRulesATPRuleSet.

      Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

      " + "documentation":"

      Configures inspection of the response header. This is part of the ResponseInspection configuration for AWSManagedRulesATPRuleSet and AWSManagedRulesACFPRuleSet.

      Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

      " }, "ResponseInspectionHeaderFailureValues":{ "type":"list", @@ -4753,18 +5034,18 @@ "members":{ "Identifier":{ "shape":"FieldIdentifier", - "documentation":"

      The identifier for the value to match against in the JSON. The identifier must be an exact match, including case.

      JSON example: \"Identifier\": [ \"/login/success\" ]

      " + "documentation":"

      The identifier for the value to match against in the JSON. The identifier must be an exact match, including case.

      JSON examples: \"Identifier\": [ \"/login/success\" ] and \"Identifier\": [ \"/sign-up/success\" ]

      " }, "SuccessValues":{ "shape":"ResponseInspectionJsonSuccessValues", - "documentation":"

      Values for the specified identifier in the response JSON that indicate a successful login attempt. To be counted as a successful login, the value must be an exact match, including case. Each value must be unique among the success and failure values.

      JSON example: \"SuccessValues\": [ \"True\", \"Succeeded\" ]

      " + "documentation":"

      Values for the specified identifier in the response JSON that indicate a successful login or account creation attempt. To be counted as a success, the value must be an exact match, including case. Each value must be unique among the success and failure values.

      JSON example: \"SuccessValues\": [ \"True\", \"Succeeded\" ]

      " }, "FailureValues":{ "shape":"ResponseInspectionJsonFailureValues", - "documentation":"

      Values for the specified identifier in the response JSON that indicate a failed login attempt. To be counted as a failed login, the value must be an exact match, including case. Each value must be unique among the success and failure values.

      JSON example: \"FailureValues\": [ \"False\", \"Failed\" ]

      " + "documentation":"

      Values for the specified identifier in the response JSON that indicate a failed login or account creation attempt. To be counted as a failure, the value must be an exact match, including case. Each value must be unique among the success and failure values.

      JSON example: \"FailureValues\": [ \"False\", \"Failed\" ]

      " } }, - "documentation":"

      Configures inspection of the response JSON. WAF can inspect the first 65,536 bytes (64 KB) of the response JSON. This is part of the ResponseInspection configuration for AWSManagedRulesATPRuleSet.

      Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

      " + "documentation":"

      Configures inspection of the response JSON. WAF can inspect the first 65,536 bytes (64 KB) of the response JSON. This is part of the ResponseInspection configuration for AWSManagedRulesATPRuleSet and AWSManagedRulesACFPRuleSet.

      Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

      " }, "ResponseInspectionJsonFailureValues":{ "type":"list", @@ -4787,14 +5068,14 @@ "members":{ "SuccessCodes":{ "shape":"ResponseInspectionStatusCodeSuccessCodes", - "documentation":"

      Status codes in the response that indicate a successful login attempt. To be counted as a successful login, the response status code must match one of these. Each code must be unique among the success and failure status codes.

      JSON example: \"SuccessCodes\": [ 200, 201 ]

      " + "documentation":"

      Status codes in the response that indicate a successful login or account creation attempt. To be counted as a success, the response status code must match one of these. Each code must be unique among the success and failure status codes.

      JSON example: \"SuccessCodes\": [ 200, 201 ]

      " }, "FailureCodes":{ "shape":"ResponseInspectionStatusCodeFailureCodes", - "documentation":"

      Status codes in the response that indicate a failed login attempt. To be counted as a failed login, the response status code must match one of these. Each code must be unique among the success and failure status codes.

      JSON example: \"FailureCodes\": [ 400, 404 ]

      " + "documentation":"

      Status codes in the response that indicate a failed login or account creation attempt. To be counted as a failure, the response status code must match one of these. Each code must be unique among the success and failure status codes.

      JSON example: \"FailureCodes\": [ 400, 404 ]

      " } }, - "documentation":"

      Configures inspection of the response status code. This is part of the ResponseInspection configuration for AWSManagedRulesATPRuleSet.

      Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

      " + "documentation":"

      Configures inspection of the response status code. This is part of the ResponseInspection configuration for AWSManagedRulesATPRuleSet and AWSManagedRulesACFPRuleSet.

      Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

      " }, "ResponseInspectionStatusCodeFailureCodes":{ "type":"list", @@ -5266,7 +5547,7 @@ }, "ManagedRuleGroupStatement":{ "shape":"ManagedRuleGroupStatement", - "documentation":"

      A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups.

      You cannot nest a ManagedRuleGroupStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

      You are charged additional fees when you use the WAF Bot Control managed rule group AWSManagedRulesBotControlRuleSet or the WAF Fraud Control account takeover prevention (ATP) managed rule group AWSManagedRulesATPRuleSet. For more information, see WAF Pricing.

      " + "documentation":"

      A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups.

      You cannot nest a ManagedRuleGroupStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

      You are charged additional fees when you use the WAF Bot Control managed rule group AWSManagedRulesBotControlRuleSet, the WAF Fraud Control account takeover prevention (ATP) managed rule group AWSManagedRulesATPRuleSet, or the WAF Fraud Control account creation fraud prevention (ACFP) managed rule group AWSManagedRulesACFPRuleSet. For more information, see WAF Pricing.

      " }, "LabelMatchStatement":{ "shape":"LabelMatchStatement", @@ -5760,7 +6041,7 @@ "type":"structure", "members":{ }, - "documentation":"

      Inspect the path component of the URI of the web request. This is the part of the web request that identifies a resource. For example, /images/daily-ad.jpg.

      This is used only in the FieldToMatch specification for some web request component types.

      JSON specification: \"UriPath\": {}

      " + "documentation":"

      Inspect the path component of the URI of the web request. This is the part of the web request that identifies a resource. For example, /images/daily-ad.jpg.

      This is used in the FieldToMatch specification for some web request component types.

      JSON specification: \"UriPath\": {}

      " }, "UsernameField":{ "type":"structure", @@ -5768,10 +6049,10 @@ "members":{ "Identifier":{ "shape":"FieldIdentifier", - "documentation":"

      The name of the username field. For example /form/username.

      " + "documentation":"

      The name of the username field.

      How you specify this depends on the request inspection payload type.

      • For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

        For example, for the JSON payload { \"form\": { \"username\": \"THE_USERNAME\" } }, the username field specification is /form/username.

      • For form encoded payload types, use the HTML form names.

        For example, for an HTML form with the input element named username1, the username field specification is username1.

      " } }, - "documentation":"

      Details about your login page username field for request inspection, used in the AWSManagedRulesATPRuleSet RequestInspection configuration.

      " + "documentation":"

      The name of the field in the request payload that contains your customer's username.

      This data type is used in the RequestInspection and RequestInspectionACFP data types.

      " }, "VendorName":{ "type":"string", @@ -5814,11 +6095,11 @@ "members":{ "SampledRequestsEnabled":{ "shape":"Boolean", - "documentation":"

      A boolean indicating whether WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the WAF console.

      " + "documentation":"

      Indicates whether WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the WAF console.

      " }, "CloudWatchMetricsEnabled":{ "shape":"Boolean", - "documentation":"

      A boolean indicating whether the associated resource sends metrics to Amazon CloudWatch. For the list of available metrics, see WAF Metrics in the WAF Developer Guide.

      " + "documentation":"

      Indicates whether the associated resource sends metrics to Amazon CloudWatch. For the list of available metrics, see WAF Metrics in the WAF Developer Guide.

      For web ACLs, the metrics are for web requests that have the web ACL default action applied. WAF applies the default action to web requests that pass the inspection of all rules in the web ACL without being either allowed or blocked. For more information, see The web ACL default action in the WAF Developer Guide.

      " }, "MetricName":{ "shape":"MetricName", diff --git a/services/wellarchitected/pom.xml b/services/wellarchitected/pom.xml index fd2636c1b852..713c7e29b05b 100644 --- a/services/wellarchitected/pom.xml +++ b/services/wellarchitected/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT wellarchitected AWS Java SDK :: Services :: Well Architected diff --git a/services/wellarchitected/src/main/resources/codegen-resources/paginators-1.json b/services/wellarchitected/src/main/resources/codegen-resources/paginators-1.json index e2d9e2d68289..fd24c74bd14a 100644 --- a/services/wellarchitected/src/main/resources/codegen-resources/paginators-1.json +++ b/services/wellarchitected/src/main/resources/codegen-resources/paginators-1.json @@ -50,6 +50,21 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListProfileNotifications": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListProfileShares": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListProfiles": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListShareInvitations": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/wellarchitected/src/main/resources/codegen-resources/service-2.json b/services/wellarchitected/src/main/resources/codegen-resources/service-2.json index 8362187968f2..5ccdcc1bca3b 100644 --- a/services/wellarchitected/src/main/resources/codegen-resources/service-2.json +++ b/services/wellarchitected/src/main/resources/codegen-resources/service-2.json @@ -30,6 +30,23 @@ ], "documentation":"

      Associate a lens to a workload.

      Up to 10 lenses can be associated with a workload in a single API operation. A maximum of 20 lenses can be associated with a workload.

      Disclaimer

      By accessing and/or applying custom lenses created by another Amazon Web Services user or account, you acknowledge that custom lenses created by other users and shared with you are Third Party Content as defined in the Amazon Web Services Customer Agreement.

      " }, + "AssociateProfiles":{ + "name":"AssociateProfiles", + "http":{ + "method":"PATCH", + "requestUri":"/workloads/{WorkloadId}/associateProfiles" + }, + "input":{"shape":"AssociateProfilesInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      Associate a profile with a workload.

      " + }, "CreateLensShare":{ "name":"CreateLensShare", "http":{ @@ -87,6 +104,43 @@ ], "documentation":"

      Create a milestone for an existing workload.

      " }, + "CreateProfile":{ + "name":"CreateProfile", + "http":{ + "method":"POST", + "requestUri":"/profiles" + }, + "input":{"shape":"CreateProfileInput"}, + "output":{"shape":"CreateProfileOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      Create a profile.

      " + }, + "CreateProfileShare":{ + "name":"CreateProfileShare", + "http":{ + "method":"POST", + "requestUri":"/profiles/{ProfileArn}/shares" + }, + "input":{"shape":"CreateProfileShareInput"}, + "output":{"shape":"CreateProfileShareOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      Create a profile share.

      " + }, "CreateWorkload":{ "name":"CreateWorkload", "http":{ @@ -159,6 +213,40 @@ ], "documentation":"

      Delete a lens share.

      After the lens share is deleted, Amazon Web Services accounts, users, organizations, and organizational units (OUs) that you shared the lens with can continue to use it, but they will no longer be able to apply it to new workloads.

      Disclaimer

      By sharing your custom lenses with other Amazon Web Services accounts, you acknowledge that Amazon Web Services will make your custom lenses available to those other accounts. Those other accounts may continue to access and use your shared custom lenses even if you delete the custom lenses from your own Amazon Web Services account or terminate your Amazon Web Services account.

      " }, + "DeleteProfile":{ + "name":"DeleteProfile", + "http":{ + "method":"DELETE", + "requestUri":"/profiles/{ProfileArn}" + }, + "input":{"shape":"DeleteProfileInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      Delete a profile.

      Disclaimer

      By sharing your profile with other Amazon Web Services accounts, you acknowledge that Amazon Web Services will make your profile available to those other accounts. Those other accounts may continue to access and use your shared profile even if you delete the profile from your own Amazon Web Services account or terminate your Amazon Web Services account.

      " + }, + "DeleteProfileShare":{ + "name":"DeleteProfileShare", + "http":{ + "method":"DELETE", + "requestUri":"/profiles/{ProfileArn}/shares/{ShareId}" + }, + "input":{"shape":"DeleteProfileShareInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      Delete a profile share.

      " + }, "DeleteWorkload":{ "name":"DeleteWorkload", "http":{ @@ -210,6 +298,23 @@ ], "documentation":"

      Disassociate a lens from a workload.

      Up to 10 lenses can be disassociated from a workload in a single API operation.

      The Amazon Web Services Well-Architected Framework lens (wellarchitected) cannot be removed from a workload.

      " }, + "DisassociateProfiles":{ + "name":"DisassociateProfiles", + "http":{ + "method":"PATCH", + "requestUri":"/workloads/{WorkloadId}/disassociateProfiles" + }, + "input":{"shape":"DisassociateProfilesInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      Disassociate a profile from a workload.

      " + }, "ExportLens":{ "name":"ExportLens", "http":{ @@ -346,6 +451,40 @@ ], "documentation":"

      Get a milestone for an existing workload.

      " }, + "GetProfile":{ + "name":"GetProfile", + "http":{ + "method":"GET", + "requestUri":"/profiles/{ProfileArn}" + }, + "input":{"shape":"GetProfileInput"}, + "output":{"shape":"GetProfileOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      Get profile information.

      " + }, + "GetProfileTemplate":{ + "name":"GetProfileTemplate", + "http":{ + "method":"GET", + "requestUri":"/profileTemplate" + }, + "input":{"shape":"GetProfileTemplateInput"}, + "output":{"shape":"GetProfileTemplateOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      Get profile template.

      " + }, "GetWorkload":{ "name":"GetWorkload", "http":{ @@ -533,6 +672,55 @@ ], "documentation":"

      List lens notifications.

      " }, + "ListProfileNotifications":{ + "name":"ListProfileNotifications", + "http":{ + "method":"GET", + "requestUri":"/profileNotifications/" + }, + "input":{"shape":"ListProfileNotificationsInput"}, + "output":{"shape":"ListProfileNotificationsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      List profile notifications.

      " + }, + "ListProfileShares":{ + "name":"ListProfileShares", + "http":{ + "method":"GET", + "requestUri":"/profiles/{ProfileArn}/shares" + }, + "input":{"shape":"ListProfileSharesInput"}, + "output":{"shape":"ListProfileSharesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      List profile shares.

      " + }, + "ListProfiles":{ + "name":"ListProfiles", + "http":{ + "method":"GET", + "requestUri":"/profileSummaries" + }, + "input":{"shape":"ListProfilesInput"}, + "output":{"shape":"ListProfilesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      List profiles.

      " + }, "ListShareInvitations":{ "name":"ListShareInvitations", "http":{ @@ -561,7 +749,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

      List the tags for a resource.

      The WorkloadArn parameter can be either a workload ARN or a custom lens ARN.

      " + "documentation":"

      List the tags for a resource.

      The WorkloadArn parameter can be a workload ARN, a custom lens ARN, or a profile ARN.

      " }, "ListWorkloadShares":{ "name":"ListWorkloadShares", @@ -608,7 +796,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

      Adds one or more tags to the specified resource.

      The WorkloadArn parameter can be either a workload ARN or a custom lens ARN.

      " + "documentation":"

      Adds one or more tags to the specified resource.

      The WorkloadArn parameter can be a workload ARN, a custom lens ARN, or a profile ARN.

      " }, "UntagResource":{ "name":"UntagResource", @@ -622,7 +810,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

      Deletes specified tags from a resource.

      The WorkloadArn parameter can be either a workload ARN or a custom lens ARN.

      To specify multiple tags, use separate tagKeys parameters, for example:

      DELETE /tags/WorkloadArn?tagKeys=key1&tagKeys=key2

      " + "documentation":"

      Deletes specified tags from a resource.

      The WorkloadArn parameter can be a workload ARN, a custom lens ARN, or a profile ARN.

      To specify multiple tags, use separate tagKeys parameters, for example:

      DELETE /tags/WorkloadArn?tagKeys=key1&tagKeys=key2

      " }, "UpdateAnswer":{ "name":"UpdateAnswer", @@ -676,6 +864,24 @@ ], "documentation":"

      Update lens review for a particular workload.

      " }, + "UpdateProfile":{ + "name":"UpdateProfile", + "http":{ + "method":"PATCH", + "requestUri":"/profiles/{ProfileArn}" + }, + "input":{"shape":"UpdateProfileInput"}, + "output":{"shape":"UpdateProfileOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      Update a profile.

      " + }, "UpdateShareInvitation":{ "name":"UpdateShareInvitation", "http":{ @@ -746,6 +952,23 @@ {"shape":"ThrottlingException"} ], "documentation":"

      Upgrade lens review for a particular workload.

      " + }, + "UpgradeProfileVersion":{ + "name":"UpgradeProfileVersion", + "http":{ + "method":"PUT", + "requestUri":"/workloads/{WorkloadId}/profiles/{ProfileArn}/upgrade" + }, + "input":{"shape":"UpgradeProfileVersionInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

      Upgrade a profile.

      " } }, "shapes":{ @@ -851,6 +1074,10 @@ "Reason":{ "shape":"AnswerReason", "documentation":"

      The reason why a choice is non-applicable to a question in your workload.

      " + }, + "QuestionType":{ + "shape":"QuestionType", + "documentation":"

      The type of the question.

      " } }, "documentation":"

      An answer summary of a lens review in a workload.

      " @@ -876,9 +1103,29 @@ }, "documentation":"

      Input to associate lens reviews.

      " }, + "AssociateProfilesInput":{ + "type":"structure", + "required":[ + "WorkloadId", + "ProfileArns" + ], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "ProfileArns":{ + "shape":"ProfileArns", + "documentation":"

      The list of profile ARNs to associate with the workload.

      " + } + } + }, "AwsAccountId":{ "type":"string", "documentation":"

      An Amazon Web Services account ID.

      ", + "max":12, + "min":12, "pattern":"[0-9]{12}" }, "AwsRegion":{ @@ -1196,7 +1443,9 @@ }, "ClientRequestToken":{ "type":"string", - "documentation":"

      A unique case-sensitive string used to ensure that this request is idempotent (executes only once).

      You should not reuse the same token for other requests. If you retry a request with the same client request token and the same parameters after the original request has completed successfully, the result of the original request is returned.

      This token is listed as required, however, if you do not specify it, the Amazon Web Services SDKs automatically generate one for you. If you are not using the Amazon Web Services SDK or the CLI, you must provide this token or the request will fail.

      " + "documentation":"

      A unique case-sensitive string used to ensure that this request is idempotent (executes only once).

      You should not reuse the same token for other requests. If you retry a request with the same client request token and the same parameters after the original request has completed successfully, the result of the original request is returned.

      This token is listed as required, however, if you do not specify it, the Amazon Web Services SDKs automatically generate one for you. If you are not using the Amazon Web Services SDK or the CLI, you must provide this token or the request will fail.

      ", + "max":2048, + "min":1 }, "ConflictException":{ "type":"structure", @@ -1341,6 +1590,81 @@ }, "documentation":"

      Output of a create milestone call.

      " }, + "CreateProfileInput":{ + "type":"structure", + "required":[ + "ProfileName", + "ProfileDescription", + "ProfileQuestions", + "ClientRequestToken" + ], + "members":{ + "ProfileName":{ + "shape":"ProfileName", + "documentation":"

      Name of the profile.

      " + }, + "ProfileDescription":{ + "shape":"ProfileDescription", + "documentation":"

      The profile description.

      " + }, + "ProfileQuestions":{ + "shape":"ProfileQuestionUpdates", + "documentation":"

      The profile questions.

      " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The tags assigned to the profile.

      " + } + } + }, + "CreateProfileOutput":{ + "type":"structure", + "members":{ + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

      The profile ARN.

      " + }, + "ProfileVersion":{ + "shape":"ProfileVersion", + "documentation":"

      Version of the profile.

      " + } + } + }, + "CreateProfileShareInput":{ + "type":"structure", + "required":[ + "ProfileArn", + "SharedWith", + "ClientRequestToken" + ], + "members":{ + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

      The profile ARN.

      ", + "location":"uri", + "locationName":"ProfileArn" + }, + "SharedWith":{"shape":"SharedWith"}, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + } + } + }, + "CreateProfileShareOutput":{ + "type":"structure", + "members":{ + "ShareId":{"shape":"ShareId"}, + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

      The profile ARN.

      " + } + } + }, "CreateWorkloadInput":{ "type":"structure", "required":[ @@ -1379,6 +1703,10 @@ "Applications":{ "shape":"WorkloadApplications", "documentation":"

      List of AppRegistry application ARNs associated to the workload.

      " + }, + "ProfileArns":{ + "shape":"WorkloadProfileArns", + "documentation":"

      The list of profile ARNs associated with the workload.

      " } }, "documentation":"

      Input for workload creation.

      " @@ -1482,6 +1810,54 @@ } } }, + "DeleteProfileInput":{ + "type":"structure", + "required":[ + "ProfileArn", + "ClientRequestToken" + ], + "members":{ + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

      The profile ARN.

      ", + "location":"uri", + "locationName":"ProfileArn" + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true, + "location":"querystring", + "locationName":"ClientRequestToken" + } + } + }, + "DeleteProfileShareInput":{ + "type":"structure", + "required":[ + "ShareId", + "ProfileArn", + "ClientRequestToken" + ], + "members":{ + "ShareId":{ + "shape":"ShareId", + "location":"uri", + "locationName":"ShareId" + }, + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

      The profile ARN.

      ", + "location":"uri", + "locationName":"ProfileArn" + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true, + "location":"querystring", + "locationName":"ClientRequestToken" + } + } + }, "DeleteWorkloadInput":{ "type":"structure", "required":[ @@ -1554,15 +1930,33 @@ }, "documentation":"

      Input to disassociate lens reviews.

      " }, - "DiscoveryIntegrationStatus":{ - "type":"string", - "enum":[ - "ENABLED", - "DISABLED" - ] - }, - "DisplayText":{ - "type":"string", + "DisassociateProfilesInput":{ + "type":"structure", + "required":[ + "WorkloadId", + "ProfileArns" + ], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "ProfileArns":{ + "shape":"ProfileArns", + "documentation":"

      The list of profile ARNs to disassociate from the workload.

      " + } + } + }, + "DiscoveryIntegrationStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "DisplayText":{ + "type":"string", "max":64, "min":1 }, @@ -1867,6 +2261,47 @@ }, "documentation":"

      Output of a get milestone call.

      " }, + "GetProfileInput":{ + "type":"structure", + "required":["ProfileArn"], + "members":{ + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

      The profile ARN.

      ", + "location":"uri", + "locationName":"ProfileArn" + }, + "ProfileVersion":{ + "shape":"ProfileVersion", + "documentation":"

      The profile version.

      ", + "location":"querystring", + "locationName":"ProfileVersion" + } + } + }, + "GetProfileOutput":{ + "type":"structure", + "members":{ + "Profile":{ + "shape":"Profile", + "documentation":"

      The profile.

      " + } + } + }, + "GetProfileTemplateInput":{ + "type":"structure", + "members":{ + } + }, + "GetProfileTemplateOutput":{ + "type":"structure", + "members":{ + "ProfileTemplate":{ + "shape":"ProfileTemplate", + "documentation":"

      The profile template.

      " + } + } + }, "GetWorkloadInput":{ "type":"structure", "required":["WorkloadId"], @@ -2082,7 +2517,12 @@ "UpdatedAt":{"shape":"Timestamp"}, "Notes":{"shape":"Notes"}, "RiskCounts":{"shape":"RiskCounts"}, - "NextToken":{"shape":"NextToken"} + "NextToken":{"shape":"NextToken"}, + "Profiles":{ + "shape":"WorkloadProfiles", + "documentation":"

      The profiles associated with the workload.

      " + }, + "PrioritizedRiskCounts":{"shape":"RiskCounts"} }, "documentation":"

      A lens review of a question.

      " }, @@ -2121,7 +2561,12 @@ "documentation":"

      The status of the lens.

      " }, "UpdatedAt":{"shape":"Timestamp"}, - "RiskCounts":{"shape":"RiskCounts"} + "RiskCounts":{"shape":"RiskCounts"}, + "Profiles":{ + "shape":"WorkloadProfiles", + "documentation":"

      The profiles associated with the workload.

      " + }, + "PrioritizedRiskCounts":{"shape":"RiskCounts"} }, "documentation":"

      A lens review summary of a workload.

      " }, @@ -2268,6 +2713,12 @@ "documentation":"

      The maximum number of results to return for this request.

      ", "location":"querystring", "locationName":"MaxResults" + }, + "QuestionPriority":{ + "shape":"QuestionPriority", + "documentation":"

      The priority of the question.

      ", + "location":"querystring", + "locationName":"QuestionPriority" } }, "documentation":"

      Input to list answers.

      " @@ -2401,6 +2852,12 @@ "documentation":"

      The maximum number of results to return for this request.

      ", "location":"querystring", "locationName":"MaxResults" + }, + "QuestionPriority":{ + "shape":"QuestionPriority", + "documentation":"

      The priority of the question.

      ", + "location":"querystring", + "locationName":"QuestionPriority" } }, "documentation":"

      Input to list lens review improvements.

      " @@ -2595,6 +3052,122 @@ "NextToken":{"shape":"NextToken"} } }, + "ListProfileNotificationsInput":{ + "type":"structure", + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"querystring", + "locationName":"WorkloadId" + }, + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "location":"querystring", + "locationName":"MaxResults" + } + } + }, + "ListProfileNotificationsOutput":{ + "type":"structure", + "members":{ + "NotificationSummaries":{ + "shape":"ProfileNotificationSummaries", + "documentation":"

      Notification summaries.

      " + }, + "NextToken":{"shape":"NextToken"} + } + }, + "ListProfileSharesInput":{ + "type":"structure", + "required":["ProfileArn"], + "members":{ + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

      The profile ARN.

      ", + "location":"uri", + "locationName":"ProfileArn" + }, + "SharedWithPrefix":{ + "shape":"SharedWithPrefix", + "documentation":"

      The Amazon Web Services account ID, IAM role, organization ID, or organizational unit (OU) ID with which the profile is shared.

      ", + "location":"querystring", + "locationName":"SharedWithPrefix" + }, + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"ListProfileSharesMaxResults", + "documentation":"

      The maximum number of results to return for this request.

      ", + "location":"querystring", + "locationName":"MaxResults" + }, + "Status":{ + "shape":"ShareStatus", + "location":"querystring", + "locationName":"Status" + } + } + }, + "ListProfileSharesMaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "ListProfileSharesOutput":{ + "type":"structure", + "members":{ + "ProfileShareSummaries":{ + "shape":"ProfileShareSummaries", + "documentation":"

      Profile share summaries.

      " + }, + "NextToken":{"shape":"NextToken"} + } + }, + "ListProfilesInput":{ + "type":"structure", + "members":{ + "ProfileNamePrefix":{ + "shape":"ProfileNamePrefix", + "documentation":"

      Prefix for profile name.

      ", + "location":"querystring", + "locationName":"ProfileNamePrefix" + }, + "ProfileOwnerType":{ + "shape":"ProfileOwnerType", + "documentation":"

      Profile owner type.

      ", + "location":"querystring", + "locationName":"ProfileOwnerType" + }, + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "location":"querystring", + "locationName":"MaxResults" + } + } + }, + "ListProfilesOutput":{ + "type":"structure", + "members":{ + "ProfileSummaries":{ + "shape":"ProfileSummaries", + "documentation":"

      Profile summaries.

      " + }, + "NextToken":{"shape":"NextToken"} + } + }, "ListShareInvitationsInput":{ "type":"structure", "members":{ @@ -2625,6 +3198,12 @@ "documentation":"

      The maximum number of results to return for this request.

      ", "location":"querystring", "locationName":"MaxResults" + }, + "ProfileNamePrefix":{ + "shape":"ProfileNamePrefix", + "documentation":"

      Profile name prefix.

      ", + "location":"querystring", + "locationName":"ProfileNamePrefix" } }, "documentation":"

      Input for List Share Invitations

      " @@ -2744,6 +3323,10 @@ "max":50, "min":1 }, + "MaxSelectedProfileChoices":{ + "type":"integer", + "min":0 + }, "MetricType":{ "type":"string", "enum":["WORKLOAD"] @@ -2785,6 +3368,10 @@ }, "documentation":"

      A milestone summary return object.

      " }, + "MinSelectedProfileChoices":{ + "type":"integer", + "min":0 + }, "NextToken":{ "type":"string", "documentation":"

      The token to use to retrieve the next set of results.

      " @@ -2828,7 +3415,7 @@ }, "PermissionType":{ "type":"string", - "documentation":"

      Permission granted on a workload share.

      ", + "documentation":"

      Permission granted on a share request.

      ", "enum":[ "READONLY", "CONTRIBUTOR" @@ -2899,10 +3486,280 @@ "PillarId":{"shape":"PillarId"}, "PillarName":{"shape":"PillarName"}, "Notes":{"shape":"Notes"}, - "RiskCounts":{"shape":"RiskCounts"} + "RiskCounts":{"shape":"RiskCounts"}, + "PrioritizedRiskCounts":{"shape":"RiskCounts"} }, "documentation":"

      A pillar review summary of a lens review.

      " }, + "Profile":{ + "type":"structure", + "members":{ + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

      The profile ARN.

      " + }, + "ProfileVersion":{ + "shape":"ProfileVersion", + "documentation":"

      The profile version.

      " + }, + "ProfileName":{ + "shape":"ProfileName", + "documentation":"

      The profile name.

      " + }, + "ProfileDescription":{ + "shape":"ProfileDescription", + "documentation":"

      The profile description.

      " + }, + "ProfileQuestions":{ + "shape":"ProfileQuestions", + "documentation":"

      Profile questions.

      " + }, + "Owner":{"shape":"AwsAccountId"}, + "CreatedAt":{"shape":"Timestamp"}, + "UpdatedAt":{"shape":"Timestamp"}, + "ShareInvitationId":{ + "shape":"ShareInvitationId", + "documentation":"

      The ID assigned to the share invitation.

      " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

      The tags assigned to the profile.

      " + } + }, + "documentation":"

      A profile.

      " + }, + "ProfileArn":{ + "type":"string", + "max":2084, + "pattern":"arn:aws[-a-z]*:wellarchitected:[a-z]{2}(-gov)?-[a-z]+-\\d:\\d{12}:profile/[a-z0-9]+" + }, + "ProfileArns":{ + "type":"list", + "member":{"shape":"ProfileArn"}, + "min":1 + }, + "ProfileChoice":{ + "type":"structure", + "members":{ + "ChoiceId":{"shape":"ChoiceId"}, + "ChoiceTitle":{"shape":"ChoiceTitle"}, + "ChoiceDescription":{"shape":"ChoiceDescription"} + }, + "documentation":"

      The profile choice.

      " + }, + "ProfileDescription":{ + "type":"string", + "max":100, + "min":3, + "pattern":"^[A-Za-z0-9-_.,:/()@!&?#+'’\\s]+$" + }, + "ProfileName":{ + "type":"string", + "max":100, + "min":3, + "pattern":"^[A-Za-z0-9-_.,:/()@!&?#+'’\\s]+$" + }, + "ProfileNamePrefix":{ + "type":"string", + "max":100, + "pattern":"^[A-Za-z0-9-_.,:/()@!&?#+'’\\s]+$" + }, + "ProfileNotificationSummaries":{ + "type":"list", + "member":{"shape":"ProfileNotificationSummary"} + }, + "ProfileNotificationSummary":{ + "type":"structure", + "members":{ + "CurrentProfileVersion":{ + "shape":"ProfileVersion", + "documentation":"

      The current profile version.

      " + }, + "LatestProfileVersion":{ + "shape":"ProfileVersion", + "documentation":"

      The latest profile version.

      " + }, + "Type":{ + "shape":"ProfileNotificationType", + "documentation":"

      Type of notification.

      " + }, + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

      The profile ARN.

      " + }, + "ProfileName":{ + "shape":"ProfileName", + "documentation":"

      The profile name.

      " + }, + "WorkloadId":{"shape":"WorkloadId"}, + "WorkloadName":{"shape":"WorkloadName"} + }, + "documentation":"

      The profile notification summary.

      " + }, + "ProfileNotificationType":{ + "type":"string", + "enum":[ + "PROFILE_ANSWERS_UPDATED", + "PROFILE_DELETED" + ] + }, + "ProfileOwnerType":{ + "type":"string", + "enum":[ + "SELF", + "SHARED" + ] + }, + "ProfileQuestion":{ + "type":"structure", + "members":{ + "QuestionId":{"shape":"QuestionId"}, + "QuestionTitle":{"shape":"QuestionTitle"}, + "QuestionDescription":{"shape":"QuestionDescription"}, + "QuestionChoices":{ + "shape":"ProfileQuestionChoices", + "documentation":"

      The question choices.

      " + }, + "SelectedChoiceIds":{ + "shape":"SelectedChoiceIds", + "documentation":"

      The selected choices.

      " + }, + "MinSelectedChoices":{ + "shape":"MinSelectedProfileChoices", + "documentation":"

      The minimum number of selected choices.

      " + }, + "MaxSelectedChoices":{ + "shape":"MaxSelectedProfileChoices", + "documentation":"

      The maximum number of selected choices.

      " + } + }, + "documentation":"

      A profile question.

      " + }, + "ProfileQuestionChoices":{ + "type":"list", + "member":{"shape":"ProfileChoice"} + }, + "ProfileQuestionUpdate":{ + "type":"structure", + "members":{ + "QuestionId":{"shape":"QuestionId"}, + "SelectedChoiceIds":{ + "shape":"SelectedProfileChoiceIds", + "documentation":"

      The selected choices.

      " + } + }, + "documentation":"

      An update to a profile question.

      " + }, + "ProfileQuestionUpdates":{ + "type":"list", + "member":{"shape":"ProfileQuestionUpdate"} + }, + "ProfileQuestions":{ + "type":"list", + "member":{"shape":"ProfileQuestion"} + }, + "ProfileShareSummaries":{ + "type":"list", + "member":{"shape":"ProfileShareSummary"} + }, + "ProfileShareSummary":{ + "type":"structure", + "members":{ + "ShareId":{"shape":"ShareId"}, + "SharedWith":{"shape":"SharedWith"}, + "Status":{"shape":"ShareStatus"}, + "StatusMessage":{ + "shape":"StatusMessage", + "documentation":"

      Profile share invitation status message.

      " + } + }, + "documentation":"

      Summary of a profile share.

      " + }, + "ProfileSummaries":{ + "type":"list", + "member":{"shape":"ProfileSummary"} + }, + "ProfileSummary":{ + "type":"structure", + "members":{ + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

      The profile ARN.

      " + }, + "ProfileVersion":{ + "shape":"ProfileVersion", + "documentation":"

      The profile version.

      " + }, + "ProfileName":{ + "shape":"ProfileName", + "documentation":"

      The profile name.

      " + }, + "ProfileDescription":{ + "shape":"ProfileDescription", + "documentation":"

      The profile description.

      " + }, + "Owner":{"shape":"AwsAccountId"}, + "CreatedAt":{"shape":"Timestamp"}, + "UpdatedAt":{"shape":"Timestamp"} + }, + "documentation":"

      Summary of a profile.

      " + }, + "ProfileTemplate":{ + "type":"structure", + "members":{ + "TemplateName":{ + "shape":"ProfileName", + "documentation":"

      The name of the profile template.

      " + }, + "TemplateQuestions":{ + "shape":"TemplateQuestions", + "documentation":"

      Profile template questions.

      " + }, + "CreatedAt":{"shape":"Timestamp"}, + "UpdatedAt":{"shape":"Timestamp"} + }, + "documentation":"

      The profile template.

      " + }, + "ProfileTemplateChoice":{ + "type":"structure", + "members":{ + "ChoiceId":{"shape":"ChoiceId"}, + "ChoiceTitle":{"shape":"ChoiceTitle"}, + "ChoiceDescription":{"shape":"ChoiceDescription"} + }, + "documentation":"

      A profile template choice.

      " + }, + "ProfileTemplateQuestion":{ + "type":"structure", + "members":{ + "QuestionId":{"shape":"QuestionId"}, + "QuestionTitle":{"shape":"QuestionTitle"}, + "QuestionDescription":{"shape":"QuestionDescription"}, + "QuestionChoices":{ + "shape":"ProfileTemplateQuestionChoices", + "documentation":"

      The question choices.

      " + }, + "MinSelectedChoices":{ + "shape":"MinSelectedProfileChoices", + "documentation":"

      The minimum number of choices selected.

      " + }, + "MaxSelectedChoices":{ + "shape":"MaxSelectedProfileChoices", + "documentation":"

      The maximum number of choices selected.

      " + } + }, + "documentation":"

      A profile template question.

      " + }, + "ProfileTemplateQuestionChoices":{ + "type":"list", + "member":{"shape":"ProfileTemplateChoice"} + }, + "ProfileVersion":{ + "type":"string", + "max":32, + "min":1, + "pattern":"^[A-Za-z0-9-]+$" + }, "QuestionDescription":{ "type":"string", "documentation":"

      The description of the question.

      ", @@ -2947,12 +3804,26 @@ "type":"list", "member":{"shape":"QuestionMetric"} }, + "QuestionPriority":{ + "type":"string", + "enum":[ + "PRIORITIZED", + "NONE" + ] + }, "QuestionTitle":{ "type":"string", "documentation":"

      The title of the question.

      ", "max":512, "min":1 }, + "QuestionType":{ + "type":"string", + "enum":[ + "PRIORITIZED", + "NON_PRIORITIZED" + ] + }, "QuotaCode":{ "type":"string", "documentation":"

      Service Quotas requirement to identify originating quota.

      " @@ -2997,11 +3868,19 @@ "value":{"shape":"Count"}, "documentation":"

      A map from risk names to the count of how many questions have that rating.

      " }, + "SelectedChoiceIds":{ + "type":"list", + "member":{"shape":"ChoiceId"} + }, "SelectedChoices":{ "type":"list", "member":{"shape":"ChoiceId"}, "documentation":"

      List of selected choice IDs in a question answer.

      The values entered replace the previously selected choices.

      " }, + "SelectedProfileChoiceIds":{ + "type":"list", + "member":{"shape":"ChoiceId"} + }, "ServiceCode":{ "type":"string", "documentation":"

      Service Quotas requirement to identify originating service.

      " @@ -3026,7 +3905,7 @@ }, "ShareId":{ "type":"string", - "documentation":"

      The ID associated with the workload share.

      ", + "documentation":"

      The ID associated with the share.

      ", "pattern":"[0-9a-f]{32}" }, "ShareInvitation":{ @@ -3045,6 +3924,10 @@ "LensArn":{ "shape":"LensArn", "documentation":"

      The ARN for the lens.

      " + }, + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

      The profile ARN.

      " } }, "documentation":"

      The share invitation.

      " @@ -3085,6 +3968,14 @@ "LensArn":{ "shape":"LensArn", "documentation":"

      The ARN for the lens.

      " + }, + "ProfileName":{ + "shape":"ProfileName", + "documentation":"

      The profile name.

      " + }, + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

      The profile ARN.

      " } }, "documentation":"

      A share invitation summary return object.

      " @@ -3093,12 +3984,13 @@ "type":"string", "enum":[ "WORKLOAD", - "LENS" + "LENS", + "PROFILE" ] }, "ShareStatus":{ "type":"string", - "documentation":"

      The status of a workload share.

      ", + "documentation":"

      The status of the share request.

      ", "enum":[ "ACCEPTED", "REJECTED", @@ -3112,7 +4004,7 @@ }, "SharedWith":{ "type":"string", - "documentation":"

      The Amazon Web Services account ID, IAM role, organization ID, or organizational unit (OU) ID with which the workload is shared.

      ", + "documentation":"

      The Amazon Web Services account ID, IAM role, organization ID, or organizational unit (OU) ID with which the workload, lens, or profile is shared.

      ", "max":2048, "min":12 }, @@ -3171,6 +4063,10 @@ "max":256, "min":0 }, + "TemplateQuestions":{ + "type":"list", + "member":{"shape":"ProfileTemplateQuestion"} + }, "ThrottlingException":{ "type":"structure", "required":["Message"], @@ -3312,6 +4208,35 @@ }, "documentation":"

      Output of a update lens review call.

      " }, + "UpdateProfileInput":{ + "type":"structure", + "required":["ProfileArn"], + "members":{ + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

      The profile ARN.

      ", + "location":"uri", + "locationName":"ProfileArn" + }, + "ProfileDescription":{ + "shape":"ProfileDescription", + "documentation":"

      The profile description.

      " + }, + "ProfileQuestions":{ + "shape":"ProfileQuestionUpdates", + "documentation":"

      Profile questions.

      " + } + } + }, + "UpdateProfileOutput":{ + "type":"structure", + "members":{ + "Profile":{ + "shape":"Profile", + "documentation":"

      The profile.

      " + } + } + }, "UpdateShareInvitationInput":{ "type":"structure", "required":[ @@ -3434,6 +4359,31 @@ "ClientRequestToken":{"shape":"ClientRequestToken"} } }, + "UpgradeProfileVersionInput":{ + "type":"structure", + "required":[ + "WorkloadId", + "ProfileArn" + ], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

      The profile ARN.

      ", + "location":"uri", + "locationName":"ProfileArn" + }, + "MilestoneName":{"shape":"MilestoneName"}, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + } + } + }, "Urls":{ "type":"list", "member":{"shape":"ChoiceContent"} @@ -3533,7 +4483,12 @@ "Applications":{ "shape":"WorkloadApplications", "documentation":"

      List of AppRegistry application ARNs associated to the workload.

      " - } + }, + "Profiles":{ + "shape":"WorkloadProfiles", + "documentation":"

      Profile associated with a workload.

      " + }, + "PrioritizedRiskCounts":{"shape":"RiskCounts"} }, "documentation":"

      A workload return object.

      " }, @@ -3595,6 +4550,8 @@ "WorkloadId":{ "type":"string", "documentation":"

      The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.

      ", + "max":32, + "min":32, "pattern":"[0-9a-f]{32}" }, "WorkloadImprovementStatus":{ @@ -3650,6 +4607,30 @@ "member":{"shape":"PillarId"}, "documentation":"

      The priorities of the pillars, which are used to order items in the improvement plan. Each pillar is represented by its PillarReviewSummary$PillarId.

      " }, + "WorkloadProfile":{ + "type":"structure", + "members":{ + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

      The profile ARN.

      " + }, + "ProfileVersion":{ + "shape":"ProfileVersion", + "documentation":"

      The profile version.

      " + } + }, + "documentation":"

      The profile associated with a workload.

      " + }, + "WorkloadProfileArns":{ + "type":"list", + "member":{"shape":"ProfileArn"}, + "max":1 + }, + "WorkloadProfiles":{ + "type":"list", + "member":{"shape":"WorkloadProfile"}, + "max":1 + }, "WorkloadResourceDefinition":{ "type":"list", "member":{"shape":"DefinitionType"} @@ -3707,7 +4688,12 @@ "UpdatedAt":{"shape":"Timestamp"}, "Lenses":{"shape":"WorkloadLenses"}, "RiskCounts":{"shape":"RiskCounts"}, - "ImprovementStatus":{"shape":"WorkloadImprovementStatus"} + "ImprovementStatus":{"shape":"WorkloadImprovementStatus"}, + "Profiles":{ + "shape":"WorkloadProfiles", + "documentation":"

      Profile associated with a workload.

      " + }, + "PrioritizedRiskCounts":{"shape":"RiskCounts"} }, "documentation":"

      A workload summary return object.

      " } diff --git a/services/wisdom/pom.xml b/services/wisdom/pom.xml index 542873ad97af..e0684aab16c0 100644 --- a/services/wisdom/pom.xml +++ b/services/wisdom/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT wisdom AWS Java SDK :: Services :: Wisdom diff --git a/services/workdocs/pom.xml b/services/workdocs/pom.xml index 99be1cc960d7..4b7efd7ba884 100644 --- a/services/workdocs/pom.xml +++ b/services/workdocs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT workdocs AWS Java SDK :: Services :: Amazon WorkDocs diff --git a/services/worklink/pom.xml b/services/worklink/pom.xml index b303e9e885f5..8d05d28afe48 100644 --- a/services/worklink/pom.xml +++ b/services/worklink/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT worklink AWS Java SDK :: Services :: WorkLink diff --git a/services/workmail/pom.xml b/services/workmail/pom.xml index e9695795be66..ee418dbfa141 100644 --- a/services/workmail/pom.xml +++ b/services/workmail/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 workmail diff --git a/services/workmailmessageflow/pom.xml b/services/workmailmessageflow/pom.xml index 556da74d77a5..438ccf6a89ba 100644 --- a/services/workmailmessageflow/pom.xml +++ b/services/workmailmessageflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT workmailmessageflow AWS Java SDK :: Services :: WorkMailMessageFlow diff --git a/services/workspaces/pom.xml b/services/workspaces/pom.xml index 155a4e08dc7f..53fc886a634b 100644 --- a/services/workspaces/pom.xml +++ b/services/workspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT workspaces AWS Java SDK :: Services :: Amazon WorkSpaces diff --git a/services/workspacesweb/pom.xml b/services/workspacesweb/pom.xml index 03bcbc2fdb67..b623286a6402 
100644 --- a/services/workspacesweb/pom.xml +++ b/services/workspacesweb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT workspacesweb AWS Java SDK :: Services :: Work Spaces Web diff --git a/services/workspacesweb/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/workspacesweb/src/main/resources/codegen-resources/endpoint-rule-set.json index 3bc54f2343b1..1552c84bcb89 100644 --- a/services/workspacesweb/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/workspacesweb/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - 
"ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] }, - "supportsFIPS" + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://workspaces-web-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + 
"fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://workspaces-web-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://workspaces-web-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://workspaces-web.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, 
{ "conditions": [], "type": "tree", @@ -222,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://workspaces-web-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://workspaces-web.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -231,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://workspaces-web.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://workspaces-web.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/workspacesweb/src/main/resources/codegen-resources/endpoint-tests.json b/services/workspacesweb/src/main/resources/codegen-resources/endpoint-tests.json index 02b9e9cf043d..c62e398b8733 100644 --- a/services/workspacesweb/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/workspacesweb/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,198 +1,29 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://workspaces-web-fips.ap-south-1.api.aws" - } - }, - "params": { - 
"UseFIPS": true, - "UseDualStack": true, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://workspaces-web-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://workspaces-web.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://workspaces-web.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://workspaces-web-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://workspaces-web-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://workspaces-web.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://workspaces-web.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - 
"UseDualStack": false, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://workspaces-web-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://workspaces-web-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://workspaces-web.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://workspaces-web.eu-central-1.amazonaws.com" + "url": "https://workspaces-web.eu-west-1.amazonaws.com" } }, "params": { + "Region": "eu-west-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://workspaces-web-fips.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "us-west-2" + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://workspaces-web-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "us-west-2" - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and 
DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://workspaces-web.us-west-2.api.aws" + "url": "https://workspaces-web.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -203,377 +34,266 @@ } }, "params": { + "Region": "us-west-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-west-2" - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://workspaces-web-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://workspaces-web-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://workspaces-web.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-west-2" + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://workspaces-web.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://workspaces-web-fips.eu-west-1.api.aws" + "url": "https://workspaces-web-fips.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", 
"UseFIPS": true, - "UseDualStack": true, - "Region": "eu-west-1" + "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://workspaces-web-fips.eu-west-1.amazonaws.com" + "url": "https://workspaces-web-fips.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://workspaces-web.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-west-1" + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://workspaces-web.eu-west-1.amazonaws.com" + "url": "https://workspaces-web.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-1" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://workspaces-web-fips.ap-northeast-2.api.aws" + "url": "https://workspaces-web-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-northeast-2" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://workspaces-web-fips.ap-northeast-2.amazonaws.com" + "url": "https://workspaces-web-fips.cn-north-1.amazonaws.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://workspaces-web.ap-northeast-2.api.aws" + "url": "https://workspaces-web.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-northeast-2" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://workspaces-web.ap-northeast-2.amazonaws.com" + "url": "https://workspaces-web.cn-north-1.amazonaws.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://workspaces-web-fips.ap-northeast-1.api.aws" + "url": "https://workspaces-web-fips.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-northeast-1" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://workspaces-web-fips.ap-northeast-1.amazonaws.com" + "url": 
"https://workspaces-web-fips.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://workspaces-web.ap-northeast-1.api.aws" + "url": "https://workspaces-web.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-northeast-1" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://workspaces-web.ap-northeast-1.amazonaws.com" + "url": "https://workspaces-web.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://workspaces-web-fips.ap-southeast-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-southeast-1" + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://workspaces-web-fips.ap-southeast-1.amazonaws.com" + "url": "https://workspaces-web-fips.us-iso-east-1.c2s.ic.gov" } }, 
"params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://workspaces-web.ap-southeast-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-southeast-1" + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://workspaces-web.ap-southeast-1.amazonaws.com" + "url": "https://workspaces-web.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://workspaces-web-fips.ap-southeast-2.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-southeast-2" + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://workspaces-web-fips.ap-southeast-2.amazonaws.com" + "url": "https://workspaces-web-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": 
"us-isob-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://workspaces-web.ap-southeast-2.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-southeast-2" + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://workspaces-web.ap-southeast-2.amazonaws.com" + "url": "https://workspaces-web.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://workspaces-web-fips.us-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://workspaces-web-fips.us-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://workspaces-web.us-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": 
"us-east-1" - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://workspaces-web.us-east-1.amazonaws.com" + "url": "https://example.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1" + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -582,7 +302,6 @@ "params": { "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -592,9 +311,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -604,11 +323,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/workspacesweb/src/main/resources/codegen-resources/paginators-1.json b/services/workspacesweb/src/main/resources/codegen-resources/paginators-1.json index 202a6316819a..98a378650342 100644 --- a/services/workspacesweb/src/main/resources/codegen-resources/paginators-1.json +++ b/services/workspacesweb/src/main/resources/codegen-resources/paginators-1.json @@ -10,6 +10,11 @@ "output_token": "nextToken", "limit_key": "maxResults" }, + "ListIpAccessSettings": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, "ListNetworkSettings": { "input_token": 
"nextToken", "output_token": "nextToken", diff --git a/services/workspacesweb/src/main/resources/codegen-resources/service-2.json b/services/workspacesweb/src/main/resources/codegen-resources/service-2.json index aa2849446d89..1efd59c263c9 100644 --- a/services/workspacesweb/src/main/resources/codegen-resources/service-2.json +++ b/services/workspacesweb/src/main/resources/codegen-resources/service-2.json @@ -32,6 +32,26 @@ "documentation":"

      Associates a browser settings resource with a web portal.

      ", "idempotent":true }, + "AssociateIpAccessSettings":{ + "name":"AssociateIpAccessSettings", + "http":{ + "method":"PUT", + "requestUri":"/portals/{portalArn+}/ipAccessSettings", + "responseCode":200 + }, + "input":{"shape":"AssociateIpAccessSettingsRequest"}, + "output":{"shape":"AssociateIpAccessSettingsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"} + ], + "documentation":"

      Associates an IP access settings resource with a web portal.

      ", + "idempotent":true + }, "AssociateNetworkSettings":{ "name":"AssociateNetworkSettings", "http":{ @@ -151,6 +171,25 @@ ], "documentation":"

      Creates an identity provider resource that is then associated with a web portal.

      " }, + "CreateIpAccessSettings":{ + "name":"CreateIpAccessSettings", + "http":{ + "method":"POST", + "requestUri":"/ipAccessSettings", + "responseCode":200 + }, + "input":{"shape":"CreateIpAccessSettingsRequest"}, + "output":{"shape":"CreateIpAccessSettingsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"} + ], + "documentation":"

      Creates an IP access settings resource that can be associated with a web portal.

      " + }, "CreateNetworkSettings":{ "name":"CreateNetworkSettings", "http":{ @@ -285,6 +324,25 @@ "documentation":"

      Deletes the identity provider.

      ", "idempotent":true }, + "DeleteIpAccessSettings":{ + "name":"DeleteIpAccessSettings", + "http":{ + "method":"DELETE", + "requestUri":"/ipAccessSettings/{ipAccessSettingsArn+}", + "responseCode":200 + }, + "input":{"shape":"DeleteIpAccessSettingsRequest"}, + "output":{"shape":"DeleteIpAccessSettingsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"} + ], + "documentation":"

      Deletes IP access settings.

      ", + "idempotent":true + }, "DeleteNetworkSettings":{ "name":"DeleteNetworkSettings", "http":{ @@ -399,6 +457,25 @@ "documentation":"

      Disassociates browser settings from a web portal.

      ", "idempotent":true }, + "DisassociateIpAccessSettings":{ + "name":"DisassociateIpAccessSettings", + "http":{ + "method":"DELETE", + "requestUri":"/portals/{portalArn+}/ipAccessSettings", + "responseCode":200 + }, + "input":{"shape":"DisassociateIpAccessSettingsRequest"}, + "output":{"shape":"DisassociateIpAccessSettingsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Disassociates IP access settings from a web portal.

      ", + "idempotent":true + }, "DisassociateNetworkSettings":{ "name":"DisassociateNetworkSettings", "http":{ @@ -511,6 +588,24 @@ ], "documentation":"

      Gets the identity provider.

      " }, + "GetIpAccessSettings":{ + "name":"GetIpAccessSettings", + "http":{ + "method":"GET", + "requestUri":"/ipAccessSettings/{ipAccessSettingsArn+}", + "responseCode":200 + }, + "input":{"shape":"GetIpAccessSettingsRequest"}, + "output":{"shape":"GetIpAccessSettingsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Gets the IP access settings.

      " + }, "GetNetworkSettings":{ "name":"GetNetworkSettings", "http":{ @@ -671,6 +766,23 @@ ], "documentation":"

      Retrieves a list of identity providers for a specific web portal.

      " }, + "ListIpAccessSettings":{ + "name":"ListIpAccessSettings", + "http":{ + "method":"GET", + "requestUri":"/ipAccessSettings", + "responseCode":200 + }, + "input":{"shape":"ListIpAccessSettingsRequest"}, + "output":{"shape":"ListIpAccessSettingsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Retrieves a list of IP access settings.

      " + }, "ListNetworkSettings":{ "name":"ListNetworkSettings", "http":{ @@ -866,6 +978,24 @@ ], "documentation":"

      Updates the identity provider.

      " }, + "UpdateIpAccessSettings":{ + "name":"UpdateIpAccessSettings", + "http":{ + "method":"PATCH", + "requestUri":"/ipAccessSettings/{ipAccessSettingsArn+}", + "responseCode":200 + }, + "input":{"shape":"UpdateIpAccessSettingsRequest"}, + "output":{"shape":"UpdateIpAccessSettingsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

      Updates IP access settings.

      " + }, "UpdateNetworkSettings":{ "name":"UpdateNetworkSettings", "http":{ @@ -898,7 +1028,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"} + {"shape":"ValidationException"}, + {"shape":"ConflictException"} ], "documentation":"

      Updates a web portal.

      ", "idempotent":true @@ -1020,6 +1151,44 @@ } } }, + "AssociateIpAccessSettingsRequest":{ + "type":"structure", + "required":[ + "ipAccessSettingsArn", + "portalArn" + ], + "members":{ + "ipAccessSettingsArn":{ + "shape":"ARN", + "documentation":"

      The ARN of the IP access settings.

      ", + "location":"querystring", + "locationName":"ipAccessSettingsArn" + }, + "portalArn":{ + "shape":"ARN", + "documentation":"

      The ARN of the web portal.

      ", + "location":"uri", + "locationName":"portalArn" + } + } + }, + "AssociateIpAccessSettingsResponse":{ + "type":"structure", + "required":[ + "ipAccessSettingsArn", + "portalArn" + ], + "members":{ + "ipAccessSettingsArn":{ + "shape":"ARN", + "documentation":"

      The ARN of the IP access settings resource.

      " + }, + "portalArn":{ + "shape":"ARN", + "documentation":"

      The ARN of the web portal.

      " + } + } + }, "AssociateNetworkSettingsRequest":{ "type":"structure", "required":[ @@ -1408,6 +1577,51 @@ } } }, + "CreateIpAccessSettingsRequest":{ + "type":"structure", + "required":["ipRules"], + "members":{ + "additionalEncryptionContext":{ + "shape":"EncryptionContextMap", + "documentation":"

      Additional encryption context of the IP access settings.

      " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

      A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token returns the result from the original successful request.

      If you do not specify a client token, one is automatically generated by the AWS SDK.

      ", + "idempotencyToken":true + }, + "customerManagedKey":{ + "shape":"keyArn", + "documentation":"

      The custom managed key of the IP access settings.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      The description of the IP access settings.

      " + }, + "displayName":{ + "shape":"DisplayName", + "documentation":"

      The display name of the IP access settings.

      " + }, + "ipRules":{ + "shape":"IpRuleList", + "documentation":"

      The IP rules of the IP access settings.

      " + }, + "tags":{ + "shape":"TagList", + "documentation":"

      The tags to add to the browser settings resource. A tag is a key-value pair.

      " + } + } + }, + "CreateIpAccessSettingsResponse":{ + "type":"structure", + "required":["ipAccessSettingsArn"], + "members":{ + "ipAccessSettingsArn":{ + "shape":"ARN", + "documentation":"

      The ARN of the IP access settings resource.

      " + } + } + }, "CreateNetworkSettingsRequest":{ "type":"structure", "required":[ @@ -1647,6 +1861,23 @@ "members":{ } }, + "DeleteIpAccessSettingsRequest":{ + "type":"structure", + "required":["ipAccessSettingsArn"], + "members":{ + "ipAccessSettingsArn":{ + "shape":"ARN", + "documentation":"

      The ARN of the IP access settings.

      ", + "location":"uri", + "locationName":"ipAccessSettingsArn" + } + } + }, + "DeleteIpAccessSettingsResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteNetworkSettingsRequest":{ "type":"structure", "required":["networkSettingsArn"], @@ -1732,6 +1963,13 @@ "members":{ } }, + "Description":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^.+$", + "sensitive":true + }, "DisassociateBrowserSettingsRequest":{ "type":"structure", "required":["portalArn"], @@ -1749,6 +1987,23 @@ "members":{ } }, + "DisassociateIpAccessSettingsRequest":{ + "type":"structure", + "required":["portalArn"], + "members":{ + "portalArn":{ + "shape":"ARN", + "documentation":"

      The ARN of the web portal.

      ", + "location":"uri", + "locationName":"portalArn" + } + } + }, + "DisassociateIpAccessSettingsResponse":{ + "type":"structure", + "members":{ + } + }, "DisassociateNetworkSettingsRequest":{ "type":"structure", "required":["portalArn"], @@ -1886,6 +2141,27 @@ } } }, + "GetIpAccessSettingsRequest":{ + "type":"structure", + "required":["ipAccessSettingsArn"], + "members":{ + "ipAccessSettingsArn":{ + "shape":"ARN", + "documentation":"

      The ARN of the IP access settings.

      ", + "location":"uri", + "locationName":"ipAccessSettingsArn" + } + } + }, + "GetIpAccessSettingsResponse":{ + "type":"structure", + "members":{ + "ipAccessSettings":{ + "shape":"IpAccessSettings", + "documentation":"

      The IP access settings.

      " + } + } + }, "GetNetworkSettingsRequest":{ "type":"structure", "required":["networkSettingsArn"], @@ -2142,6 +2418,91 @@ "exception":true, "fault":true }, + "IpAccessSettings":{ + "type":"structure", + "required":["ipAccessSettingsArn"], + "members":{ + "associatedPortalArns":{ + "shape":"ArnList", + "documentation":"

      A list of web portal ARNs that this IP access settings resource is associated with.

      " + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

      The creation date timestamp of the IP access settings.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      The description of the IP access settings.

      " + }, + "displayName":{ + "shape":"DisplayName", + "documentation":"

      The display name of the IP access settings.

      " + }, + "ipAccessSettingsArn":{ + "shape":"ARN", + "documentation":"

      The ARN of the IP access settings resource.

      " + }, + "ipRules":{ + "shape":"IpRuleList", + "documentation":"

      The IP rules of the IP access settings.

      " + } + }, + "documentation":"

      The IP access settings resource that can be associated with a web portal.

      " + }, + "IpAccessSettingsList":{ + "type":"list", + "member":{"shape":"IpAccessSettingsSummary"} + }, + "IpAccessSettingsSummary":{ + "type":"structure", + "members":{ + "creationDate":{ + "shape":"Timestamp", + "documentation":"

      The creation date timestamp of the IP access settings.

      " + }, + "description":{ + "shape":"Description", + "documentation":"

      The description of the IP access settings.

      " + }, + "displayName":{ + "shape":"DisplayName", + "documentation":"

      The display name of the IP access settings.

      " + }, + "ipAccessSettingsArn":{ + "shape":"ARN", + "documentation":"

      The ARN of IP access settings.

      " + } + }, + "documentation":"

      The summary of IP access settings.

      " + }, + "IpRange":{ + "type":"string", + "documentation":"

      A single IP address or an IP address range in CIDR notation

      ", + "pattern":"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}(?:/([0-9]|[12][0-9]|3[0-2])|)$", + "sensitive":true + }, + "IpRule":{ + "type":"structure", + "required":["ipRange"], + "members":{ + "description":{ + "shape":"Description", + "documentation":"

      The description of the IP rule.

      " + }, + "ipRange":{ + "shape":"IpRange", + "documentation":"

      The IP range of the IP rule.

      " + } + }, + "documentation":"

      The IP rules of the IP access settings.

      " + }, + "IpRuleList":{ + "type":"list", + "member":{"shape":"IpRule"}, + "max":100, + "min":1, + "sensitive":true + }, "KinesisStreamArn":{ "type":"string", "documentation":"

      Kinesis stream ARN to which log events are published.

      ", @@ -2216,6 +2577,36 @@ } } }, + "ListIpAccessSettingsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"MaxResults", + "documentation":"

      The maximum number of results to be included in the next page.

      ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

      The pagination token used to retrieve the next page of results for this operation.

      ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListIpAccessSettingsResponse":{ + "type":"structure", + "members":{ + "ipAccessSettings":{ + "shape":"IpAccessSettingsList", + "documentation":"

      The IP access settings.

      " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

      The pagination token used to retrieve the next page of results for this operation.

      " + } + } + }, "ListNetworkSettingsRequest":{ "type":"structure", "members":{ @@ -2507,6 +2898,10 @@ "shape":"DisplayName", "documentation":"

      The name of the web portal.

      " }, + "ipAccessSettingsArn":{ + "shape":"ARN", + "documentation":"

      The ARN of the IP access settings.

      " + }, "networkSettingsArn":{ "shape":"ARN", "documentation":"

      The ARN of the network settings that is associated with the web portal.

      " @@ -2587,6 +2982,10 @@ "shape":"DisplayName", "documentation":"

      The name of the web portal.

      " }, + "ipAccessSettingsArn":{ + "shape":"ARN", + "documentation":"

      The ARN of the IP access settings.

      " + }, "networkSettingsArn":{ "shape":"ARN", "documentation":"

      The ARN of the network settings that is associated with the web portal.

      " @@ -2963,6 +3362,45 @@ } } }, + "UpdateIpAccessSettingsRequest":{ + "type":"structure", + "required":["ipAccessSettingsArn"], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

      A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.

      If you do not specify a client token, one is automatically generated by the AWS SDK.

      ", + "idempotencyToken":true + }, + "description":{ + "shape":"Description", + "documentation":"

      The description of the IP access settings.

      " + }, + "displayName":{ + "shape":"DisplayName", + "documentation":"

      The display name of the IP access settings.

      " + }, + "ipAccessSettingsArn":{ + "shape":"ARN", + "documentation":"

      The ARN of the IP access settings.

      ", + "location":"uri", + "locationName":"ipAccessSettingsArn" + }, + "ipRules":{ + "shape":"IpRuleList", + "documentation":"

      The updated IP rules of the IP access settings.

      " + } + } + }, + "UpdateIpAccessSettingsResponse":{ + "type":"structure", + "required":["ipAccessSettings"], + "members":{ + "ipAccessSettings":{ + "shape":"IpAccessSettings", + "documentation":"

      The IP access settings.

      " + } + } + }, "UpdateNetworkSettingsRequest":{ "type":"structure", "required":["networkSettingsArn"], diff --git a/services/xray/pom.xml b/services/xray/pom.xml index 7149f114bb40..7bea769cf3ab 100644 --- a/services/xray/pom.xml +++ b/services/xray/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT xray AWS Java SDK :: Services :: AWS X-Ray diff --git a/test/auth-tests/pom.xml b/test/auth-tests/pom.xml index 2b6b924cb58e..dfd86e25a7e0 100644 --- a/test/auth-tests/pom.xml +++ b/test/auth-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/codegen-generated-classes-test/pom.xml b/test/codegen-generated-classes-test/pom.xml index eadd307d881c..258f9893ad51 100644 --- a/test/codegen-generated-classes-test/pom.xml +++ b/test/codegen-generated-classes-test/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ../../pom.xml diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/HttpChecksumInHeaderTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/HttpChecksumInHeaderTest.java index 029a19447047..8ba47dee79cf 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/HttpChecksumInHeaderTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/HttpChecksumInHeaderTest.java @@ -17,7 +17,6 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.any; -import static software.amazon.awssdk.core.HttpChecksumConstant.HTTP_CHECKSUM_VALUE; import io.reactivex.Flowable; import java.io.IOException; @@ -28,7 +27,6 @@ import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.stream.Collectors; -import org.junit.After; import org.junit.Before; import 
org.junit.Test; import org.mockito.ArgumentCaptor; @@ -38,9 +36,6 @@ import software.amazon.awssdk.awscore.client.builder.AwsClientBuilder; import software.amazon.awssdk.awscore.client.builder.AwsSyncClientBuilder; import software.amazon.awssdk.core.checksums.Algorithm; -import software.amazon.awssdk.core.interceptor.Context; -import software.amazon.awssdk.core.interceptor.ExecutionAttributes; -import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; import software.amazon.awssdk.http.ExecutableHttpRequest; import software.amazon.awssdk.http.HttpExecuteRequest; import software.amazon.awssdk.http.HttpExecuteResponse; @@ -103,11 +98,6 @@ public void setup() throws IOException { }); } - @After - public void clear() { - CaptureChecksumValueInterceptor.reset(); - } - @Test public void sync_json_nonStreaming_unsignedPayload_with_Sha1_in_header() { // jsonClient.flexibleCheckSumOperationWithShaChecksum(r -> r.stringMember("Hello world")); @@ -118,9 +108,6 @@ public void sync_json_nonStreaming_unsignedPayload_with_Sha1_in_header() { assertThat(getSyncRequest().firstMatchingHeader("x-amz-checksum-sha1")).hasValue("M68rRwFal7o7B3KEMt3m0w39TaA="); // Assertion to make sure signer was not executed assertThat(getSyncRequest().firstMatchingHeader("x-amz-content-sha256")).isNotPresent(); - - assertThat(CaptureChecksumValueInterceptor.interceptorComputedChecksum).isEqualTo("M68rRwFal7o7B3KEMt3m0w39TaA="); - } @Test @@ -133,9 +120,6 @@ public void aync_json_nonStreaming_unsignedPayload_with_Sha1_in_header() { assertThat(getAsyncRequest().firstMatchingHeader("x-amz-checksum-sha1")).hasValue("M68rRwFal7o7B3KEMt3m0w39TaA="); // Assertion to make sure signer was not executed assertThat(getAsyncRequest().firstMatchingHeader("x-amz-content-sha256")).isNotPresent(); - assertThat(CaptureChecksumValueInterceptor.interceptorComputedChecksum).isEqualTo("M68rRwFal7o7B3KEMt3m0w39TaA="); - - } @Test @@ -148,9 +132,6 @@ public void 
sync_xml_nonStreaming_unsignedPayload_with_Sha1_in_header() { assertThat(getSyncRequest().firstMatchingHeader("x-amz-checksum-sha1")).hasValue("FB/utBbwFLbIIt5ul3Ojuy5dKgU="); // Assertion to make sure signer was not executed assertThat(getSyncRequest().firstMatchingHeader("x-amz-content-sha256")).isNotPresent(); - - assertThat(CaptureChecksumValueInterceptor.interceptorComputedChecksum).isEqualTo("FB/utBbwFLbIIt5ul3Ojuy5dKgU="); - } @Test @@ -169,9 +150,6 @@ public void sync_xml_nonStreaming_unsignedEmptyPayload_with_Sha1_in_header() { // Assertion to make sure signer was not executed assertThat(getSyncRequest().firstMatchingHeader("x-amz-content-sha256")).isNotPresent(); - - assertThat(CaptureChecksumValueInterceptor.interceptorComputedChecksum).isNull(); - } @Test @@ -185,8 +163,6 @@ public void aync_xml_nonStreaming_unsignedPayload_with_Sha1_in_header() { assertThat(getAsyncRequest().firstMatchingHeader("x-amz-checksum-sha1")).hasValue("FB/utBbwFLbIIt5ul3Ojuy5dKgU="); // Assertion to make sure signer was not executed assertThat(getAsyncRequest().firstMatchingHeader("x-amz-content-sha256")).isNotPresent(); - assertThat(CaptureChecksumValueInterceptor.interceptorComputedChecksum).isEqualTo("FB/utBbwFLbIIt5ul3Ojuy5dKgU="); - } @Test @@ -206,8 +182,6 @@ public void aync_xml_nonStreaming_unsignedEmptyPayload_with_Sha1_in_header() { assertThat(getAsyncRequest().firstMatchingHeader("x-amz-checksum-sha1")).isNotPresent(); // Assertion to make sure signer was not executed assertThat(getAsyncRequest().firstMatchingHeader("x-amz-content-sha256")).isNotPresent(); - assertThat(CaptureChecksumValueInterceptor.interceptorComputedChecksum).isNull(); - } private SdkHttpRequest getSyncRequest() { @@ -224,32 +198,15 @@ private SdkHttpRequest getAsyncRequest() { private & AwsClientBuilder> T initializeSync(T syncClientBuilder) { - return initialize(syncClientBuilder.httpClient(httpClient) - .overrideConfiguration(o -> o.addExecutionInterceptor(new 
CaptureChecksumValueInterceptor()))); + return initialize(syncClientBuilder.httpClient(httpClient)); } private & AwsClientBuilder> T initializeAsync(T asyncClientBuilder) { - return initialize(asyncClientBuilder.httpClient(httpAsyncClient) - .overrideConfiguration(o -> o.addExecutionInterceptor(new CaptureChecksumValueInterceptor()))); + return initialize(asyncClientBuilder.httpClient(httpAsyncClient)); } private > T initialize(T clientBuilder) { return clientBuilder.credentialsProvider(AnonymousCredentialsProvider.create()) .region(Region.US_WEST_2); } - - - private static class CaptureChecksumValueInterceptor implements ExecutionInterceptor { - private static String interceptorComputedChecksum; - - private static void reset() { - interceptorComputedChecksum = null; - } - - @Override - public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { - interceptorComputedChecksum = executionAttributes.getAttribute(HTTP_CHECKSUM_VALUE); - - } - } } \ No newline at end of file diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/endpointproviders/AwsEndpointProviderUtilsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/endpointproviders/AwsEndpointProviderUtilsTest.java index 909543b2a601..f61593d07b47 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/endpointproviders/AwsEndpointProviderUtilsTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/endpointproviders/AwsEndpointProviderUtilsTest.java @@ -250,6 +250,35 @@ public void setUri_combinesPathsCorrectly() { .isEqualTo("https://override.example.com/a/b/c"); } + @Test + public void setUri_doubleSlash_combinesPathsCorrectly() { + URI clientEndpoint = URI.create("https://override.example.com/a"); + URI requestUri = URI.create("https://override.example.com/a//c"); + URI resolvedUri = 
URI.create("https://override.example.com/a/b"); + + SdkHttpRequest request = SdkHttpRequest.builder() + .uri(requestUri) + .method(SdkHttpMethod.GET) + .build(); + + assertThat(AwsEndpointProviderUtils.setUri(request, clientEndpoint, resolvedUri).getUri().toString()) + .isEqualTo("https://override.example.com/a/b//c"); + } + + @Test + public void setUri_withTrailingSlashNoPath_combinesPathsCorrectly() { + URI clientEndpoint = URI.create("https://override.example.com/"); + URI requestUri = URI.create("https://override.example.com//a"); + URI resolvedUri = URI.create("https://override.example.com/"); + SdkHttpRequest request = SdkHttpRequest.builder() + .uri(requestUri) + .method(SdkHttpMethod.GET) + .build(); + + assertThat(AwsEndpointProviderUtils.setUri(request, clientEndpoint, resolvedUri).getUri().toString()) + .isEqualTo("https://override.example.com//a"); + } + @Test public void setHeaders_existingValuesOnOverride_combinesWithNewValues() { AwsRequest request = AllTypesRequest.builder() diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java index 0594f635d7ad..c2b701217cf1 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java @@ -38,7 +38,9 @@ import org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.core.internal.metrics.SdkErrorType; import software.amazon.awssdk.http.AbortableInputStream; import 
software.amazon.awssdk.http.ExecutableHttpRequest; import software.amazon.awssdk.http.HttpExecuteRequest; @@ -250,10 +252,35 @@ public void testApiCall_serviceReturnsError_errorInfoIncludedInMetrics() throws assertThat(requestMetrics.metricValues(CoreMetric.UNMARSHALLING_DURATION)).hasOnlyOneElementSatisfying(d -> { assertThat(d).isGreaterThanOrEqualTo(Duration.ZERO); }); + assertThat(requestMetrics.metricValues(CoreMetric.ERROR_TYPE)).containsExactly(SdkErrorType.SERVER_ERROR.toString()); } } } + @Test + public void testApiCall_httpClientThrowsNetworkError_errorTypeIncludedInMetrics() throws IOException { + ExecutableHttpRequest mockExecuteRequest = mock(ExecutableHttpRequest.class); + when(mockExecuteRequest.call()).thenThrow(new IOException("I/O error")); + + when(mockHttpClient.prepareRequest(any(HttpExecuteRequest.class))) + .thenReturn(mockExecuteRequest); + + thrown.expect(SdkException.class); + try { + client.allTypes(); + } finally { + ArgumentCaptor collectionCaptor = ArgumentCaptor.forClass(MetricCollection.class); + verify(mockPublisher).publish(collectionCaptor.capture()); + + MetricCollection capturedCollection = collectionCaptor.getValue(); + assertThat(capturedCollection.children()).isNotEmpty(); + for (MetricCollection requestMetrics : capturedCollection.children()) { + assertThat(requestMetrics.metricValues(CoreMetric.ERROR_TYPE)).containsExactly(SdkErrorType.IO.toString()); + } + } + } + + private static HttpExecuteResponse mockExecuteResponse(SdkHttpFullResponse httpResponse) { HttpExecuteResponse mockResponse = mock(HttpExecuteResponse.class); when(mockResponse.httpResponse()).thenReturn(httpResponse); diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java index 54fbf8d5f38e..4ed2df722d6b 100644 --- 
a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java @@ -28,13 +28,13 @@ import java.time.Duration; import java.util.concurrent.CompletableFuture; import java.util.function.Supplier; -import org.junit.Ignore; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.ArgumentCaptor; import org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.core.internal.metrics.SdkErrorType; import software.amazon.awssdk.http.HttpMetric; import software.amazon.awssdk.metrics.MetricCollection; import software.amazon.awssdk.metrics.MetricPublisher; @@ -108,6 +108,7 @@ public void apiCall_allRetryAttemptsFailedOfNetworkError() { .isEmpty(); assertThat(requestMetrics.metricValues(CoreMetric.SERVICE_CALL_DURATION).get(0)) .isGreaterThanOrEqualTo(FIXED_DELAY); + assertThat(requestMetrics.metricValues(CoreMetric.ERROR_TYPE)).containsExactly(SdkErrorType.IO.toString()); }); } @@ -162,6 +163,7 @@ private void verifyFailedApiCallAttemptCollection(MetricCollection requestMetric .isGreaterThanOrEqualTo(Duration.ZERO); assertThat(requestMetrics.metricValues(CoreMetric.SERVICE_CALL_DURATION).get(0)) .isGreaterThanOrEqualTo(Duration.ZERO); + assertThat(requestMetrics.metricValues(CoreMetric.ERROR_TYPE)).containsExactly(SdkErrorType.SERVER_ERROR.toString()); } private void verifySuccessfulApiCallAttemptCollection(MetricCollection attemptCollection) { diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolquery/MoveQueryParamsToBodyTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolquery/MoveQueryParamsToBodyTest.java deleted file mode 100644 
index 7f2b32fa668c..000000000000 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolquery/MoveQueryParamsToBodyTest.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.protocolquery; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.io.IOException; -import java.util.Optional; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.mockito.ArgumentCaptor; -import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; -import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.core.interceptor.Context; -import software.amazon.awssdk.core.interceptor.ExecutionAttributes; -import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; -import software.amazon.awssdk.http.ContentStreamProvider; -import software.amazon.awssdk.http.ExecutableHttpRequest; 
-import software.amazon.awssdk.http.HttpExecuteRequest; -import software.amazon.awssdk.http.SdkHttpClient; -import software.amazon.awssdk.http.SdkHttpRequest; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.utils.IoUtils; - -public class MoveQueryParamsToBodyTest { - private static final AwsCredentialsProvider CREDENTIALS = StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid")); - - private SdkHttpClient mockHttpClient; - - private ProtocolQueryClient client; - - @BeforeEach - public void setup() throws IOException { - mockHttpClient = mock(SdkHttpClient.class); - ExecutableHttpRequest mockRequest = mock(ExecutableHttpRequest.class); - when(mockRequest.call()).thenThrow(new IOException("IO error!")); - when(mockHttpClient.prepareRequest(any())).thenReturn(mockRequest); - } - - @AfterEach - public void teardown() { - if (client != null) { - client.close(); - } - client = null; - } - - @Test - public void customInterceptor_additionalQueryParamsAdded_paramsAlsoMovedToBody() throws IOException { - client = ProtocolQueryClient.builder() - .overrideConfiguration(o -> o.addExecutionInterceptor(new AdditionalQueryParamInterceptor())) - .region(Region.US_WEST_2) - .credentialsProvider(CREDENTIALS) - .httpClient(mockHttpClient) - .build(); - - ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(HttpExecuteRequest.class); - - assertThatThrownBy(() -> client.membersInQueryParams(r -> r.stringQueryParam("hello"))) - .isInstanceOf(SdkClientException.class) - .hasMessageContaining("IO"); - - verify(mockHttpClient, atLeast(1)).prepareRequest(requestCaptor.capture()); - - ContentStreamProvider requestContent = requestCaptor.getValue().contentStreamProvider().get(); - - String contentString = IoUtils.toUtf8String(requestContent.newStream()); - - assertThat(contentString).contains("CustomParamName=CustomParamValue"); - } - - private static class AdditionalQueryParamInterceptor implements ExecutionInterceptor { - @Override - 
public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { - return context.httpRequest().toBuilder() - .putRawQueryParameter("CustomParamName", "CustomParamValue") - .build(); - } - } -} diff --git a/test/http-client-tests/pom.xml b/test/http-client-tests/pom.xml index 56d8b0cac94d..791ed4b57bba 100644 --- a/test/http-client-tests/pom.xml +++ b/test/http-client-tests/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ../../pom.xml http-client-tests diff --git a/test/module-path-tests/pom.xml b/test/module-path-tests/pom.xml index bacab6dd8fff..85eabc4f57f4 100644 --- a/test/module-path-tests/pom.xml +++ b/test/module-path-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests-core/pom.xml b/test/protocol-tests-core/pom.xml index 206a45ce16d7..42ae0b40b332 100644 --- a/test/protocol-tests-core/pom.xml +++ b/test/protocol-tests-core/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests/pom.xml b/test/protocol-tests/pom.xml index 24659ebd8c75..d0f6f1623e59 100644 --- a/test/protocol-tests/pom.xml +++ b/test/protocol-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/ResourceManagementTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/ResourceManagementTest.java index 2b047680921d..97dceccfb110 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/ResourceManagementTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/ResourceManagementTest.java @@ -23,6 +23,8 @@ import static 
software.amazon.awssdk.core.client.config.SdkAdvancedAsyncClientOption.FUTURE_COMPLETION_EXECUTOR; import java.util.concurrent.ExecutorService; +import java.util.concurrent.ScheduledExecutorService; + import org.junit.jupiter.api.Test; import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; import software.amazon.awssdk.http.SdkHttpClient; @@ -83,6 +85,16 @@ public void executorFromBuilderNotShutdown() { verify(executor, never()).shutdownNow(); } + @Test + public void scheduledExecutorFromBuilderNotShutdown() { + ScheduledExecutorService scheduledExecutorService = mock(ScheduledExecutorService.class); + + asyncClientBuilder().overrideConfiguration(c -> c.scheduledExecutorService(scheduledExecutorService)).build().close(); + + verify(scheduledExecutorService, never()).shutdown(); + verify(scheduledExecutorService, never()).shutdownNow(); + } + public ProtocolRestJsonClientBuilder syncClientBuilder() { return ProtocolRestJsonClient.builder() .region(Region.US_EAST_1) diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/connection/SyncClientConnectionInterruptionTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/connection/SyncClientConnectionInterruptionTest.java new file mode 100644 index 000000000000..1a17986dc30f --- /dev/null +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/connection/SyncClientConnectionInterruptionTest.java @@ -0,0 +1,205 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.protocol.tests.connection; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.post; +import static com.github.tomakehurst.wiremock.client.WireMock.urlMatching; +import static org.assertj.core.api.Assertions.assertThat; + +import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.client.ResponseDefinitionBuilder; +import com.github.tomakehurst.wiremock.core.WireMockConfiguration; +import java.net.URI; +import java.time.Duration; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicLong; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.exception.AbortedException; +import software.amazon.awssdk.core.exception.ApiCallAttemptTimeoutException; +import software.amazon.awssdk.core.retry.RetryPolicy; +import software.amazon.awssdk.http.SdkHttpClient; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.metrics.MetricRecord; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClientBuilder; +import software.amazon.awssdk.services.protocolrestjson.model.AllTypesResponse; + +/** + * Tests to verify Interruption of 
Threads while Http Connection is in progress to make sure Resources are released. + */ +class SyncClientConnectionInterruptionTest { + public static final String SAMPLE_BODY = "{\"StringMember" + + "\":\"resultString\"}"; + private final WireMockServer mockServer = new WireMockServer(new WireMockConfiguration() + .bindAddress("localhost").dynamicPort()); + + private static final ExecutorService executorService = Executors.newCachedThreadPool(); + + @BeforeEach + public void setup() { + mockServer.start(); + stubPostRequest(".*", aResponse(), "{}"); + } + + @AfterAll + public static void cleanUp(){ + executorService.shutdownNow(); + } + + @Test + void connectionPoolsGetsReusedWhenInterruptedWith_1_MaxConnection() throws Exception { + Integer responseDelay = 1500; + + String urlRegex = "/2016-03-11/allTypes"; + stubPostRequest(urlRegex, aResponse().withFixedDelay(responseDelay), SAMPLE_BODY); + SdkHttpClient httpClient = ApacheHttpClient.builder().maxConnections(1).build(); + ProtocolRestJsonClient client = getClient(httpClient, Duration.ofMillis(2L * responseDelay)).build(); + + Future toBeInterruptedFuture = executorService.submit(() -> client.allTypes()); + unInterruptedSleep(responseDelay - responseDelay / 5); + toBeInterruptedFuture.cancel(true); + // Make sure thread start the Http connections + unInterruptedSleep(50); + AllTypesResponse allTypesResponse = client.allTypes(); + assertThat(allTypesResponse.stringMember()).isEqualTo("resultString"); + executorService.shutdownNow(); + } + + @Test + void interruptionWhenWaitingForLease_AbortsImmediately() throws InterruptedException { + Integer responseDelay = 50000; + ExceptionInThreadRun exceptionInThreadRun = new ExceptionInThreadRun(); + AtomicLong leaseWaitingTime = new AtomicLong(responseDelay); + stubPostRequest("/2016-03-11/allTypes", aResponse().withFixedDelay(responseDelay), SAMPLE_BODY); + SdkHttpClient httpClient = ApacheHttpClient.builder().maxConnections(1).build(); + ProtocolRestJsonClient client = 
getClient(httpClient, Duration.ofMillis(2L * responseDelay)).build(); + executorService.submit(() -> client.allTypes()); + // 1 Sec sleep to make sure Thread 1 is picked for executing Http connection + unInterruptedSleep(1000); + Thread leaseWaitingThread = new Thread(() -> { + + try { + client.allTypes(l -> l.overrideConfiguration( + b -> b + .addMetricPublisher(new MetricPublisher() { + @Override + public void publish(MetricCollection metricCollection) { + Optional> apiCallDuration = + metricCollection.stream().filter(o -> "ApiCallDuration".equals(o.metric().name())).findAny(); + leaseWaitingTime.set(Duration.parse(apiCallDuration.get().value().toString()).toMillis()); + } + + @Override + public void close() { + } + }) + )); + + } catch (Exception exception) { + exceptionInThreadRun.setException(exception); + + } + }); + + leaseWaitingThread.start(); + // 1 sec sleep to make sure Http connection execution is initialized for Thread 2 , in this case it will wait for lease + // and immediately terminate on interrupt + unInterruptedSleep(1000); + leaseWaitingThread.interrupt(); + leaseWaitingThread.join(); + assertThat(leaseWaitingTime.get()).isNotEqualTo(responseDelay.longValue()); + assertThat(leaseWaitingTime.get()).isLessThan(responseDelay.longValue()); + assertThat(exceptionInThreadRun.getException()).isInstanceOf(AbortedException.class); + client.close(); + } + + /** + * Service Latency is set to high value say X. + * Api timeout value id set to 1/3 of X. + * And we interrupt the thread at 90% of X. + * In this case since the ApiTimeOut first happened we should get ApiTimeOut Exception and not the interrupt. 
+ */ + @Test + void interruptionDueToApiTimeOut_followed_byInterruptCausesOnlyTimeOutException() throws InterruptedException { + SdkHttpClient httpClient = ApacheHttpClient.create(); + Integer responseDelay = 3000; + stubPostRequest("/2016-03-11/allTypes", aResponse().withFixedDelay(responseDelay), SAMPLE_BODY); + ExceptionInThreadRun exception = new ExceptionInThreadRun(); + ProtocolRestJsonClient client = + getClient(httpClient, Duration.ofMillis(10)).overrideConfiguration(o -> o.retryPolicy(RetryPolicy.none())).build(); + unInterruptedSleep(100); + // We need to creat a separate thread to interrupt it externally. + Thread leaseWaitingThread = new Thread(() -> { + try { + client.allTypes(l -> l.overrideConfiguration(b -> b.apiCallAttemptTimeout(Duration.ofMillis(responseDelay / 3)))); + } catch (Exception e) { + exception.setException(e); + } + }); + leaseWaitingThread.start(); + unInterruptedSleep(responseDelay - responseDelay / 10); + leaseWaitingThread.interrupt(); + leaseWaitingThread.join(); + assertThat(exception.getException()).isInstanceOf(ApiCallAttemptTimeoutException.class); + client.close(); + } + + private class ExceptionInThreadRun { + private Exception exception; + public Exception getException() { + return exception; + } + public void setException(Exception exception) { + this.exception = exception; + } + } + + static void unInterruptedSleep(long millis){ + try { + Thread.sleep(millis); + } catch (InterruptedException e) { + throw new IllegalStateException("This test sleep is not be interrupted"); + } + } + + private void stubPostRequest(String urlRegex, ResponseDefinitionBuilder LONG_DELAY, String body) { + mockServer.stubFor(post(urlMatching(urlRegex)) + .willReturn(LONG_DELAY + .withStatus(200) + .withBody(body))); + } + private ProtocolRestJsonClientBuilder getClient(SdkHttpClient httpClient, Duration timeOutDuration) { + return ProtocolRestJsonClient.builder() + .credentialsProvider( + 
StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) + .endpointOverride(URI.create("http://localhost:" + mockServer.port())) + .httpClient(httpClient) + .overrideConfiguration(o -> o.apiCallTimeout(timeOutDuration)); + + } +} diff --git a/test/region-testing/pom.xml b/test/region-testing/pom.xml index b97860363854..032c30269760 100644 --- a/test/region-testing/pom.xml +++ b/test/region-testing/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/ruleset-testing-core/pom.xml b/test/ruleset-testing-core/pom.xml index 18fab3947e5d..9cc631689f3b 100644 --- a/test/ruleset-testing-core/pom.xml +++ b/test/ruleset-testing-core/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/s3-benchmarks/pom.xml b/test/s3-benchmarks/pom.xml index 9b141a2b8e3b..8ab5691ed110 100644 --- a/test/s3-benchmarks/pom.xml +++ b/test/s3-benchmarks/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/sdk-benchmarks/pom.xml b/test/sdk-benchmarks/pom.xml index 3e215615c1f0..727b9d92b7fc 100644 --- a/test/sdk-benchmarks/pom.xml +++ b/test/sdk-benchmarks/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ../../pom.xml diff --git a/test/sdk-native-image-test/pom.xml b/test/sdk-native-image-test/pom.xml index cbbec18d056c..8315ccb5a6bd 100644 --- a/test/sdk-native-image-test/pom.xml +++ b/test/sdk-native-image-test/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/service-test-utils/pom.xml b/test/service-test-utils/pom.xml index b5f115be1b20..9a3fd929722e 100644 --- a/test/service-test-utils/pom.xml +++ b/test/service-test-utils/pom.xml @@ -21,7 +21,7 @@ 
software.amazon.awssdk aws-sdk-java-pom - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ../../pom.xml service-test-utils diff --git a/test/stability-tests/pom.xml b/test/stability-tests/pom.xml index 869ca5bfeddb..9e8d72d54fa7 100644 --- a/test/stability-tests/pom.xml +++ b/test/stability-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/test-utils/pom.xml b/test/test-utils/pom.xml index 6fcaf81e68ed..65dbf0d9290d 100644 --- a/test/test-utils/pom.xml +++ b/test/test-utils/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ../../pom.xml test-utils diff --git a/test/tests-coverage-reporting/pom.xml b/test/tests-coverage-reporting/pom.xml index 185c7f2bab95..ecc073de7973 100644 --- a/test/tests-coverage-reporting/pom.xml +++ b/test/tests-coverage-reporting/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/third-party/pom.xml b/third-party/pom.xml index 2ee9d980558e..fa283581d3ec 100644 --- a/third-party/pom.xml +++ b/third-party/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT third-party diff --git a/third-party/third-party-jackson-core/pom.xml b/third-party/third-party-jackson-core/pom.xml index 1a1df883e563..9772231a5c88 100644 --- a/third-party/third-party-jackson-core/pom.xml +++ b/third-party/third-party-jackson-core/pom.xml @@ -20,7 +20,7 @@ third-party software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 diff --git a/third-party/third-party-jackson-dataformat-cbor/pom.xml b/third-party/third-party-jackson-dataformat-cbor/pom.xml index fe01ae19580f..ad350d314d17 100644 --- a/third-party/third-party-jackson-dataformat-cbor/pom.xml +++ b/third-party/third-party-jackson-dataformat-cbor/pom.xml @@ -20,7 +20,7 @@ third-party software.amazon.awssdk - 2.20.68-SNAPSHOT + 
2.20.93-SNAPSHOT 4.0.0 diff --git a/utils/pom.xml b/utils/pom.xml index 46cbd3f849b7..96c3501de6f1 100644 --- a/utils/pom.xml +++ b/utils/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.68-SNAPSHOT + 2.20.93-SNAPSHOT 4.0.0 diff --git a/utils/src/main/java/software/amazon/awssdk/utils/BinaryUtils.java b/utils/src/main/java/software/amazon/awssdk/utils/BinaryUtils.java index e7fd8c015e1d..192ea7cead9b 100644 --- a/utils/src/main/java/software/amazon/awssdk/utils/BinaryUtils.java +++ b/utils/src/main/java/software/amazon/awssdk/utils/BinaryUtils.java @@ -117,6 +117,80 @@ public static ByteArrayInputStream toStream(ByteBuffer byteBuffer) { return new ByteArrayInputStream(copyBytesFrom(byteBuffer)); } + /** + * Returns an immutable copy of the given {@code ByteBuffer}. + *

      + * The new buffer's position will be set to the position of the given {@code ByteBuffer}, but the mark if defined will be + * ignored. + *

      + * NOTE: this method intentionally converts direct buffers to non-direct though there is no guarantee this will always + * be the case, if this is required see {@link #toNonDirectBuffer(ByteBuffer)} + * + * @param bb the source {@code ByteBuffer} to copy. + * @return a read only {@code ByteBuffer}. + */ + public static ByteBuffer immutableCopyOf(ByteBuffer bb) { + if (bb == null) { + return null; + } + int sourceBufferPosition = bb.position(); + ByteBuffer readOnlyCopy = bb.asReadOnlyBuffer(); + readOnlyCopy.rewind(); + ByteBuffer cloned = ByteBuffer.allocate(readOnlyCopy.capacity()) + .put(readOnlyCopy); + cloned.position(sourceBufferPosition); + return cloned.asReadOnlyBuffer(); + } + + /** + * Returns an immutable copy of the remaining bytes of the given {@code ByteBuffer}. + *

      + * NOTE: this method intentionally converts direct buffers to non-direct though there is no guarantee this will always + * be the case, if this is required see {@link #toNonDirectBuffer(ByteBuffer)} + * + * @param bb the source {@code ByteBuffer} to copy. + * @return a read only {@code ByteBuffer}. + */ + public static ByteBuffer immutableCopyOfRemaining(ByteBuffer bb) { + if (bb == null) { + return null; + } + ByteBuffer readOnlyCopy = bb.asReadOnlyBuffer(); + ByteBuffer cloned = ByteBuffer.allocate(readOnlyCopy.remaining()) + .put(readOnlyCopy); + cloned.flip(); + return cloned.asReadOnlyBuffer(); + } + + /** + * Returns a copy of the given {@code DirectByteBuffer} from its current position as a non-direct {@code HeapByteBuffer} + *

      + * The new buffer's position will be set to the position of the given {@code ByteBuffer}, but the mark if defined will be + * ignored. + * + * @param bb the source {@code ByteBuffer} to copy. + * @return {@code ByteBuffer}. + */ + public static ByteBuffer toNonDirectBuffer(ByteBuffer bb) { + if (bb == null) { + return null; + } + if (!bb.isDirect()) { + throw new IllegalArgumentException("Provided ByteBuffer is already non-direct"); + } + int sourceBufferPosition = bb.position(); + ByteBuffer readOnlyCopy = bb.asReadOnlyBuffer(); + readOnlyCopy.rewind(); + ByteBuffer cloned = ByteBuffer.allocate(bb.capacity()) + .put(readOnlyCopy); + cloned.rewind(); + cloned.position(sourceBufferPosition); + if (bb.isReadOnly()) { + return cloned.asReadOnlyBuffer(); + } + return cloned; + } + /** * Returns a copy of all the bytes from the given ByteBuffer, * from the beginning to the buffer's limit; or null if the input is null. diff --git a/utils/src/main/java/software/amazon/awssdk/utils/ClassLoaderHelper.java b/utils/src/main/java/software/amazon/awssdk/utils/ClassLoaderHelper.java new file mode 100644 index 000000000000..487ae82df18d --- /dev/null +++ b/utils/src/main/java/software/amazon/awssdk/utils/ClassLoaderHelper.java @@ -0,0 +1,150 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.utils; + + +import software.amazon.awssdk.annotations.SdkProtectedApi; + +@SdkProtectedApi +public final class ClassLoaderHelper { + + private ClassLoaderHelper() { + } + + private static Class loadClassViaClasses(String fqcn, Class[] classes) { + if (classes == null) { + return null; + } + + for (Class clzz: classes) { + if (clzz == null) { + continue; + } + ClassLoader loader = clzz.getClassLoader(); + if (loader != null) { + try { + return loader.loadClass(fqcn); + } catch (ClassNotFoundException e) { + // move on to try the next class loader + } + } + } + return null; + } + + private static Class loadClassViaContext(String fqcn) { + ClassLoader loader = contextClassLoader(); + try { + return loader == null ? null : loader.loadClass(fqcn); + } catch (ClassNotFoundException e) { + // Ignored. + } + return null; + } + + /** + * Loads the class via the optionally specified classes in the order of + * their specification, and if not found, via the context class loader of + * the current thread, and if not found, from the caller class loader as the + * last resort. + * + * @param fqcn + * fully qualified class name of the target class to be loaded + * @param classes + * class loader providers + * @return the class loaded; never null + * + * @throws ClassNotFoundException + * if failed to load the class + */ + public static Class loadClass(String fqcn, Class... classes) throws ClassNotFoundException { + return loadClass(fqcn, true, classes); + } + + /** + * If classesFirst is false, loads the class via the context class + * loader of the current thread, and if not found, via the class loaders of + * the optionally specified classes in the order of their specification, and + * if not found, from the caller class loader as the + * last resort. + *

      + * If classesFirst is true, loads the class via the optionally + * specified classes in the order of their specification, and if not found, + * via the context class loader of the current thread, and if not found, + * from the caller class loader as the last resort. + * + * @param fqcn + * fully qualified class name of the target class to be loaded + * @param classesFirst + * true if the class loaders of the optionally specified classes + * take precedence over the context class loader of the current + * thread; false if the opposite is true. + * @param classes + * class loader providers + * @return the class loaded; never null + * + * @throws ClassNotFoundException if failed to load the class + */ + public static Class loadClass(String fqcn, boolean classesFirst, + Class... classes) throws ClassNotFoundException { + Class target = null; + if (classesFirst) { + target = loadClassViaClasses(fqcn, classes); + if (target == null) { + target = loadClassViaContext(fqcn); + } + } else { + target = loadClassViaContext(fqcn); + if (target == null) { + target = loadClassViaClasses(fqcn, classes); + } + } + return target == null ? Class.forName(fqcn) : target; + } + + /** + * Attempt to get the current thread's class loader and fallback to the system classloader if null + * @return a {@link ClassLoader} or null if none found + */ + private static ClassLoader contextClassLoader() { + ClassLoader threadClassLoader = Thread.currentThread().getContextClassLoader(); + if (threadClassLoader != null) { + return threadClassLoader; + } + return ClassLoader.getSystemClassLoader(); + } + + /** + * Attempt to get class loader that loads the classes and fallback to the thread context classloader if null. + * + * @param classes the classes + * @return a {@link ClassLoader} or null if none found + */ + public static ClassLoader classLoader(Class... 
classes) { + if (classes != null) { + for (Class clzz : classes) { + ClassLoader classLoader = clzz.getClassLoader(); + + if (classLoader != null) { + return classLoader; + } + } + } + + return contextClassLoader(); + } + +} \ No newline at end of file diff --git a/utils/src/main/java/software/amazon/awssdk/utils/ScheduledExecutorUtils.java b/utils/src/main/java/software/amazon/awssdk/utils/ScheduledExecutorUtils.java new file mode 100644 index 000000000000..d6e33f7a607f --- /dev/null +++ b/utils/src/main/java/software/amazon/awssdk/utils/ScheduledExecutorUtils.java @@ -0,0 +1,157 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.utils; + +import static software.amazon.awssdk.utils.Validate.paramNotNull; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.annotations.SdkTestInternalApi; + +/** + * Utilities that make it easier to create, use and destroy + * {@link ScheduledExecutor}s. 
+ */ +@SdkProtectedApi +public final class ScheduledExecutorUtils { + private ScheduledExecutorUtils() { + } + + /** + * Wrap a scheduled executor in a type that cannot be closed, or shut down. + */ + public static ScheduledExecutorService unmanagedScheduledExecutor(ScheduledExecutorService executor) { + return new UnmanagedScheduledExecutorService(executor); + } + + /** + * Wrapper around {@link ScheduledExecutorService} to prevent it from being + * closed. Used when the customer provides + * a custom scheduled executor service in which case they are responsible for + * the lifecycle of it. + */ + @SdkTestInternalApi + public static final class UnmanagedScheduledExecutorService implements ScheduledExecutorService { + + private final ScheduledExecutorService delegate; + + UnmanagedScheduledExecutorService(ScheduledExecutorService delegate) { + this.delegate = paramNotNull(delegate, "ScheduledExecutorService"); + } + + public ScheduledExecutorService scheduledExecutorService() { + return delegate; + } + + @Override + public void shutdown() { + // Do nothing, this executor service is managed by the customer. 
+ } + + @Override + public List shutdownNow() { + return new ArrayList<>(); + } + + @Override + public boolean isShutdown() { + return delegate.isShutdown(); + } + + @Override + public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { + return delegate.awaitTermination(timeout, unit); + } + + @Override + public ScheduledFuture schedule(Runnable command, long delay, TimeUnit unit) { + return delegate.schedule(command, delay, unit); + } + + @Override + public ScheduledFuture schedule(Callable callable, long delay, TimeUnit unit) { + return delegate.schedule(callable, delay, unit); + } + + @Override + public ScheduledFuture scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) { + return delegate.scheduleAtFixedRate(command, initialDelay, period, unit); + } + + @Override + public ScheduledFuture scheduleWithFixedDelay(Runnable command, long initialDelay, long delay, + TimeUnit unit) { + return delegate.scheduleWithFixedDelay(command, initialDelay, delay, unit); + } + + @Override + public boolean isTerminated() { + return delegate.isTerminated(); + } + + @Override + public Future submit(Callable task) { + return delegate.submit(task); + } + + @Override + public Future submit(Runnable task, T result) { + return delegate.submit(task, result); + } + + @Override + public Future submit(Runnable task) { + return delegate.submit(task); + } + + @Override + public List> invokeAll(Collection> tasks) throws InterruptedException { + return delegate.invokeAll(tasks); + } + + @Override + public List> invokeAll(Collection> tasks, long timeout, TimeUnit unit) + throws InterruptedException { + return delegate.invokeAll(tasks, timeout, unit); + } + + @Override + public T invokeAny(Collection> tasks) + throws InterruptedException, ExecutionException { + return delegate.invokeAny(tasks); + } + + @Override + public T invokeAny(Collection> tasks, long timeout, TimeUnit unit) + throws InterruptedException, ExecutionException, 
TimeoutException { + return delegate.invokeAny(tasks, timeout, unit); + } + + @Override + public void execute(Runnable command) { + delegate.execute(command); + } + } +} diff --git a/utils/src/test/java/software/amazon/awssdk/utils/BinaryUtilsTest.java b/utils/src/test/java/software/amazon/awssdk/utils/BinaryUtilsTest.java index 5f255d347adc..4e416ea9e3b6 100644 --- a/utils/src/test/java/software/amazon/awssdk/utils/BinaryUtilsTest.java +++ b/utils/src/test/java/software/amazon/awssdk/utils/BinaryUtilsTest.java @@ -16,9 +16,11 @@ package software.amazon.awssdk.utils; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import java.nio.ByteBuffer; @@ -32,13 +34,11 @@ public class BinaryUtilsTest { public void testHex() { { String hex = BinaryUtils.toHex(new byte[] {0}); - System.out.println(hex); String hex2 = Base16Lower.encodeAsString(new byte[] {0}); assertEquals(hex, hex2); } { String hex = BinaryUtils.toHex(new byte[] {-1}); - System.out.println(hex); String hex2 = Base16Lower.encodeAsString(new byte[] {-1}); assertEquals(hex, hex2); } @@ -169,7 +169,7 @@ public void testCopyRemainingBytesFrom_nullBuffer() { @Test public void testCopyRemainingBytesFrom_noRemainingBytes() { ByteBuffer bb = ByteBuffer.allocate(1); - bb.put(new byte[]{1}); + bb.put(new byte[] {1}); bb.flip(); bb.get(); @@ -180,7 +180,7 @@ public void testCopyRemainingBytesFrom_noRemainingBytes() { @Test public void testCopyRemainingBytesFrom_fullBuffer() { ByteBuffer bb = ByteBuffer.allocate(4); - bb.put(new byte[]{1, 2, 3, 4}); + bb.put(new byte[] {1, 2, 3, 4}); bb.flip(); byte[] copy = BinaryUtils.copyRemainingBytesFrom(bb); @@ 
-191,7 +191,7 @@ public void testCopyRemainingBytesFrom_fullBuffer() { @Test public void testCopyRemainingBytesFrom_partiallyReadBuffer() { ByteBuffer bb = ByteBuffer.allocate(4); - bb.put(new byte[]{1, 2, 3, 4}); + bb.put(new byte[] {1, 2, 3, 4}); bb.flip(); bb.get(); @@ -201,4 +201,137 @@ public void testCopyRemainingBytesFrom_partiallyReadBuffer() { assertThat(bb).isEqualTo(ByteBuffer.wrap(copy)); assertThat(copy).hasSize(2); } + + @Test + public void testImmutableCopyOfByteBuffer() { + ByteBuffer sourceBuffer = ByteBuffer.allocate(4); + byte[] originalBytesInSource = {1, 2, 3, 4}; + sourceBuffer.put(originalBytesInSource); + sourceBuffer.flip(); + + ByteBuffer immutableCopy = BinaryUtils.immutableCopyOf(sourceBuffer); + + byte[] bytesInSourceAfterCopy = {-1, -2, -3, -4}; + sourceBuffer.put(bytesInSourceAfterCopy); + sourceBuffer.flip(); + + assertTrue(immutableCopy.isReadOnly()); + byte[] fromImmutableCopy = new byte[originalBytesInSource.length]; + immutableCopy.get(fromImmutableCopy); + assertArrayEquals(originalBytesInSource, fromImmutableCopy); + + assertEquals(0, sourceBuffer.position()); + byte[] fromSource = new byte[bytesInSourceAfterCopy.length]; + sourceBuffer.get(fromSource); + assertArrayEquals(bytesInSourceAfterCopy, fromSource); + } + + @Test + public void testImmutableCopyOfByteBuffer_nullBuffer() { + assertNull(BinaryUtils.immutableCopyOf(null)); + } + + @Test + public void testImmutableCopyOfByteBuffer_partiallyReadBuffer() { + ByteBuffer sourceBuffer = ByteBuffer.allocate(4); + byte[] bytes = {1, 2, 3, 4}; + sourceBuffer.put(bytes); + sourceBuffer.position(2); + + ByteBuffer immutableCopy = BinaryUtils.immutableCopyOf(sourceBuffer); + + assertEquals(sourceBuffer.position(), immutableCopy.position()); + immutableCopy.rewind(); + byte[] fromImmutableCopy = new byte[bytes.length]; + immutableCopy.get(fromImmutableCopy); + assertArrayEquals(bytes, fromImmutableCopy); + } + + @Test + public void testImmutableCopyOfRemainingByteBuffer() { + 
ByteBuffer sourceBuffer = ByteBuffer.allocate(4); + byte[] originalBytesInSource = {1, 2, 3, 4}; + sourceBuffer.put(originalBytesInSource); + sourceBuffer.flip(); + + ByteBuffer immutableCopy = BinaryUtils.immutableCopyOfRemaining(sourceBuffer); + + byte[] bytesInSourceAfterCopy = {-1, -2, -3, -4}; + sourceBuffer.put(bytesInSourceAfterCopy); + sourceBuffer.flip(); + + assertTrue(immutableCopy.isReadOnly()); + byte[] fromImmutableCopy = new byte[originalBytesInSource.length]; + immutableCopy.get(fromImmutableCopy); + assertArrayEquals(originalBytesInSource, fromImmutableCopy); + + assertEquals(0, sourceBuffer.position()); + byte[] fromSource = new byte[bytesInSourceAfterCopy.length]; + sourceBuffer.get(fromSource); + assertArrayEquals(bytesInSourceAfterCopy, fromSource); + } + + @Test + public void testImmutableCopyOfByteBufferRemaining_nullBuffer() { + assertNull(BinaryUtils.immutableCopyOfRemaining(null)); + } + + @Test + public void testImmutableCopyOfByteBufferRemaining_partiallyReadBuffer() { + ByteBuffer sourceBuffer = ByteBuffer.allocate(4); + byte[] bytes = {1, 2, 3, 4}; + sourceBuffer.put(bytes); + sourceBuffer.position(2); + + ByteBuffer immutableCopy = BinaryUtils.immutableCopyOfRemaining(sourceBuffer); + + assertEquals(2, immutableCopy.capacity()); + assertEquals(2, immutableCopy.remaining()); + assertEquals(0, immutableCopy.position()); + assertEquals((byte) 3, immutableCopy.get()); + assertEquals((byte) 4, immutableCopy.get()); + } + + @Test + public void testToNonDirectBuffer() { + ByteBuffer bb = ByteBuffer.allocateDirect(4); + byte[] expected = {1, 2, 3, 4}; + bb.put(expected); + bb.flip(); + + ByteBuffer nonDirectBuffer = BinaryUtils.toNonDirectBuffer(bb); + + assertFalse(nonDirectBuffer.isDirect()); + byte[] bytes = new byte[expected.length]; + nonDirectBuffer.get(bytes); + assertArrayEquals(expected, bytes); + } + + @Test + public void testToNonDirectBuffer_nullBuffer() { + assertNull(BinaryUtils.toNonDirectBuffer(null)); + } + + @Test + public 
void testToNonDirectBuffer_partiallyReadBuffer() { + ByteBuffer sourceBuffer = ByteBuffer.allocateDirect(4); + byte[] bytes = {1, 2, 3, 4}; + sourceBuffer.put(bytes); + sourceBuffer.position(2); + + ByteBuffer nonDirectBuffer = BinaryUtils.toNonDirectBuffer(sourceBuffer); + + assertEquals(sourceBuffer.position(), nonDirectBuffer.position()); + nonDirectBuffer.rewind(); + byte[] fromNonDirectBuffer = new byte[bytes.length]; + nonDirectBuffer.get(fromNonDirectBuffer); + assertArrayEquals(bytes, fromNonDirectBuffer); + } + + @Test + public void testToNonDirectBuffer_nonDirectBuffer() { + ByteBuffer nonDirectBuffer = ByteBuffer.allocate(0); + assertThrows(IllegalArgumentException.class, () -> BinaryUtils.toNonDirectBuffer(nonDirectBuffer)); + } + }