From 7ef8830386a404274571e93fd279f3bb02afff62 Mon Sep 17 00:00:00 2001 From: Manuel Sugawara Date: Wed, 8 Mar 2023 10:35:03 -0800 Subject: [PATCH 01/32] New API for the retries module (#3769) This new module includes the interfaces and classes that will be used to implement the new retry logic within the SDK. --- .../feature-AWSSDKforJavav2-b456b1c.json | 6 + core/pom.xml | 1 + core/retries-api/pom.xml | 68 ++++++ .../api/AcquireInitialTokenRequest.java | 36 +++ .../api/AcquireInitialTokenResponse.java | 38 ++++ .../awssdk/retries/api/BackoffStrategy.java | 36 +++ .../retries/api/RecordSuccessRequest.java | 33 +++ .../retries/api/RecordSuccessResponse.java | 33 +++ .../retries/api/RefreshRetryTokenRequest.java | 47 ++++ .../api/RefreshRetryTokenResponse.java | 38 ++++ .../awssdk/retries/api/RetryStrategy.java | 207 ++++++++++++++++++ .../amazon/awssdk/retries/api/RetryToken.java | 32 +++ .../api/TokenAcquisitionFailedException.java | 58 +++++ .../retries/api/RetryStrategyBuilderTest.java | 175 +++++++++++++++ test/tests-coverage-reporting/pom.xml | 5 + 15 files changed, 813 insertions(+) create mode 100644 .changes/next-release/feature-AWSSDKforJavav2-b456b1c.json create mode 100644 core/retries-api/pom.xml create mode 100644 core/retries-api/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenRequest.java create mode 100644 core/retries-api/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenResponse.java create mode 100644 core/retries-api/src/main/java/software/amazon/awssdk/retries/api/BackoffStrategy.java create mode 100644 core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessRequest.java create mode 100644 core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessResponse.java create mode 100644 core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenRequest.java create mode 100644 core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenResponse.java create mode 100644 core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java create mode 100644 core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryToken.java create mode 100644 core/retries-api/src/main/java/software/amazon/awssdk/retries/api/TokenAcquisitionFailedException.java create mode 100644 core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java diff --git a/.changes/next-release/feature-AWSSDKforJavav2-b456b1c.json b/.changes/next-release/feature-AWSSDKforJavav2-b456b1c.json new file mode 100644 index 000000000000..45ba68b046ee --- /dev/null +++ b/.changes/next-release/feature-AWSSDKforJavav2-b456b1c.json @@ -0,0 +1,6 @@ +{ + "category": "AWS SDK for Java v2", + "contributor": "sugmanue", + "type": "feature", + "description": "Adds the new module retries API module" +} diff --git a/core/pom.xml b/core/pom.xml index 60ec9b380b09..0237a0e67d22 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -46,6 +46,7 @@ json-utils endpoints-spi imds + retries-api diff --git a/core/retries-api/pom.xml b/core/retries-api/pom.xml new file mode 100644 index 000000000000..1c9b4a34c3cf --- /dev/null +++ b/core/retries-api/pom.xml @@ -0,0 +1,68 @@ + + + + + + core + software.amazon.awssdk + 2.20.4-SNAPSHOT + + 4.0.0 + + retries-api + AWS Java SDK :: Retries API + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.retries.api + + + + + + + + + + software.amazon.awssdk + annotations + ${awsjavasdk.version} + + 
+ software.amazon.awssdk + utils + ${awsjavasdk.version} + + + org.junit.jupiter + junit-jupiter + ${junit5.version} + test + + + org.hamcrest + hamcrest-all + test + + + diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenRequest.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenRequest.java new file mode 100644 index 000000000000..a1e99a107442 --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenRequest.java @@ -0,0 +1,36 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api; + +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; + +/** + * Encapsulates the abstract scope to start the attempts about to be executed using a retry strategy. + */ +@SdkPublicApi +@ThreadSafe +public interface AcquireInitialTokenRequest { + /** + * An abstract scope for the attempts about to be executed. + * + *

A scope should be a unique string describing the smallest possible scope of failure for the attempts about to be + * executed. In practical terms, this is a key for the token bucket used to throttle request attempts. All attempts with the + * same scope share the same token bucket within the same {@link RetryStrategy}, ensuring that token-bucket throttling for + * requests against one resource do not result in throttling for requests against other, unrelated resources. + */ + String scope(); +} diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenResponse.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenResponse.java new file mode 100644 index 000000000000..96315075ad8e --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenResponse.java @@ -0,0 +1,38 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api; + +import java.time.Duration; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; + +/** + * Encapsulates the response from the {@link RetryStrategy} to the request to start the attempts to be executed. + */ +@SdkPublicApi +@ThreadSafe +public interface AcquireInitialTokenResponse { + /** + * A {@link RetryToken} acquired by this invocation, used in subsequent {@link RetryStrategy#refreshRetryToken} or + * {@link RetryStrategy#recordSuccess} calls. + */ + RetryToken token(); + + /** + * The amount of time to wait before performing the first attempt. + */ + Duration delay(); +} diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/BackoffStrategy.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/BackoffStrategy.java new file mode 100644 index 000000000000..21e624ed0e1d --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/BackoffStrategy.java @@ -0,0 +1,36 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api; + +import java.time.Duration; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; + +/** + * Determines how long to wait before each execution attempt. 
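
The contract here is small enough that a custom strategy is easy to sketch. The following implementation is illustrative only and is not part of this patch; it assumes the computeDelay(int attempt) contract documented just below, with attempts numbered from one, and the class name LinearBackoff is hypothetical.

    import java.time.Duration;
    import software.amazon.awssdk.retries.api.BackoffStrategy;

    // Hypothetical example, not shipped in this patch: a linear backoff that waits
    // (attempt - 1) * step before each attempt, so the first attempt does not wait.
    final class LinearBackoff implements BackoffStrategy {
        private final Duration step;

        LinearBackoff(Duration step) {
            this.step = step;
        }

        @Override
        public Duration computeDelay(int attempt) {
            if (attempt <= 0) {
                throw new IllegalArgumentException("attempt must be positive, got " + attempt);
            }
            return step.multipliedBy(attempt - 1L);
        }
    }
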
+ */ +@SdkPublicApi +@ThreadSafe +public interface BackoffStrategy { + + /** + * Compute the amount of time to wait before the provided attempt number is executed. + * + * @param attempt The attempt to compute the delay for, starting at one. + * @throws IllegalArgumentException If the given attempt is less or equal to zero. + */ + Duration computeDelay(int attempt); +} diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessRequest.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessRequest.java new file mode 100644 index 000000000000..ff1e2aa8b4cf --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessRequest.java @@ -0,0 +1,33 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api; + +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; + +/** + * Request that the calling code makes to the {@link RetryStrategy} using + * {@link RetryStrategy#recordSuccess(RecordSuccessRequest)} to notify that the attempted execution succeeded. + */ +@SdkPublicApi +@ThreadSafe +public interface RecordSuccessRequest { + /** + * A {@link RetryToken} acquired a previous {@link RetryStrategy#acquireInitialToken} or + * {@link RetryStrategy#refreshRetryToken} call. + */ + RetryToken token(); +} diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessResponse.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessResponse.java new file mode 100644 index 000000000000..bf3f4b83caf4 --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessResponse.java @@ -0,0 +1,33 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api; + +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; + +/** + * Response given to the calling code by the {@link RetryStrategy} after calling + * {@link RetryStrategy#recordSuccess(RecordSuccessRequest)}. + */ +@SdkPublicApi +@ThreadSafe +public interface RecordSuccessResponse { + /** + * A {@link RetryToken} acquired a previous {@link RetryStrategy#acquireInitialToken} or + * {@link RetryStrategy#refreshRetryToken} call. 
+ */ + RetryToken token(); +} diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenRequest.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenRequest.java new file mode 100644 index 000000000000..706a017b60c4 --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenRequest.java @@ -0,0 +1,47 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api; + +import java.time.Duration; +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; + +/** + * Request that the calling code makes to the {@link RetryStrategy} using + * {@link RetryStrategy#refreshRetryToken(RefreshRetryTokenRequest)} to notify that the attempted execution failed and the + * {@link RetryToken} needs to be refreshed. + */ +@SdkPublicApi +@ThreadSafe +public interface RefreshRetryTokenRequest { + /** + * A {@link RetryToken} acquired a previous {@link RetryStrategy#acquireInitialToken} or + * {@link RetryStrategy#refreshRetryToken} call. + */ + RetryToken token(); + + /** + * A suggestion of how long to wait from the last attempt failure. For HTTP calls, this is usually extracted from a "retry + * after" header from the downstream service. + */ + Optional suggestedDelay(); + + /** + * The cause of the last attempt failure. + */ + Throwable failure(); +} diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenResponse.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenResponse.java new file mode 100644 index 000000000000..31b231426779 --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenResponse.java @@ -0,0 +1,38 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api; + +import java.time.Duration; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; + +/** + * Response from the {@link RetryStrategy} after calling {@link RetryStrategy#refreshRetryToken(RefreshRetryTokenRequest)}. 
+ */ +@SdkPublicApi +@ThreadSafe +public interface RefreshRetryTokenResponse { + /** + * A {@link RetryToken} acquired by this invocation, used in subsequent {@link RetryStrategy#refreshRetryToken} or + * {@link RetryStrategy#recordSuccess} calls. + */ + RetryToken token(); + + /** + * The amount of time to wait before performing the next attempt. + */ + Duration delay(); +} diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java new file mode 100644 index 000000000000..c02d97411a43 --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java @@ -0,0 +1,207 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api; + +import java.util.function.Predicate; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A strategy used by an SDK to determine when something should be retried. + * + *

We do not recommend that SDK users create their own retry strategies. We recommend refining an + * existing strategy, as sketched after the list below: + *

    + *
  1. If you are using the strategy with a service, you can get the existing strategy + * from that service via {@code [ServiceName]Client.defaults().retryStrategy()}. + *
  2. {@code RetryStrategies} from the {@code software.amazon.awssdk:retries} module. + *
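
For example, a strategy obtained either way can be refined through its builder rather than rebuilt from scratch. The sketch below is illustrative only: existingStrategy stands in for a strategy obtained elsewhere, the chosen values are arbitrary, and type parameters are elided.

    import java.io.IOException;
    import software.amazon.awssdk.retries.api.RetryStrategy;

    final class RefineStrategySketch {
        static RetryStrategy refine(RetryStrategy existingStrategy) {
            // Keep the existing behavior, but allow a couple more attempts and also
            // retry on IOException (both choices are illustrative).
            return existingStrategy.toBuilder()
                                   .maxAttempts(5)
                                   .retryOnExceptionInstanceOf(IOException.class)
                                   .build();
        }
    }
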
+ * + *

Terminology: + *

    + *
  1. An attempt is a single invocation of an action. + *
  2. The attempt count is the number of the attempt (starting at 1) that the SDK is + * currently making. + *
+ */ +@ThreadSafe +@SdkPublicApi +public interface RetryStrategy extends ToCopyableBuilder { + /** + * Invoked before the first request attempt. + * + *

Callers MUST wait for the {@code delay} returned by this call before making the first attempt. Callers that wish to + * retry a failed attempt MUST call {@link #refreshRetryToken} before doing so. + * + *

If the attempt was successful, callers MUST call {@link #recordSuccess}. + * + * @throws NullPointerException if a required parameter is not specified + * @throws TokenAcquisitionFailedException if a token cannot be acquired + */ + AcquireInitialTokenResponse acquireInitialToken(AcquireInitialTokenRequest request); + + /** + * Invoked before each subsequent (non-first) request attempt. + * + *

Callers MUST wait for the {@code delay} returned by this call before making the next attempt. If the next attempt + * fails, callers MUST re-call {@link #refreshRetryToken} before attempting another retry. This call invalidates the provided + * token, and returns a new one. Callers MUST use the new token. + * + *

If the attempt was successful, callers MUST call {@link #recordSuccess}. + * + * @throws NullPointerException if a required parameter is not specified + * @throws IllegalArgumentException if the provided token was not issued by this strategy or the provided token was + * already used for a previous refresh or success call. + * @throws TokenAcquisitionFailedException if a token cannot be acquired + */ + RefreshRetryTokenResponse refreshRetryToken(RefreshRetryTokenRequest request); + + /** + * Invoked after an attempt succeeds. + * + * @throws NullPointerException if a required parameter is not specified + * @throws IllegalArgumentException if the provided token was not issued by this strategy or the provided token was already + * used for a previous refresh or success call. + */ + RecordSuccessResponse recordSuccess(RecordSuccessRequest request); + + /** + * Create a new {@link Builder} with the current configuration. + * + *
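
Taken together, acquireInitialToken, refreshRetryToken and recordSuccess imply a calling pattern along the lines of the sketch below. It is illustrative only: the helper names (RetryLoopSketch, refreshRequest, sleep) are invented for this example, the request interfaces are implemented inline instead of through SDK-provided implementations, type parameters are elided, and error handling is deliberately simplified.

    import java.time.Duration;
    import java.util.Optional;
    import java.util.concurrent.Callable;
    import software.amazon.awssdk.retries.api.AcquireInitialTokenRequest;
    import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse;
    import software.amazon.awssdk.retries.api.RecordSuccessRequest;
    import software.amazon.awssdk.retries.api.RefreshRetryTokenRequest;
    import software.amazon.awssdk.retries.api.RefreshRetryTokenResponse;
    import software.amazon.awssdk.retries.api.RetryStrategy;
    import software.amazon.awssdk.retries.api.RetryToken;
    import software.amazon.awssdk.retries.api.TokenAcquisitionFailedException;

    final class RetryLoopSketch {
        static <T> T execute(RetryStrategy strategy, String scope, Callable<T> action) throws Exception {
            AcquireInitialTokenRequest acquireRequest = () -> scope; // scope() is the only abstract method
            AcquireInitialTokenResponse acquired = strategy.acquireInitialToken(acquireRequest);
            RetryToken token = acquired.token();
            sleep(acquired.delay()); // wait before the first attempt, as required above
            while (true) {
                RetryToken current = token;
                try {
                    T result = action.call();
                    RecordSuccessRequest success = () -> current; // token() is the only abstract method
                    strategy.recordSuccess(success);
                    return result;
                } catch (Exception failure) {
                    RefreshRetryTokenResponse refreshed;
                    try {
                        refreshed = strategy.refreshRetryToken(refreshRequest(current, failure));
                    } catch (TokenAcquisitionFailedException retriesExhausted) {
                        throw failure; // no more retries: surface the last failure
                    }
                    token = refreshed.token(); // the previous token is now invalid
                    sleep(refreshed.delay()); // wait before the next attempt
                }
            }
        }

        private static RefreshRetryTokenRequest refreshRequest(RetryToken token, Throwable failure) {
            return new RefreshRetryTokenRequest() {
                @Override public RetryToken token() { return token; }
                // A real caller could pass along a server-provided "retry after" hint here.
                @Override public Optional<Duration> suggestedDelay() { return Optional.empty(); }
                @Override public Throwable failure() { return failure; }
            };
        }

        private static void sleep(Duration delay) throws InterruptedException {
            Thread.sleep(delay.toMillis());
        }
    }
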

This is useful for modifying the strategy's behavior, like conditions or max retries. + */ + @Override + Builder toBuilder(); + + /** + * Builder to create immutable instances of {@link RetryStrategy}. + */ + interface Builder extends CopyableBuilder { + /** + * Configure the strategy to retry when the provided predicate returns true, given a failure exception. + */ + Builder retryOnException(Predicate shouldRetry); + + /** + * Configure the strategy to retry when a failure exception class is equal to the provided class. + */ + default Builder retryOnException(Class throwable) { + return retryOnException(t -> t.getClass() == throwable); + } + + /** + * Configure the strategy to retry when a failure exception class is an instance of the provided class (includes + * subtypes). + */ + default Builder retryOnExceptionInstanceOf(Class throwable) { + return retryOnException(t -> throwable.isAssignableFrom(t.getClass())); + } + + /** + * Configure the strategy to retry when a failure exception or one of its cause classes is equal to the provided class. + */ + default Builder retryOnExceptionOrCause(Class throwable) { + return retryOnException(t -> { + if (t.getClass() == throwable) { + return true; + } + Throwable cause = t.getCause(); + while (cause != null) { + if (cause.getClass() == throwable) { + return true; + } + cause = cause.getCause(); + } + return false; + }); + } + + /** + * Configure the strategy to retry when a failure exception or one of its cause classes is an instance of the provided + * class (includes subtypes). + */ + default Builder retryOnExceptionOrCauseInstanceOf(Class throwable) { + return retryOnException(t -> { + if (throwable.isAssignableFrom(t.getClass())) { + return true; + } + Throwable cause = t.getCause(); + while (cause != null) { + if (throwable.isAssignableFrom(cause.getClass())) { + return true; + } + cause = cause.getCause(); + } + return false; + }); + } + + /** + * Configure the strategy to retry the root cause of a failure (the final cause) a failure exception is equal to the + * provided class. + */ + default Builder retryOnRootCause(Class throwable) { + return retryOnException(t -> { + boolean shouldRetry = false; + Throwable cause = t.getCause(); + while (cause != null) { + shouldRetry = throwable == cause.getClass(); + cause = cause.getCause(); + } + return shouldRetry; + }); + } + + /** + * Configure the strategy to retry the root cause of a failure (the final cause) a failure exception is an instance of to + * the provided class (includes subtypes). + */ + default Builder retryOnRootCauseInstanceOf(Class throwable) { + return retryOnException(t -> { + boolean shouldRetry = false; + Throwable cause = t.getCause(); + while (cause != null) { + shouldRetry = throwable.isAssignableFrom(cause.getClass()); + cause = cause.getCause(); + } + return shouldRetry; + }); + } + + /** + * Configure the maximum number of attempts used by this executor. + * + *

The actual number of attempts made may be lower, depending on the retry strategy implementation. For example, the + * standard and adaptive retry modes both employ short-circuiting, which reduces the maximum number of attempts during outages. + * + *

The default value for the standard and adaptive retry strategies is 3. + */ + Builder maxAttempts(int maxAttempts); + + /** + * Configure the predicate to allow the strategy categorize a Throwable as throttling exception. + */ + Builder treatAsThrottling(Predicate treatAsThrottling); + + /** + * Build a new {@link RetryStrategy} with the current configuration on this builder. + */ + @Override + RetryStrategy build(); + } +} diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryToken.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryToken.java new file mode 100644 index 000000000000..d07e4482127f --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryToken.java @@ -0,0 +1,32 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api; + +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; + +/** + * An opaque token representing an in-progress execution. + * + *

Created via {@link RetryStrategy#acquireInitialToken} before a first attempt and refreshed + * after each attempt failure via {@link RetryStrategy#refreshRetryToken}. + * + *

Released via {@link RetryStrategy#recordSuccess} after a successful attempt. + */ +@SdkPublicApi +@ThreadSafe +public interface RetryToken { +} diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/TokenAcquisitionFailedException.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/TokenAcquisitionFailedException.java new file mode 100644 index 000000000000..b86e68c1964f --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/TokenAcquisitionFailedException.java @@ -0,0 +1,58 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api; + +import software.amazon.awssdk.annotations.SdkPublicApi; + +/** + * Exception thrown by {@link RetryStrategy} when a new token cannot be acquired. + */ +@SdkPublicApi +public final class TokenAcquisitionFailedException extends RuntimeException { + private final RetryToken token; + + /** + * Exception construction accepting message with no root cause. + */ + public TokenAcquisitionFailedException(String msg) { + super(msg); + token = null; + } + + /** + * Exception constructor accepting message and a root cause. + */ + public TokenAcquisitionFailedException(String msg, Throwable cause) { + super(msg, cause); + token = null; + } + + /** + * Exception constructor accepting message, retry token, and a root cause. + */ + public TokenAcquisitionFailedException(String msg, RetryToken token, Throwable cause) { + super(msg, cause); + this.token = token; + } + + /** + * Returns the retry token that tracked the execution. + * @return the retry token that tracked the execution. + */ + public RetryToken token() { + return token; + } +} diff --git a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java new file mode 100644 index 000000000000..ee14fc9fceda --- /dev/null +++ b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java @@ -0,0 +1,175 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.retries.api; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; + +import java.util.Arrays; +import java.util.Collection; +import java.util.function.Function; +import java.util.function.Predicate; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +public class RetryStrategyBuilderTest { + + public static Collection parameters() { + return Arrays.asList( + new TestCase() + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .givenThrowable(new IllegalArgumentException()) + .expectShouldRetry() + , new TestCase() + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .givenThrowable(new RuntimeException()) + .expectShouldNotRetry() + , new TestCase() + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .givenThrowable(new NumberFormatException()) + .expectShouldNotRetry() + , new TestCase() + .configure(b -> b.retryOnExceptionOrCause(IllegalArgumentException.class)) + .givenThrowable(new IllegalArgumentException()) + .expectShouldRetry() + , new TestCase() + .configure(b -> b.retryOnExceptionOrCause(IllegalArgumentException.class)) + .givenThrowable(new NumberFormatException()) + .expectShouldNotRetry() + , new TestCase() + .configure(b -> b.retryOnExceptionOrCause(IllegalArgumentException.class)) + .givenThrowable(new RuntimeException(new IllegalStateException())) + .expectShouldNotRetry() + , new TestCase() + .configure(b -> b.retryOnExceptionOrCause(IllegalArgumentException.class)) + .givenThrowable(new RuntimeException(new IllegalArgumentException())) + .expectShouldRetry() + , new TestCase() + .configure(b -> b.retryOnExceptionOrCause(IllegalArgumentException.class)) + .givenThrowable(new RuntimeException(new RuntimeException(new IllegalArgumentException()))) + .expectShouldRetry() + , new TestCase() + .configure(b -> b.retryOnExceptionOrCause(IllegalArgumentException.class)) + .givenThrowable(new RuntimeException(new RuntimeException(new NumberFormatException()))) + .expectShouldNotRetry() + , new TestCase() + .configure(b -> b.retryOnExceptionInstanceOf(IllegalArgumentException.class)) + .givenThrowable(new IllegalArgumentException()) + .expectShouldRetry() + , new TestCase() + .configure(b -> b.retryOnExceptionInstanceOf(IllegalArgumentException.class)) + .givenThrowable(new RuntimeException()) + .expectShouldNotRetry() + , new TestCase() + .configure(b -> b.retryOnExceptionInstanceOf(IllegalArgumentException.class)) + .givenThrowable(new RuntimeException(new IllegalArgumentException())) + .expectShouldNotRetry() + , new TestCase() + .configure(b -> b.retryOnExceptionInstanceOf(IllegalArgumentException.class)) + .givenThrowable(new NumberFormatException()) + .expectShouldRetry() + , new TestCase() + .configure(b -> b.retryOnExceptionOrCauseInstanceOf(IllegalArgumentException.class)) + .givenThrowable(new IllegalArgumentException()) + .expectShouldRetry() + , new TestCase() + .configure(b -> b.retryOnExceptionOrCauseInstanceOf(IllegalArgumentException.class)) + .givenThrowable(new RuntimeException()) + .expectShouldNotRetry() + , new TestCase() + .configure(b -> b.retryOnExceptionOrCauseInstanceOf(IllegalArgumentException.class)) + .givenThrowable(new RuntimeException(new IllegalArgumentException())) + .expectShouldRetry() + , new TestCase() + .configure(b -> b.retryOnExceptionOrCauseInstanceOf(IllegalArgumentException.class)) + .givenThrowable(new NumberFormatException()) + 
.expectShouldRetry() + , new TestCase() + .configure(b -> b.retryOnExceptionOrCauseInstanceOf(IllegalArgumentException.class)) + .givenThrowable(new RuntimeException(new RuntimeException(new NumberFormatException()))) + .expectShouldRetry() + ); + } + + @ParameterizedTest + @MethodSource("parameters") + public void testCase(TestCase testCase) { + assertThat(testCase.run(), equalTo(testCase.expected())); + } + + static class TestCase { + private final BuilderToTestDefaults builder = new BuilderToTestDefaults(); + private Throwable testThrowable; + private boolean expectedTestResult; + + TestCase configure(Function configure) { + configure.apply(builder); + return this; + } + + TestCase givenThrowable(Throwable testThrowable) { + this.testThrowable = testThrowable; + return this; + } + + TestCase expectShouldRetry() { + this.expectedTestResult = true; + return this; + } + + TestCase expectShouldNotRetry() { + this.expectedTestResult = false; + return this; + } + + boolean run() { + return builder.shouldRetryCapture().test(testThrowable); + } + + boolean expected() { + return expectedTestResult; + } + } + + static class BuilderToTestDefaults implements RetryStrategy.Builder { + Predicate shouldRetryCapture = null; + + Predicate shouldRetryCapture() { + return shouldRetryCapture; + } + + @Override + public RetryStrategy.Builder retryOnException(Predicate shouldRetry) { + shouldRetryCapture = shouldRetry; + return this; + } + + @Override + public RetryStrategy.Builder maxAttempts(int maxAttempts) { + return this; + } + + @Override + public RetryStrategy.Builder treatAsThrottling(Predicate treatAsThrottling) { + return this; + } + + @Override + public RetryStrategy build() { + return null; + } + } +} diff --git a/test/tests-coverage-reporting/pom.xml b/test/tests-coverage-reporting/pom.xml index af3a624ca5cb..052841d55391 100644 --- a/test/tests-coverage-reporting/pom.xml +++ b/test/tests-coverage-reporting/pom.xml @@ -37,6 +37,11 @@ software.amazon.awssdk ${awsjavasdk.version} + + retries-api + software.amazon.awssdk + ${awsjavasdk.version} + utils software.amazon.awssdk From f40dd27f9cb4b06fa2b209ee033d325065d4c6d3 Mon Sep 17 00:00:00 2001 From: Manuel Sugawara Date: Wed, 19 Apr 2023 14:46:23 -0700 Subject: [PATCH 02/32] Add default backoff strategies (#3906) * Add default backoff strategies * Moved the backoff strategires to the SPI package * Use AssertJ instead of Hamcrest --- core/retries-api/pom.xml | 6 +- .../awssdk/retries/api/BackoffStrategy.java | 49 +++++ .../backoff/BackoffStrategiesConstants.java | 48 +++++ .../backoff/ExponentialDelayWithJitter.java | 68 +++++++ .../ExponentialDelayWithoutJitter.java | 62 ++++++ .../backoff/FixedDelayWithJitter.java | 52 ++++++ .../backoff/FixedDelayWithoutJitter.java | 47 +++++ .../api/internal/backoff/Immediately.java | 38 ++++ .../retries/api/RetryStrategyBuilderTest.java | 5 +- .../ExponentialDelayWithJitterTest.java | 176 ++++++++++++++++++ .../backoff/FixedDelayWithJitterTest.java | 176 ++++++++++++++++++ core/retries/pom.xml | 73 ++++++++ 12 files changed, 794 insertions(+), 6 deletions(-) create mode 100644 core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/BackoffStrategiesConstants.java create mode 100644 core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitter.java create mode 100644 core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithoutJitter.java create mode 100644 
core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitter.java create mode 100644 core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithoutJitter.java create mode 100644 core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/Immediately.java create mode 100644 core/retries-api/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitterTest.java create mode 100644 core/retries-api/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitterTest.java create mode 100644 core/retries/pom.xml diff --git a/core/retries-api/pom.xml b/core/retries-api/pom.xml index 1c9b4a34c3cf..5dc96c1e9042 100644 --- a/core/retries-api/pom.xml +++ b/core/retries-api/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.4-SNAPSHOT + 2.20.7-SNAPSHOT 4.0.0 @@ -60,8 +60,8 @@ test - org.hamcrest - hamcrest-all + org.assertj + assertj-core test diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/BackoffStrategy.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/BackoffStrategy.java index 21e624ed0e1d..957948c07510 100644 --- a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/BackoffStrategy.java +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/BackoffStrategy.java @@ -16,8 +16,14 @@ package software.amazon.awssdk.retries.api; import java.time.Duration; +import java.util.concurrent.ThreadLocalRandom; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.retries.api.internal.backoff.ExponentialDelayWithJitter; +import software.amazon.awssdk.retries.api.internal.backoff.ExponentialDelayWithoutJitter; +import software.amazon.awssdk.retries.api.internal.backoff.FixedDelayWithJitter; +import software.amazon.awssdk.retries.api.internal.backoff.FixedDelayWithoutJitter; +import software.amazon.awssdk.retries.api.internal.backoff.Immediately; /** * Determines how long to wait before each execution attempt. @@ -33,4 +39,47 @@ public interface BackoffStrategy { * @throws IllegalArgumentException If the given attempt is less or equal to zero. */ Duration computeDelay(int attempt); + + /** + * Do not back off: retry immediately. + */ + static BackoffStrategy retryImmediately() { + return new Immediately(); + } + + /** + * Wait for a random period of time between 0ms and the provided delay. + */ + static BackoffStrategy fixedDelay(Duration delay) { + return new FixedDelayWithJitter(ThreadLocalRandom::current, delay); + } + + /** + * Wait for a period of time equal to the provided delay. + */ + static BackoffStrategy fixedDelayWithoutJitter(Duration delay) { + return new FixedDelayWithoutJitter(delay); + } + + /** + * Wait for a random period of time between 0ms and an exponentially increasing amount of time between each subsequent attempt + * of the same call. + * + *

Specifically, the first attempt waits 0ms, and each subsequent attempt waits between + * 0ms and {@code min(maxDelay, baseDelay * (1 << (attempt - 2)))}. + */ + static BackoffStrategy exponentialDelay(Duration baseDelay, Duration maxDelay) { + return new ExponentialDelayWithJitter(ThreadLocalRandom::current, baseDelay, maxDelay); + } + + /** + * Wait for an exponentially increasing amount of time between each subsequent attempt of the same call. + * + *
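
A brief illustration of the factory methods above; the durations are arbitrary, and the comments describe ranges rather than exact values because of the jitter.

    import java.time.Duration;
    import software.amazon.awssdk.retries.api.BackoffStrategy;

    final class BackoffFactoriesSketch {
        public static void main(String[] args) {
            BackoffStrategy immediate = BackoffStrategy.retryImmediately();
            BackoffStrategy fixed = BackoffStrategy.fixedDelay(Duration.ofMillis(500));
            BackoffStrategy exponential = BackoffStrategy.exponentialDelay(Duration.ofMillis(100), Duration.ofSeconds(20));

            System.out.println(immediate.computeDelay(3));   // PT0S, regardless of the attempt
            System.out.println(exponential.computeDelay(1)); // PT0S: the first attempt never waits
            System.out.println(exponential.computeDelay(4)); // random value in [0ms, 400ms)
            System.out.println(fixed.computeDelay(2));       // random value in [0ms, 500ms)
        }
    }
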

Specifically, the first attempt waits 0ms, and each subsequent attempt waits for + * {@code min(maxDelay, baseDelay * (1 << (attempt - 2)))}. + */ + static BackoffStrategy exponentialDelayWithoutJitter(Duration baseDelay, Duration maxDelay) { + return new ExponentialDelayWithoutJitter(baseDelay, maxDelay); + } + } diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/BackoffStrategiesConstants.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/BackoffStrategiesConstants.java new file mode 100644 index 000000000000..72a9a6b043fb --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/BackoffStrategiesConstants.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api.internal.backoff; + +import java.time.Duration; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * Constants and utility functions shared by the BackoffStrategy implementations. + */ +@SdkInternalApi +class BackoffStrategiesConstants { + static final Duration BASE_DELAY_CEILING = Duration.ofMillis(Integer.MAX_VALUE); // Around ~24.8 days + static final Duration MAX_BACKOFF_CEILING = Duration.ofMillis(Integer.MAX_VALUE); // Around ~24.8 days + /** + * Max permitted retry times. To prevent exponentialDelay from overflow, there must be 2 ^ retriesAttempted <= 2 ^ 31 - 1, + * which means retriesAttempted <= 30, so that is the ceil for retriesAttempted. + */ + static final int RETRIES_ATTEMPTED_CEILING = (int) Math.floor(Math.log(Integer.MAX_VALUE) / Math.log(2)); + + private BackoffStrategiesConstants() { + } + + /** + * Returns the computed exponential delay in milliseconds given the retries attempted, the base delay and the max backoff + * time. + * + *

Specifically it returns {@code min(maxDelay, baseDelay * (1 << (attempt - 2)))}. To prevent overflowing the attempts + * get capped to 30. + */ + static int calculateExponentialDelay(int retriesAttempted, Duration baseDelay, Duration maxBackoffTime) { + int cappedRetries = Math.min(retriesAttempted, BackoffStrategiesConstants.RETRIES_ATTEMPTED_CEILING); + return (int) Math.min(baseDelay.multipliedBy(1L << (cappedRetries - 2)).toMillis(), maxBackoffTime.toMillis()); + } +} diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitter.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitter.java new file mode 100644 index 000000000000..cd988e3a9aeb --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitter.java @@ -0,0 +1,68 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api.internal.backoff; + +import static software.amazon.awssdk.retries.api.internal.backoff.BackoffStrategiesConstants.calculateExponentialDelay; + +import java.time.Duration; +import java.util.Random; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.retries.api.BackoffStrategy; +import software.amazon.awssdk.utils.NumericUtils; +import software.amazon.awssdk.utils.ToString; +import software.amazon.awssdk.utils.Validate; + +/** + * Strategy that waits for a random period of time between 0ms and an exponentially increasing amount of time between each + * subsequent attempt of the same call. + * + *
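
To make the capped formula concrete, the following self-contained sketch mirrors the arithmetic described above; it does not call the internal helper directly, and the chosen base and maximum delays are arbitrary.

    import java.time.Duration;

    final class ExponentialDelayArithmetic {
        public static void main(String[] args) {
            Duration baseDelay = Duration.ofMillis(100);
            Duration maxDelay = Duration.ofSeconds(20);
            // floor(log2(Integer.MAX_VALUE)) == 30, the attempt cap mentioned above
            int cap = (int) Math.floor(Math.log(Integer.MAX_VALUE) / Math.log(2));
            System.out.println(cap); // 30

            for (int attempt : new int[] {2, 3, 5, 9, 10, 30}) {
                int capped = Math.min(attempt, cap);
                long bound = Math.min(baseDelay.multipliedBy(1L << (capped - 2)).toMillis(), maxDelay.toMillis());
                System.out.println("attempt " + attempt + " -> " + bound + "ms bound");
            }
            // 2 -> 100, 3 -> 200, 5 -> 800, 9 -> 12800; 10 and 30 -> 20000 (capped by maxDelay)
        }
    }
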

Specifically, the first attempt waits 0ms, and each subsequent attempt waits between + * 0ms and {@code min(maxDelay, baseDelay * (1 << (attempt - 2)))}. + */ +@SdkInternalApi +public final class ExponentialDelayWithJitter implements BackoffStrategy { + private final Supplier randomSupplier; + private final Duration baseDelay; + private final Duration maxDelay; + + public ExponentialDelayWithJitter(Supplier randomSupplier, Duration baseDelay, Duration maxDelay) { + this.randomSupplier = Validate.paramNotNull(randomSupplier, "random"); + this.baseDelay = NumericUtils.min(Validate.isPositive(baseDelay, "baseDelay"), + BackoffStrategiesConstants.BASE_DELAY_CEILING); + this.maxDelay = NumericUtils.min(Validate.isPositive(maxDelay, "maxDelay"), + BackoffStrategiesConstants.MAX_BACKOFF_CEILING); + } + + @Override + public Duration computeDelay(int attempt) { + Validate.isPositive(attempt, "attempt"); + if (attempt == 1) { + return Duration.ZERO; + } + int delay = calculateExponentialDelay(attempt, baseDelay, maxDelay); + int randInt = randomSupplier.get().nextInt(delay); + return Duration.ofMillis(randInt); + } + + @Override + public String toString() { + return ToString.builder("ExponentialDelayWithJitter") + .add("baseDelay", baseDelay) + .add("maxDelay", maxDelay) + .build(); + } +} diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithoutJitter.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithoutJitter.java new file mode 100644 index 000000000000..f6ed54525b3b --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithoutJitter.java @@ -0,0 +1,62 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api.internal.backoff; + +import static software.amazon.awssdk.retries.api.internal.backoff.BackoffStrategiesConstants.calculateExponentialDelay; + +import java.time.Duration; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.retries.api.BackoffStrategy; +import software.amazon.awssdk.utils.NumericUtils; +import software.amazon.awssdk.utils.ToString; +import software.amazon.awssdk.utils.Validate; + +/** + * Strategy that waits for an exponentially increasing amount of time between each subsequent attempt of the same call. + * + *

Specifically, the first attempt waits 0ms, and each subsequent attempt waits for + * {@code min(maxDelay, baseDelay * (1 << (attempt - 2)))}. + */ +@SdkInternalApi +public final class ExponentialDelayWithoutJitter implements BackoffStrategy { + private final Duration baseDelay; + private final Duration maxDelay; + + public ExponentialDelayWithoutJitter(Duration baseDelay, Duration maxDelay) { + this.baseDelay = NumericUtils.min(Validate.isPositive(baseDelay, "baseDelay"), + BackoffStrategiesConstants.BASE_DELAY_CEILING); + this.maxDelay = NumericUtils.min(Validate.isPositive(maxDelay, "maxDelay"), + BackoffStrategiesConstants.MAX_BACKOFF_CEILING); + } + + @Override + public Duration computeDelay(int attempt) { + Validate.isPositive(attempt, "attempt"); + if (attempt == 1) { + return Duration.ZERO; + } + int delay = calculateExponentialDelay(attempt, baseDelay, maxDelay); + return Duration.ofMillis(delay); + } + + @Override + public String toString() { + return ToString.builder("ExponentialDelayWithoutJitter") + .add("baseDelay", baseDelay) + .add("maxDelay", maxDelay) + .build(); + } +} diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitter.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitter.java new file mode 100644 index 000000000000..a1716c6f2c39 --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitter.java @@ -0,0 +1,52 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api.internal.backoff; + +import java.time.Duration; +import java.util.Random; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.retries.api.BackoffStrategy; +import software.amazon.awssdk.utils.NumericUtils; +import software.amazon.awssdk.utils.ToString; +import software.amazon.awssdk.utils.Validate; + +/** + * Strategy that waits for a random period of time between 0ms and the provided delay. 
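
A design note worth calling out: the constructor takes a supplier of Random rather than a Random instance, which lets tests inject a deterministic source, as the unit tests later in this patch do. The sketch below is illustrative only and uses this internal class directly purely to demonstrate the resulting range.

    import java.time.Duration;
    import java.util.Random;
    import software.amazon.awssdk.retries.api.internal.backoff.FixedDelayWithJitter;

    final class FixedDelayWithJitterSketch {
        public static void main(String[] args) {
            // A seeded Random makes the computed delay reproducible for the sake of the example.
            FixedDelayWithJitter strategy = new FixedDelayWithJitter(() -> new Random(42L), Duration.ofMillis(500));
            Duration delay = strategy.computeDelay(3);
            System.out.println(delay.toMillis() >= 0 && delay.toMillis() < 500); // always true
        }
    }
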
+ */ +@SdkInternalApi +public final class FixedDelayWithJitter implements BackoffStrategy { + private final Supplier randomSupplier; + private final Duration delay; + + public FixedDelayWithJitter(Supplier randomSupplier, Duration delay) { + this.randomSupplier = Validate.paramNotNull(randomSupplier, "random"); + this.delay = NumericUtils.min(Validate.isPositive(delay, "delay"), BackoffStrategiesConstants.BASE_DELAY_CEILING); + } + + @Override + public Duration computeDelay(int attempt) { + Validate.isPositive(attempt, "attempt"); + return Duration.ofMillis(randomSupplier.get().nextInt((int) delay.toMillis())); + } + + @Override + public String toString() { + return ToString.builder("FixedDelayWithJitter") + .add("delay", delay) + .build(); + } +} diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithoutJitter.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithoutJitter.java new file mode 100644 index 000000000000..a6090cfd1b82 --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithoutJitter.java @@ -0,0 +1,47 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api.internal.backoff; + +import java.time.Duration; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.retries.api.BackoffStrategy; +import software.amazon.awssdk.utils.ToString; +import software.amazon.awssdk.utils.Validate; + +/** + * Strategy that waits for a period of time equal to the provided delay. + */ +@SdkInternalApi +public final class FixedDelayWithoutJitter implements BackoffStrategy { + private final Duration delay; + + public FixedDelayWithoutJitter(Duration delay) { + this.delay = Validate.isPositive(delay, "delay"); + } + + @Override + public Duration computeDelay(int attempt) { + Validate.isPositive(attempt, "attempt"); + return delay; + } + + @Override + public String toString() { + return ToString.builder("FixedDelayWithoutJitter") + .add("delay", delay) + .build(); + } +} diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/Immediately.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/Immediately.java new file mode 100644 index 000000000000..ba8b780939cc --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/Immediately.java @@ -0,0 +1,38 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api.internal.backoff; + +import java.time.Duration; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.retries.api.BackoffStrategy; +import software.amazon.awssdk.utils.Validate; + +/** + * Strategy that do not back off: retry immediately. + */ +@SdkInternalApi +public final class Immediately implements BackoffStrategy { + @Override + public Duration computeDelay(int attempt) { + Validate.isPositive(attempt, "attempt"); + return Duration.ZERO; + } + + @Override + public String toString() { + return "(Immediately)"; + } +} diff --git a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java index ee14fc9fceda..a7023ccc153f 100644 --- a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java +++ b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java @@ -15,8 +15,7 @@ package software.amazon.awssdk.retries.api; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.equalTo; +import static org.assertj.core.api.Assertions.assertThat; import java.util.Arrays; import java.util.Collection; @@ -107,7 +106,7 @@ public static Collection parameters() { @ParameterizedTest @MethodSource("parameters") public void testCase(TestCase testCase) { - assertThat(testCase.run(), equalTo(testCase.expected())); + assertThat(testCase.run()).isEqualTo(testCase.expected()); } static class TestCase { diff --git a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitterTest.java b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitterTest.java new file mode 100644 index 000000000000..db35b9653994 --- /dev/null +++ b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitterTest.java @@ -0,0 +1,176 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.retries.api.internal.backoff; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.time.Duration; +import java.util.Arrays; +import java.util.Collection; +import java.util.Random; +import java.util.function.Function; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +public class ExponentialDelayWithJitterTest { + static final ComputedNextInt MIN_VALUE_RND = new ComputedNextInt(bound -> 0); + static final ComputedNextInt MID_VALUE_RND = new ComputedNextInt(bound -> bound / 2); + static final ComputedNextInt MAX_VALUE_RND = new ComputedNextInt(bound -> bound - 1); + static final Duration BASE_DELAY = Duration.ofMillis(23); + static final Duration MAX_DELAY = Duration.ofSeconds(20); + + public static Collection parameters() { + return Arrays.asList( + // --- Using random that returns: bound - 1 + new TestCase() + .configureRandom(MAX_VALUE_RND) + .givenAttempt(1) + .expectDelayInMs(0) + , new TestCase() + .configureRandom(MAX_VALUE_RND) + .givenAttempt(2) + .expectDelayInMs(22) + , new TestCase() + .configureRandom(MAX_VALUE_RND) + .givenAttempt(3) + .expectDelayInMs(45) + , new TestCase() + .configureRandom(MAX_VALUE_RND) + .givenAttempt(5) + .expectDelayInMs(183) + , new TestCase() + .configureRandom(MAX_VALUE_RND) + .givenAttempt(7) + .expectDelayInMs(735) + , new TestCase() + .configureRandom(MAX_VALUE_RND) + .givenAttempt(11) + .expectDelayInMs(11775) + , new TestCase() + .configureRandom(MAX_VALUE_RND) + .givenAttempt(13) + .expectDelayInMs(19999) + // --- Using random that returns: bound / 2 + , new TestCase() + .configureRandom(MID_VALUE_RND) + .givenAttempt(1) + .expectDelayInMs(0) + , new TestCase() + .configureRandom(MID_VALUE_RND) + .givenAttempt(2) + .expectDelayInMs(11) + , new TestCase() + .configureRandom(MID_VALUE_RND) + .givenAttempt(3) + .expectDelayInMs(23) + , new TestCase() + .configureRandom(MID_VALUE_RND) + .givenAttempt(5) + .expectDelayInMs(92) + , new TestCase() + .configureRandom(MID_VALUE_RND) + .givenAttempt(7) + .expectDelayInMs(368) + , new TestCase() + .configureRandom(MID_VALUE_RND) + .givenAttempt(11) + .expectDelayInMs(5888) + , new TestCase() + .configureRandom(MID_VALUE_RND) + .givenAttempt(13) + .expectDelayInMs(10000) + // --- Using random that returns: 0 + , new TestCase() + .configureRandom(MIN_VALUE_RND) + .givenAttempt(1) + .expectDelayInMs(0) + , new TestCase() + .configureRandom(MIN_VALUE_RND) + .givenAttempt(2) + .expectDelayInMs(0) + , new TestCase() + .configureRandom(MIN_VALUE_RND) + .givenAttempt(3) + .expectDelayInMs(0) + , new TestCase() + .configureRandom(MIN_VALUE_RND) + .givenAttempt(5) + .expectDelayInMs(0) + , new TestCase() + .configureRandom(MIN_VALUE_RND) + .givenAttempt(7) + .expectDelayInMs(0) + , new TestCase() + .configureRandom(MIN_VALUE_RND) + .givenAttempt(11) + .expectDelayInMs(0) + , new TestCase() + .configureRandom(MIN_VALUE_RND) + .givenAttempt(13) + .expectDelayInMs(0) + ); + } + + @ParameterizedTest + @MethodSource("parameters") + public void testCase(TestCase testCase) { + assertThat(testCase.run()).isEqualTo(testCase.expected()); + } + + static class TestCase { + Random random; + int attempt; + long expectedDelayMs; + + TestCase configureRandom(Random random) { + this.random = random; + return this; + } + + TestCase givenAttempt(int attempt) { + this.attempt = attempt; + return this; + } + + TestCase expectDelayInMs(long expectedDelayMs) { + this.expectedDelayMs = expectedDelayMs; + return this; 
+ } + + Duration run() { + return + new ExponentialDelayWithJitter(() -> random, BASE_DELAY, MAX_DELAY) + .computeDelay(this.attempt); + } + + Duration expected() { + return Duration.ofMillis(expectedDelayMs); + } + } + + static class ComputedNextInt extends Random { + final Function compute; + + ComputedNextInt(Function compute) { + this.compute = compute; + } + + @Override + public int nextInt(int bound) { + return compute.apply(bound); + } + } +} diff --git a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitterTest.java b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitterTest.java new file mode 100644 index 000000000000..e144d4e8af92 --- /dev/null +++ b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitterTest.java @@ -0,0 +1,176 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api.internal.backoff; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.time.Duration; +import java.util.Arrays; +import java.util.Collection; +import java.util.Random; +import java.util.function.Function; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +public class FixedDelayWithJitterTest { + static final ComputedNextInt MIN_VALUE_RND = new ComputedNextInt(bound -> 0); + static final ComputedNextInt MID_VALUE_RND = new ComputedNextInt(bound -> bound / 2); + static final ComputedNextInt MAX_VALUE_RND = new ComputedNextInt(bound -> bound - 1); + static final Duration BASE_DELAY = Duration.ofMillis(23); + + public static Collection parameters() { + return Arrays.asList( + // --- Using random that returns: bound - 1 + new TestCase() + .configureRandom(MAX_VALUE_RND) + .givenAttempt(1) + .expectDelayInMs(22) + , new TestCase() + .configureRandom(MAX_VALUE_RND) + .givenAttempt(2) + .expectDelayInMs(22) + , new TestCase() + .configureRandom(MAX_VALUE_RND) + .givenAttempt(3) + .expectDelayInMs(22) + , new TestCase() + .configureRandom(MAX_VALUE_RND) + .givenAttempt(5) + .expectDelayInMs(22) + , new TestCase() + .configureRandom(MAX_VALUE_RND) + .givenAttempt(7) + .expectDelayInMs(22) + , new TestCase() + .configureRandom(MAX_VALUE_RND) + .givenAttempt(11) + .expectDelayInMs(22) + , new TestCase() + .configureRandom(MAX_VALUE_RND) + .givenAttempt(13) + .expectDelayInMs(22) + // --- Using random that returns: bound / 2 + , new TestCase() + .configureRandom(MID_VALUE_RND) + .givenAttempt(1) + .expectDelayInMs(11) + , new TestCase() + .configureRandom(MID_VALUE_RND) + .givenAttempt(2) + .expectDelayInMs(11) + , new TestCase() + .configureRandom(MID_VALUE_RND) + .givenAttempt(3) + .expectDelayInMs(11) + , new TestCase() + .configureRandom(MID_VALUE_RND) + .givenAttempt(5) + .expectDelayInMs(11) + , new TestCase() + .configureRandom(MID_VALUE_RND) + .givenAttempt(7) + .expectDelayInMs(11) + , new TestCase() + 
.configureRandom(MID_VALUE_RND) + .givenAttempt(11) + .expectDelayInMs(11) + , new TestCase() + .configureRandom(MID_VALUE_RND) + .givenAttempt(13) + .expectDelayInMs(11) + // --- Using random that returns: 0 + , new TestCase() + .configureRandom(MIN_VALUE_RND) + .givenAttempt(1) + .expectDelayInMs(0) + , new TestCase() + .configureRandom(MIN_VALUE_RND) + .givenAttempt(2) + .expectDelayInMs(0) + , new TestCase() + .configureRandom(MIN_VALUE_RND) + .givenAttempt(3) + .expectDelayInMs(0) + , new TestCase() + .configureRandom(MIN_VALUE_RND) + .givenAttempt(5) + .expectDelayInMs(0) + , new TestCase() + .configureRandom(MIN_VALUE_RND) + .givenAttempt(7) + .expectDelayInMs(0) + , new TestCase() + .configureRandom(MIN_VALUE_RND) + .givenAttempt(11) + .expectDelayInMs(0) + , new TestCase() + .configureRandom(MIN_VALUE_RND) + .givenAttempt(13) + .expectDelayInMs(0) + ); + } + + + @ParameterizedTest + @MethodSource("parameters") + public void testCase(TestCase testCase) { + assertThat(testCase.run()).isEqualTo(testCase.expected()); + } + + static class TestCase { + Random random; + int attempt; + long expectedDelayMs; + + TestCase configureRandom(Random random) { + this.random = random; + return this; + } + + TestCase givenAttempt(int attempt) { + this.attempt = attempt; + return this; + } + + TestCase expectDelayInMs(long expectedDelayMs) { + this.expectedDelayMs = expectedDelayMs; + return this; + } + + Duration run() { + return + new FixedDelayWithJitter(() -> random, BASE_DELAY) + .computeDelay(this.attempt); + } + + Duration expected() { + return Duration.ofMillis(expectedDelayMs); + } + } + + static class ComputedNextInt extends Random { + final Function compute; + + ComputedNextInt(Function compute) { + this.compute = compute; + } + + @Override + public int nextInt(int bound) { + return compute.apply(bound); + } + } +} diff --git a/core/retries/pom.xml b/core/retries/pom.xml new file mode 100644 index 000000000000..bbdeb4f0c342 --- /dev/null +++ b/core/retries/pom.xml @@ -0,0 +1,73 @@ + + + + + + core + software.amazon.awssdk + 2.20.7-SNAPSHOT + + 4.0.0 + + retries + AWS Java SDK :: Retries + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.retries + + + + + + + + + + software.amazon.awssdk + retries-api + ${awsjavasdk.version} + + + software.amazon.awssdk + annotations + ${awsjavasdk.version} + + + software.amazon.awssdk + utils + ${awsjavasdk.version} + + + org.junit.jupiter + junit-jupiter + ${junit5.version} + test + + + org.assertj + assertj-core + test + + + From a806cd7064ae826a17ee639b24132c1e99ebb48c Mon Sep 17 00:00:00 2001 From: Manuel Sugawara Date: Thu, 27 Apr 2023 14:29:51 -0700 Subject: [PATCH 03/32] Add standard retry strategy (#3931) * Add standard retry strategy * Fix the AcquireInitialTokenRequestImpl API annotation Also add the package to the test/tests-coverage-reporting/pom.xml to get coverage reporting --- core/pom.xml | 1 + .../api/AcquireInitialTokenRequest.java | 8 + .../api/AcquireInitialTokenResponse.java | 8 + .../retries/api/RecordSuccessRequest.java | 9 + .../retries/api/RecordSuccessResponse.java | 9 + .../retries/api/RefreshRetryTokenRequest.java | 34 +- .../api/RefreshRetryTokenResponse.java | 8 + .../awssdk/retries/api/RetryStrategy.java | 35 +- .../AcquireInitialTokenRequestImpl.java | 45 +++ .../AcquireInitialTokenResponseImpl.java | 53 +++ .../internal/RecordSuccessRequestImpl.java | 42 +++ .../internal/RecordSuccessResponseImpl.java | 45 +++ .../RefreshRetryTokenRequestImpl.java | 105 ++++++ 
.../RefreshRetryTokenResponseImpl.java | 53 +++ .../retries/api/RetryStrategyBuilderTest.java | 33 +- .../awssdk/retries/DefaultRetryStrategy.java | 56 +++ .../awssdk/retries/StandardRetryStrategy.java | 90 +++++ .../retries/internal/DefaultRetryToken.java | 215 +++++++++++ .../internal/StandardRetryStrategyImpl.java | 356 ++++++++++++++++++ .../circuitbreaker/AcquireResponse.java | 134 +++++++ .../circuitbreaker/ReleaseResponse.java | 102 +++++ .../internal/circuitbreaker/TokenBucket.java | 130 +++++++ .../circuitbreaker/TokenBucketStore.java | 94 +++++ .../StandardRetryStrategyMiscTest.java | 72 ++++ .../internal/StandardRetryStrategyTest.java | 272 +++++++++++++ test/tests-coverage-reporting/pom.xml | 5 + 26 files changed, 1989 insertions(+), 25 deletions(-) create mode 100644 core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/AcquireInitialTokenRequestImpl.java create mode 100644 core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/AcquireInitialTokenResponseImpl.java create mode 100644 core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RecordSuccessRequestImpl.java create mode 100644 core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RecordSuccessResponseImpl.java create mode 100644 core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RefreshRetryTokenRequestImpl.java create mode 100644 core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RefreshRetryTokenResponseImpl.java create mode 100644 core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java create mode 100644 core/retries/src/main/java/software/amazon/awssdk/retries/StandardRetryStrategy.java create mode 100644 core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultRetryToken.java create mode 100644 core/retries/src/main/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyImpl.java create mode 100644 core/retries/src/main/java/software/amazon/awssdk/retries/internal/circuitbreaker/AcquireResponse.java create mode 100644 core/retries/src/main/java/software/amazon/awssdk/retries/internal/circuitbreaker/ReleaseResponse.java create mode 100644 core/retries/src/main/java/software/amazon/awssdk/retries/internal/circuitbreaker/TokenBucket.java create mode 100644 core/retries/src/main/java/software/amazon/awssdk/retries/internal/circuitbreaker/TokenBucketStore.java create mode 100644 core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyMiscTest.java create mode 100644 core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyTest.java diff --git a/core/pom.xml b/core/pom.xml index 0237a0e67d22..f776ccfd8a7e 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -47,6 +47,7 @@ endpoints-spi imds retries-api + retries diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenRequest.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenRequest.java index a1e99a107442..06513a913052 100644 --- a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenRequest.java +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenRequest.java @@ -17,6 +17,7 @@ import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.retries.api.internal.AcquireInitialTokenRequestImpl; /** * 
Encapsulates the abstract scope to start the attempts about to be executed using a retry strategy. @@ -33,4 +34,11 @@ public interface AcquireInitialTokenRequest { * requests against one resource do not result in throttling for requests against other, unrelated resources. */ String scope(); + + /** + * Creates a new {@link AcquireInitialTokenRequest} instance with the given scope. + */ + static AcquireInitialTokenRequest create(String scope) { + return AcquireInitialTokenRequestImpl.create(scope); + } } diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenResponse.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenResponse.java index 96315075ad8e..5431955161ad 100644 --- a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenResponse.java +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenResponse.java @@ -18,6 +18,7 @@ import java.time.Duration; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.retries.api.internal.AcquireInitialTokenResponseImpl; /** * Encapsulates the response from the {@link RetryStrategy} to the request to start the attempts to be executed. @@ -35,4 +36,11 @@ public interface AcquireInitialTokenResponse { * The amount of time to wait before performing the first attempt. */ Duration delay(); + + /** + * Creates a new {@link AcquireInitialTokenRequest} instance with the given scope. + */ + static AcquireInitialTokenResponse create(RetryToken token, Duration delay) { + return AcquireInitialTokenResponseImpl.create(token, delay); + } } diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessRequest.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessRequest.java index ff1e2aa8b4cf..38537833b87c 100644 --- a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessRequest.java +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessRequest.java @@ -17,6 +17,7 @@ import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.retries.api.internal.RecordSuccessRequestImpl; /** * Request that the calling code makes to the {@link RetryStrategy} using @@ -30,4 +31,12 @@ public interface RecordSuccessRequest { * {@link RetryStrategy#refreshRetryToken} call. */ RetryToken token(); + + /** + * Creates a new {@link RecordSuccessRequest} instance with the given token. 
+ */ + static RecordSuccessRequest create(RetryToken token) { + return RecordSuccessRequestImpl.create(token); + } + } diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessResponse.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessResponse.java index bf3f4b83caf4..1e0ff32e3a86 100644 --- a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessResponse.java +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessResponse.java @@ -17,6 +17,7 @@ import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.retries.api.internal.RecordSuccessResponseImpl; /** * Response given to the calling code by the {@link RetryStrategy} after calling @@ -30,4 +31,12 @@ public interface RecordSuccessResponse { * {@link RetryStrategy#refreshRetryToken} call. */ RetryToken token(); + + /** + * Creates a new {@link RecordSuccessResponseImpl} with the given token. + */ + static RecordSuccessResponse create(RetryToken token) { + return RecordSuccessResponseImpl.create(token); + } + } diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenRequest.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenRequest.java index 706a017b60c4..ce3ca4190baa 100644 --- a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenRequest.java +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenRequest.java @@ -19,6 +19,9 @@ import java.util.Optional; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.retries.api.internal.RefreshRetryTokenRequestImpl; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; /** * Request that the calling code makes to the {@link RetryStrategy} using @@ -27,7 +30,7 @@ */ @SdkPublicApi @ThreadSafe -public interface RefreshRetryTokenRequest { +public interface RefreshRetryTokenRequest extends ToCopyableBuilder { /** * A {@link RetryToken} acquired a previous {@link RetryStrategy#acquireInitialToken} or * {@link RetryStrategy#refreshRetryToken} call. @@ -44,4 +47,33 @@ public interface RefreshRetryTokenRequest { * The cause of the last attempt failure. */ Throwable failure(); + + /** + * Returns a new builder to configure the {@link RefreshRetryTokenRequest} instance. + */ + static Builder builder() { + return RefreshRetryTokenRequestImpl.builder(); + } + + interface Builder extends CopyableBuilder { + /** + * Configures the {@link RetryToken} to be refreshed. + */ + Builder token(RetryToken token); + + /** + * Configures the suggested delay to used when refreshing the token. + */ + Builder suggestedDelay(Duration duration); + + /** + * Configures the latest caught exception. + */ + Builder failure(Throwable throwable); + + /** + * Builds and returns a new instance of {@linke RefreshRetryTokenRequest}. 
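Taken together, the builder methods documented above compose as follows. This is a minimal sketch based only on the interface shown in this patch; the helper name and the placeholder failure and delay values are illustrative, not part of the SDK.

    import java.time.Duration;
    import software.amazon.awssdk.retries.api.RefreshRetryTokenRequest;
    import software.amazon.awssdk.retries.api.RetryToken;

    final class RefreshRequestSketch {
        // token comes from a previous acquireInitialToken or refreshRetryToken call
        static RefreshRetryTokenRequest refreshRequestFor(RetryToken token, Throwable lastFailure) {
            return RefreshRetryTokenRequest.builder()
                                           .token(token)                           // token to refresh
                                           .failure(lastFailure)                   // latest caught exception
                                           .suggestedDelay(Duration.ofMillis(100)) // optional, e.g. a server hint
                                           .build();
        }
    }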
+ */ + RefreshRetryTokenRequest build(); + } } diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenResponse.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenResponse.java index 31b231426779..b083bfbf08c2 100644 --- a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenResponse.java +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenResponse.java @@ -18,6 +18,7 @@ import java.time.Duration; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.retries.api.internal.RefreshRetryTokenResponseImpl; /** * Response from the {@link RetryStrategy} after calling {@link RetryStrategy#refreshRetryToken(RefreshRetryTokenRequest)}. @@ -35,4 +36,11 @@ public interface RefreshRetryTokenResponse { * The amount of time to wait before performing the next attempt. */ Duration delay(); + + /** + * Creates a new {@link RefreshRetryTokenResponse} with the given token and delay. + */ + static RefreshRetryTokenResponse create(RetryToken token, Duration delay) { + return RefreshRetryTokenResponseImpl.create(token, delay); + } } diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java index c02d97411a43..8afeddbd461e 100644 --- a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java @@ -41,7 +41,10 @@ */ @ThreadSafe @SdkPublicApi -public interface RetryStrategy extends ToCopyableBuilder { +public interface RetryStrategy< + B extends CopyableBuilder & RetryStrategy.Builder, + T extends ToCopyableBuilder & RetryStrategy> + extends ToCopyableBuilder { /** * Invoked before the first request attempt. * @@ -86,21 +89,24 @@ public interface RetryStrategy extends ToCopyableBuilderThis is useful for modifying the strategy's behavior, like conditions or max retries. */ @Override - Builder toBuilder(); + B toBuilder(); /** * Builder to create immutable instances of {@link RetryStrategy}. */ - interface Builder extends CopyableBuilder { + interface Builder< + B extends Builder & CopyableBuilder, + T extends ToCopyableBuilder & RetryStrategy> + extends CopyableBuilder { /** * Configure the strategy to retry when the provided predicate returns true, given a failure exception. */ - Builder retryOnException(Predicate shouldRetry); + B retryOnException(Predicate shouldRetry); /** * Configure the strategy to retry when a failure exception class is equal to the provided class. */ - default Builder retryOnException(Class throwable) { + default B retryOnException(Class throwable) { return retryOnException(t -> t.getClass() == throwable); } @@ -108,14 +114,14 @@ default Builder retryOnException(Class throwable) { * Configure the strategy to retry when a failure exception class is an instance of the provided class (includes * subtypes). */ - default Builder retryOnExceptionInstanceOf(Class throwable) { + default B retryOnExceptionInstanceOf(Class throwable) { return retryOnException(t -> throwable.isAssignableFrom(t.getClass())); } /** * Configure the strategy to retry when a failure exception or one of its cause classes is equal to the provided class. 
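To make the cause-matching variant concrete, here is a sketch of a strategy that retries an IOException whether it is thrown directly or wrapped by another exception. The exception choice is an illustrative assumption, and DefaultRetryStrategy / StandardRetryStrategy are the concrete types introduced later in this patch.

    import java.io.IOException;
    import software.amazon.awssdk.retries.DefaultRetryStrategy;
    import software.amazon.awssdk.retries.StandardRetryStrategy;

    final class RetryConditionSketch {
        static StandardRetryStrategy retryOnIo() {
            // Matches a thrown IOException and also, e.g., an UncheckedIOException whose cause is an
            // IOException, whereas retryOnException(IOException.class) only matches the exception's own class.
            return DefaultRetryStrategy.standardStrategyBuilder()
                                       .retryOnExceptionOrCause(IOException.class)
                                       .build();
        }
    }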
*/ - default Builder retryOnExceptionOrCause(Class throwable) { + default B retryOnExceptionOrCause(Class throwable) { return retryOnException(t -> { if (t.getClass() == throwable) { return true; @@ -135,7 +141,7 @@ default Builder retryOnExceptionOrCause(Class throwable) { * Configure the strategy to retry when a failure exception or one of its cause classes is an instance of the provided * class (includes subtypes). */ - default Builder retryOnExceptionOrCauseInstanceOf(Class throwable) { + default B retryOnExceptionOrCauseInstanceOf(Class throwable) { return retryOnException(t -> { if (throwable.isAssignableFrom(t.getClass())) { return true; @@ -155,7 +161,7 @@ default Builder retryOnExceptionOrCauseInstanceOf(Class thr * Configure the strategy to retry the root cause of a failure (the final cause) a failure exception is equal to the * provided class. */ - default Builder retryOnRootCause(Class throwable) { + default B retryOnRootCause(Class throwable) { return retryOnException(t -> { boolean shouldRetry = false; Throwable cause = t.getCause(); @@ -171,7 +177,7 @@ default Builder retryOnRootCause(Class throwable) { * Configure the strategy to retry the root cause of a failure (the final cause) a failure exception is an instance of to * the provided class (includes subtypes). */ - default Builder retryOnRootCauseInstanceOf(Class throwable) { + default B retryOnRootCauseInstanceOf(Class throwable) { return retryOnException(t -> { boolean shouldRetry = false; Throwable cause = t.getCause(); @@ -191,17 +197,12 @@ default Builder retryOnRootCauseInstanceOf(Class throwable) * *

The default value for the standard and adaptive retry strategies is 3. */ - Builder maxAttempts(int maxAttempts); - - /** - * Configure the predicate to allow the strategy categorize a Throwable as throttling exception. - */ - Builder treatAsThrottling(Predicate treatAsThrottling); + B maxAttempts(int maxAttempts); /** * Build a new {@link RetryStrategy} with the current configuration on this builder. */ @Override - RetryStrategy build(); + T build(); } } diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/AcquireInitialTokenRequestImpl.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/AcquireInitialTokenRequestImpl.java new file mode 100644 index 000000000000..33a705ebbb68 --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/AcquireInitialTokenRequestImpl.java @@ -0,0 +1,45 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api.internal; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.retries.api.AcquireInitialTokenRequest; +import software.amazon.awssdk.utils.Validate; + +/** + * Implementation of the {@link AcquireInitialTokenRequest} interface. + */ +@SdkInternalApi +public final class AcquireInitialTokenRequestImpl implements AcquireInitialTokenRequest { + + private final String scope; + + private AcquireInitialTokenRequestImpl(String scope) { + this.scope = Validate.paramNotNull(scope, "scope"); + } + + @Override + public String scope() { + return scope; + } + + /** + * Creates a new {@link AcquireInitialTokenRequestImpl} instance with the given scope. + */ + public static AcquireInitialTokenRequest create(String scope) { + return new AcquireInitialTokenRequestImpl(scope); + } +} diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/AcquireInitialTokenResponseImpl.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/AcquireInitialTokenResponseImpl.java new file mode 100644 index 000000000000..0baad35e435a --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/AcquireInitialTokenResponseImpl.java @@ -0,0 +1,53 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.retries.api.internal; + +import java.time.Duration; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse; +import software.amazon.awssdk.retries.api.RetryToken; +import software.amazon.awssdk.utils.Validate; + +/** + * Implementation of the {@link AcquireInitialTokenResponse} interface. + */ +@SdkInternalApi +public final class AcquireInitialTokenResponseImpl implements AcquireInitialTokenResponse { + private final RetryToken token; + private final Duration delay; + + private AcquireInitialTokenResponseImpl(RetryToken token, Duration delay) { + this.token = Validate.paramNotNull(token, "token"); + this.delay = Validate.paramNotNull(delay, "delay"); + } + + @Override + public RetryToken token() { + return token; + } + + @Override + public Duration delay() { + return delay; + } + + /** + * Creates a new {@link AcquireInitialTokenResponseImpl} instance with the given token and suggested delay values. + */ + public static AcquireInitialTokenResponse create(RetryToken token, Duration delay) { + return new AcquireInitialTokenResponseImpl(token, delay); + } +} diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RecordSuccessRequestImpl.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RecordSuccessRequestImpl.java new file mode 100644 index 000000000000..049902ec2b01 --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RecordSuccessRequestImpl.java @@ -0,0 +1,42 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api.internal; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.retries.api.RecordSuccessRequest; +import software.amazon.awssdk.retries.api.RetryToken; +import software.amazon.awssdk.utils.Validate; + +/** + * Implementation of the {@link RecordSuccessRequest} interfface + */ +@SdkInternalApi +public final class RecordSuccessRequestImpl implements RecordSuccessRequest { + private final RetryToken token; + + private RecordSuccessRequestImpl(RetryToken token) { + this.token = Validate.paramNotNull(token, "token"); + } + + @Override + public RetryToken token() { + return token; + } + + public static RecordSuccessRequest create(RetryToken token) { + return new RecordSuccessRequestImpl(token); + } +} diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RecordSuccessResponseImpl.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RecordSuccessResponseImpl.java new file mode 100644 index 000000000000..53bee4ee5305 --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RecordSuccessResponseImpl.java @@ -0,0 +1,45 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api.internal; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.retries.api.RecordSuccessResponse; +import software.amazon.awssdk.retries.api.RetryToken; +import software.amazon.awssdk.utils.Validate; + +/** + * Implementation of the {@link RecordSuccessResponse} interface. + */ +@SdkInternalApi +public final class RecordSuccessResponseImpl implements RecordSuccessResponse { + private final RetryToken token; + + private RecordSuccessResponseImpl(RetryToken token) { + this.token = Validate.paramNotNull(token, "token"); + } + + @Override + public RetryToken token() { + return token; + } + + /** + * Creates a new {@link RecordSuccessResponseImpl} with the given token and responses. + */ + public static RecordSuccessResponse create(RetryToken token) { + return new RecordSuccessResponseImpl(token); + } +} diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RefreshRetryTokenRequestImpl.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RefreshRetryTokenRequestImpl.java new file mode 100644 index 000000000000..fe90983b4839 --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RefreshRetryTokenRequestImpl.java @@ -0,0 +1,105 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api.internal; + +import java.time.Duration; +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.retries.api.RefreshRetryTokenRequest; +import software.amazon.awssdk.retries.api.RetryToken; +import software.amazon.awssdk.utils.Validate; + +/** + * Implementation of the {@link RefreshRetryTokenRequest} interface. 
+ */ +@SdkInternalApi +public final class RefreshRetryTokenRequestImpl implements RefreshRetryTokenRequest { + private final RetryToken token; + private final Duration suggestedDelay; + private final Throwable failure; + + private RefreshRetryTokenRequestImpl(Builder builder) { + this.token = Validate.paramNotNull(builder.token, "token"); + this.suggestedDelay = Validate.paramNotNull(builder.suggestedDelay, "suggestedDelay"); + Validate.isNotNegative(this.suggestedDelay, "suggestedDelay"); + this.failure = Validate.paramNotNull(builder.failure, "failure"); + } + + @Override + public RetryToken token() { + return token; + } + + @Override + public Optional suggestedDelay() { + return Optional.of(suggestedDelay); + } + + @Override + public Throwable failure() { + return failure; + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + /** + * Returns a new builder to create a new instance of {@link RefreshRetryTokenRequest} + */ + public static Builder builder() { + return new Builder(); + } + + public static final class Builder implements RefreshRetryTokenRequest.Builder { + private RetryToken token; + private Duration suggestedDelay = Duration.ZERO; + private Throwable failure; + + Builder(RefreshRetryTokenRequestImpl refreshRetryTokenRequest) { + this.token = refreshRetryTokenRequest.token; + this.suggestedDelay = refreshRetryTokenRequest.suggestedDelay; + this.failure = refreshRetryTokenRequest.failure; + } + + Builder() { + } + + @Override + public Builder token(RetryToken token) { + this.token = token; + return this; + } + + @Override + public Builder suggestedDelay(Duration duration) { + this.suggestedDelay = duration; + return this; + } + + @Override + public Builder failure(Throwable throwable) { + this.failure = throwable; + return this; + } + + @Override + public RefreshRetryTokenRequestImpl build() { + return new RefreshRetryTokenRequestImpl(this); + } + } +} diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RefreshRetryTokenResponseImpl.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RefreshRetryTokenResponseImpl.java new file mode 100644 index 000000000000..8b84e3a975ef --- /dev/null +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RefreshRetryTokenResponseImpl.java @@ -0,0 +1,53 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.api.internal; + +import java.time.Duration; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.retries.api.RefreshRetryTokenResponse; +import software.amazon.awssdk.retries.api.RetryToken; +import software.amazon.awssdk.utils.Validate; + +/** + * Implementation for the {@link RefreshRetryTokenResponse} interface. 
+ */ +@SdkInternalApi +public final class RefreshRetryTokenResponseImpl implements RefreshRetryTokenResponse { + private final RetryToken token; + private final Duration delay; + + private RefreshRetryTokenResponseImpl(RetryToken token, Duration delay) { + this.token = Validate.paramNotNull(token, "token"); + this.delay = Validate.isNotNegative(Validate.paramNotNull(delay, "delay"), "delay"); + } + + @Override + public RetryToken token() { + return token; + } + + @Override + public Duration delay() { + return delay; + } + + /** + * Creates a new {@link RefreshRetryTokenResponse} with the given token and delay. + */ + public static RefreshRetryTokenResponse create(RetryToken token, Duration delay) { + return new RefreshRetryTokenResponseImpl(token, delay); + } +} diff --git a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java index a7023ccc153f..cea417cd06db 100644 --- a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java +++ b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java @@ -114,7 +114,7 @@ static class TestCase { private Throwable testThrowable; private boolean expectedTestResult; - TestCase configure(Function configure) { + TestCase configure(Function configure) { configure.apply(builder); return this; } @@ -143,7 +143,7 @@ boolean expected() { } } - static class BuilderToTestDefaults implements RetryStrategy.Builder { + static class BuilderToTestDefaults implements RetryStrategy.Builder { Predicate shouldRetryCapture = null; Predicate shouldRetryCapture() { @@ -151,24 +151,43 @@ Predicate shouldRetryCapture() { } @Override - public RetryStrategy.Builder retryOnException(Predicate shouldRetry) { + public BuilderToTestDefaults retryOnException(Predicate shouldRetry) { shouldRetryCapture = shouldRetry; return this; } @Override - public RetryStrategy.Builder maxAttempts(int maxAttempts) { + public BuilderToTestDefaults maxAttempts(int maxAttempts) { return this; } @Override - public RetryStrategy.Builder treatAsThrottling(Predicate treatAsThrottling) { - return this; + public DummyRetryStrategy build() { + return null; } + } + + static class DummyRetryStrategy implements RetryStrategy { @Override - public RetryStrategy build() { + public AcquireInitialTokenResponse acquireInitialToken(AcquireInitialTokenRequest request) { + return null; + } + + @Override + public RefreshRetryTokenResponse refreshRetryToken(RefreshRetryTokenRequest request) { + return null; + } + + @Override + public RecordSuccessResponse recordSuccess(RecordSuccessRequest request) { + return null; + } + + @Override + public BuilderToTestDefaults toBuilder() { return null; } } + } diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java new file mode 100644 index 000000000000..f49545125553 --- /dev/null +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java @@ -0,0 +1,56 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries; + +import java.time.Duration; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.retries.api.BackoffStrategy; +import software.amazon.awssdk.retries.api.RetryStrategy; + +/** + * Built-in implementations of the {@link RetryStrategy} interface. + */ +@SdkPublicApi +public final class DefaultRetryStrategy { + + private DefaultRetryStrategy() { + } + + /** + * Create a new builder for a {@code StandardRetryStrategy}. + * + *

+     * <p>Example Usage
+     * <pre>
+     * StandardRetryStrategy retryStrategy =
+     *     DefaultRetryStrategy.standardStrategyBuilder()
+     *                    .retryOnExceptionInstanceOf(IllegalArgumentException.class)
+     *                    .retryOnExceptionInstanceOf(IllegalStateException.class)
+     *                    .build();
+     * </pre>
+ */ + public static StandardRetryStrategy.Builder standardStrategyBuilder() { + return StandardRetryStrategy.builder() + .maxAttempts(Standard.MAX_ATTEMPTS) + .backoffStrategy(BackoffStrategy.exponentialDelay(Standard.BASE_DELAY, Standard.MAX_BACKOFF)) + .circuitBreakerEnabled(true); + } + + static final class Standard { + static final int MAX_ATTEMPTS = 3; + static final Duration BASE_DELAY = Duration.ofSeconds(1); + static final Duration MAX_BACKOFF = Duration.ofSeconds(20); + } +} diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/StandardRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/StandardRetryStrategy.java new file mode 100644 index 000000000000..4cf8a4d20768 --- /dev/null +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/StandardRetryStrategy.java @@ -0,0 +1,90 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries; + +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.retries.api.BackoffStrategy; +import software.amazon.awssdk.retries.api.RetryStrategy; +import software.amazon.awssdk.retries.internal.StandardRetryStrategyImpl; + +/** + * The standard retry strategy is the recommended {@link RetryStrategy} for normal use-cases. + *

+ * Unlike {@link AdaptiveRetryStrategy}, the standard strategy is generally useful across all retry use-cases. + *

+ * The standard retry strategy by default:
+ * <ol>
+ *     <li>Retries on the conditions configured in the {@link Builder}.
+ *     <li>Retries 2 times (3 total attempts). Adjust with {@link Builder#maxAttempts(int)}
+ *     <li>Uses the {@link BackoffStrategy#exponentialDelay} backoff strategy, with a base delay of
+ *     1 second and max delay of 20 seconds. Adjust with {@link Builder#backoffStrategy}
+ *     <li>Circuit breaking (disabling retries) in the event of high downstream failures across the scope of
+ *     the strategy. The circuit breaking will never prevent a successful first attempt. Adjust with
+ *     {@link Builder#circuitBreakerEnabled}.
+ * </ol>
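The defaults listed above can be seen end to end in a small usage sketch built only from the API added in this patch; the scope name is arbitrary and callOnce() stands in for any unreliable operation. refreshRetryToken is expected to throw TokenAcquisitionFailedException once retries or circuit-breaker capacity are exhausted.

    import java.io.IOException;
    import software.amazon.awssdk.retries.DefaultRetryStrategy;
    import software.amazon.awssdk.retries.StandardRetryStrategy;
    import software.amazon.awssdk.retries.api.AcquireInitialTokenRequest;
    import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse;
    import software.amazon.awssdk.retries.api.RecordSuccessRequest;
    import software.amazon.awssdk.retries.api.RefreshRetryTokenRequest;
    import software.amazon.awssdk.retries.api.RefreshRetryTokenResponse;
    import software.amazon.awssdk.retries.api.RetryToken;

    final class StandardRetryUsageSketch {
        static String callWithRetries() throws Exception {
            StandardRetryStrategy strategy = DefaultRetryStrategy.standardStrategyBuilder()
                                                                 .retryOnExceptionInstanceOf(IOException.class)
                                                                 .build();
            AcquireInitialTokenResponse initial =
                strategy.acquireInitialToken(AcquireInitialTokenRequest.create("example-scope"));
            RetryToken token = initial.token();
            Thread.sleep(initial.delay().toMillis());
            while (true) {
                try {
                    String result = callOnce();                                 // hypothetical unreliable call
                    strategy.recordSuccess(RecordSuccessRequest.create(token)); // releases circuit-breaker capacity
                    return result;
                } catch (IOException e) {
                    // Throws TokenAcquisitionFailedException when max attempts or capacity are exhausted.
                    RefreshRetryTokenResponse refreshed =
                        strategy.refreshRetryToken(RefreshRetryTokenRequest.builder()
                                                                           .token(token)
                                                                           .failure(e)
                                                                           .build());
                    token = refreshed.token();
                    Thread.sleep(refreshed.delay().toMillis());                 // back off before the next attempt
                }
            }
        }

        private static String callOnce() throws IOException {
            throw new IOException("placeholder for a real call");
        }
    }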
+ * + * @see AdaptiveRetryStrategy + */ +@SdkPublicApi +@ThreadSafe +public interface StandardRetryStrategy extends RetryStrategy { + /** + * Create a new {@link StandardRetryStrategy.Builder}. + * + *

+     * <p>Example Usage
+     * <pre>
+     * StandardRetryStrategy retryStrategy =
+     *     StandardRetryStrategy.builder()
+     *                          .retryOnExceptionInstanceOf(IllegalArgumentException.class)
+     *                          .retryOnExceptionInstanceOf(IllegalStateException.class)
+     *                          .build();
+     * </pre>
+ */ + static Builder builder() { + return StandardRetryStrategyImpl.builder(); + } + + @Override + Builder toBuilder(); + + interface Builder extends RetryStrategy.Builder { + /** + * Configure the backoff strategy used by this executor. + * + *

By default, this uses jittered exponential backoff. + */ + Builder backoffStrategy(BackoffStrategy backoffStrategy); + + /** + * Whether circuit breaking is enabled for this executor. + * + *
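For context on that default: the expected values in ExponentialDelayWithJitterTest earlier in this series (base delay 23 ms, max delay 20 s, a stubbed Random) are consistent with no delay on the first attempt and, for attempt n >= 2, a uniform draw below an exponentially growing, capped bound. The sketch below is an inference from those test expectations, not the SDK's actual implementation, and it ignores overflow for very large attempt counts.

    import java.time.Duration;
    import java.util.Random;

    final class JitteredExponentialSketch {
        // delay(1) = 0; delay(n) = uniform value in [0, min(maxDelay, baseDelay * 2^(n - 2))) for n >= 2
        static Duration delayFor(int attempt, Duration baseDelay, Duration maxDelay, Random random) {
            if (attempt <= 1) {
                return Duration.ZERO;
            }
            long boundMs = Math.min(maxDelay.toMillis(), baseDelay.toMillis() << (attempt - 2));
            return Duration.ofMillis(random.nextInt((int) boundMs));
        }
    }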

The circuit breaker will prevent attempts (even below the {@link #maxAttempts(int)}) if a large number of + * failures are observed by this executor. + * + *

Note: The circuit breaker scope is local to the created {@link RetryStrategy}, + * and will therefore not be effective unless the {@link RetryStrategy} is used for more than one call. It's recommended + * that a {@link RetryStrategy} be reused for all calls to a single unreliable resource. It's also recommended that + * separate {@link RetryStrategy}s be used for calls to unrelated resources. + * + *

By default, this is {@code true}. + */ + Builder circuitBreakerEnabled(Boolean circuitBreakerEnabled); + + @Override + StandardRetryStrategy build(); + } +} diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultRetryToken.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultRetryToken.java new file mode 100644 index 000000000000..44adc829325e --- /dev/null +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultRetryToken.java @@ -0,0 +1,215 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.internal; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.retries.api.RetryToken; +import software.amazon.awssdk.utils.ToString; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A data rich {@link RetryToken} implementation. The data contained in this class is not part of the interface but is needed for + * the calling code to test and generate meaningful logs using its current state. + */ +@SdkInternalApi +public final class DefaultRetryToken implements RetryToken, ToCopyableBuilder { + private final String scope; + private final TokenState state; + private final int attempt; + private final int capacityAcquired; + private final int capacityRemaining; + private final List failures; + + private DefaultRetryToken(Builder builder) { + this.scope = Validate.paramNotNull(builder.scope, "scope"); + this.state = Validate.paramNotNull(builder.state, "status"); + this.attempt = Validate.isPositive(builder.attempt, "attempt"); + this.capacityAcquired = Validate.isNotNegative(builder.capacityAcquired, "capacityAcquired"); + this.capacityRemaining = Validate.isNotNegative(builder.capacityRemaining, "capacityRemaining"); + this.failures = Collections.unmodifiableList(Validate.paramNotNull(builder.failures, "failures")); + } + + /** + * Returns the latest attempt count. + */ + public int attempt() { + return attempt; + } + + /** + * Returns the token scope. + */ + public String scope() { + return scope; + } + + /** + * Returns the latest capacity acquired from the token bucket. + */ + public int capacityAcquired() { + return capacityAcquired; + } + + /** + * Returns the capacity remaining in the token bucket when the last acquire request was done. + */ + public int capacityRemaining() { + return capacityRemaining; + } + + /** + * Returns the state of the token. + */ + public TokenState state() { + return state; + } + + /** + * Creates a new builder to mutate the current instance. 
+ */ + public Builder toBuilder() { + return new Builder(this); + } + + @Override + public String toString() { + return ToString.builder("StandardRetryToken") + .add("scope", scope) + .add("status", state) + .add("attempt", attempt) + .add("capacityAcquired", capacityAcquired) + .add("capacityRemaining", capacityRemaining) + .add("failures", failures) + .build(); + } + + /** + * Returns a new builder to create new instances of the {@link DefaultRetryToken} class. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Set of possibles states on which the RetryToken can be, in-progress, succeeded and all the possible failure modes. + */ + public enum TokenState { + /** + * The request operation is in-progress. + */ + IN_PROGRESS, + /** + * The request operation concluded successfully. + */ + SUCCEEDED, + /** + * The request operation failed with token acquisition failure. + */ + TOKEN_ACQUISITION_FAILED, + /** + * The request operation failed with max retries reached. + */ + MAX_RETRIES_REACHED, + /** + * The request operation failed with non-retryable exception caught. + */ + NON_RETRYABLE_EXCEPTION + } + + /** + * A builder class to create {@link DefaultRetryToken} instances or to mutate them. + */ + public static class Builder implements CopyableBuilder { + private TokenState state = TokenState.IN_PROGRESS; + private String scope; + private int attempt = 1; + private int capacityAcquired = 0; + private int capacityRemaining = 0; + private List failures; + + Builder() { + this.failures = new ArrayList<>(); + } + + Builder(DefaultRetryToken token) { + this.scope = token.scope; + this.attempt = token.attempt; + this.capacityAcquired = token.capacityAcquired; + this.capacityRemaining = token.capacityRemaining; + this.failures = new ArrayList<>(token.failures); + } + + /** + * Sets the scope of the retry token. + */ + public Builder scope(String scope) { + this.scope = scope; + return this; + } + + /** + * Sets the state of the retry token. + */ + public Builder state(TokenState state) { + this.state = state; + return this; + } + + /** + * Increments the current attempt count. + */ + public Builder increaseAttempt() { + ++this.attempt; + return this; + } + + /** + * Sets the capacity acquired from the token bucket. + */ + public Builder capacityAcquired(int capacityAcquired) { + this.capacityAcquired = capacityAcquired; + return this; + } + + /** + * Sets the capacity remaining in the token bucket after the last acquire. + */ + public Builder capacityRemaining(int capacityRemaining) { + this.capacityRemaining = capacityRemaining; + return this; + } + + /** + * Adds a {@link Throwable} to the retry-token. + */ + public Builder addFailure(Throwable failure) { + this.failures.add(Validate.paramNotNull(failure, "failure")); + return this; + } + + /** + * Creates a new {@link DefaultRetryToken} with the configured values. + */ + public DefaultRetryToken build() { + return new DefaultRetryToken(this); + } + } +} diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyImpl.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyImpl.java new file mode 100644 index 000000000000..4e8ce225d09f --- /dev/null +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyImpl.java @@ -0,0 +1,356 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.internal; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.function.Predicate; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ToBuilderIgnoreField; +import software.amazon.awssdk.retries.StandardRetryStrategy; +import software.amazon.awssdk.retries.api.AcquireInitialTokenRequest; +import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse; +import software.amazon.awssdk.retries.api.BackoffStrategy; +import software.amazon.awssdk.retries.api.RecordSuccessRequest; +import software.amazon.awssdk.retries.api.RecordSuccessResponse; +import software.amazon.awssdk.retries.api.RefreshRetryTokenRequest; +import software.amazon.awssdk.retries.api.RefreshRetryTokenResponse; +import software.amazon.awssdk.retries.api.RetryToken; +import software.amazon.awssdk.retries.api.TokenAcquisitionFailedException; +import software.amazon.awssdk.retries.api.internal.AcquireInitialTokenResponseImpl; +import software.amazon.awssdk.retries.api.internal.RefreshRetryTokenResponseImpl; +import software.amazon.awssdk.retries.internal.circuitbreaker.AcquireResponse; +import software.amazon.awssdk.retries.internal.circuitbreaker.ReleaseResponse; +import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucket; +import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; + +/** + * Implementation of the {@link StandardRetryStrategy} interface. 
+ */ +@SdkInternalApi +public final class StandardRetryStrategyImpl implements StandardRetryStrategy { + private static final Logger LOG = Logger.loggerFor(StandardRetryStrategyImpl.class); + + private final List> predicates; + private final int maxAttempts; + private final boolean circuitBreakerEnabled; + private final BackoffStrategy backoffStrategy; + private final int exceptionCost; + private final TokenBucketStore tokenBucketStore; + + private StandardRetryStrategyImpl(Builder builder) { + this.predicates = Collections.unmodifiableList(Validate.paramNotNull(builder.predicates, "predicates")); + this.maxAttempts = Validate.isPositive(builder.maxAttempts, "maxAttempts"); + this.circuitBreakerEnabled = builder.circuitBreakerEnabled; + this.backoffStrategy = Validate.paramNotNull(builder.backoffStrategy, "backoffStrategy"); + this.exceptionCost = builder.exceptionCost; + this.tokenBucketStore = Validate.paramNotNull(builder.tokenBucketStore, "tokenBucketStore"); + } + + @Override + public AcquireInitialTokenResponse acquireInitialToken(AcquireInitialTokenRequest request) { + logAcquireInitialToken(request); + return AcquireInitialTokenResponseImpl.create( + DefaultRetryToken.builder().scope(request.scope()).build(), Duration.ZERO); + } + + @Override + public RefreshRetryTokenResponse refreshRetryToken(RefreshRetryTokenRequest request) { + DefaultRetryToken token = asStandardRetryToken(request.token()); + AcquireResponse acquireResponse = requestAcquireCapacity(request, token); + + // Check if we meet the preconditions needed for retrying. These will throw if the expected condition is not meet. + // 1) is retryable? + throwOnNonRetryableException(request, acquireResponse); + // 2) max attempts reached? + throwOnMaxAttemptsReached(request, acquireResponse); + // 3) can we acquire a token? + throwOnAcquisitionFailure(request, acquireResponse); + + // Refresh the retry token and compute the backoff delay. + DefaultRetryToken refreshedToken = refreshToken(request, acquireResponse); + Duration backoff = backoffStrategy.computeDelay(refreshedToken.attempt()); + + // Take the max delay between the suggested delay and the backoff delay. + Duration suggested = request.suggestedDelay().orElse(Duration.ZERO); + Duration finalDelay = maxOf(suggested, backoff); + + logRefreshTokenSuccess(refreshedToken, acquireResponse, finalDelay); + return RefreshRetryTokenResponseImpl.create(refreshedToken, finalDelay); + } + + @Override + public RecordSuccessResponse recordSuccess(RecordSuccessRequest request) { + DefaultRetryToken token = asStandardRetryToken(request.token()); + + // Update the circuit breaker token bucket. + ReleaseResponse releaseResponse = updateCircuitBreakerTokenBucket(token); + + // Refresh the retry token and return + DefaultRetryToken refreshedToken = refreshRetryTokenAfterSuccess(token, releaseResponse); + + // Log success and return. + logRecordSuccess(token, releaseResponse); + return RecordSuccessResponse.create(refreshedToken); + } + + @Override + @ToBuilderIgnoreField({"DEFAULT_EXCEPTION_TOKEN_COST", "DEFAULT_TOKEN_BUCKET_SIZE"}) + public Builder toBuilder() { + return new Builder(this); + } + + /** + * Returns a builder to update this retry strategy. 
+ */ + public static Builder builder() { + return new Builder(); + } + + private Duration maxOf(Duration left, Duration right) { + if (left.compareTo(right) >= 0) { + return left; + } + return right; + } + + private ReleaseResponse updateCircuitBreakerTokenBucket(DefaultRetryToken token) { + TokenBucket bucket = tokenBucketStore.tokenBucketForScope(token.scope()); + int capacityReleased = token.capacityAcquired(); + return bucket.release(capacityReleased); + } + + private DefaultRetryToken refreshRetryTokenAfterSuccess(DefaultRetryToken token, ReleaseResponse releaseResponse) { + return token.toBuilder() + .capacityAcquired(0) + .capacityRemaining(releaseResponse.currentCapacity()) + .state(DefaultRetryToken.TokenState.SUCCEEDED) + .build(); + } + + private void throwOnAcquisitionFailure(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { + DefaultRetryToken token = asStandardRetryToken(request.token()); + if (acquireResponse.acquisitionFailed()) { + Throwable failure = request.failure(); + DefaultRetryToken refreshedToken = + token.toBuilder() + .capacityRemaining(acquireResponse.capacityRemaining()) + .capacityAcquired(acquireResponse.capacityAcquired()) + .state(DefaultRetryToken.TokenState.TOKEN_ACQUISITION_FAILED) + .addFailure(failure) + .build(); + String message = acquisitionFailedMessage(acquireResponse); + LOG.error(() -> message, failure); + throw new TokenAcquisitionFailedException(message, refreshedToken, failure); + } + } + + private void throwOnMaxAttemptsReached(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { + DefaultRetryToken token = asStandardRetryToken(request.token()); + if (maxAttemptsReached(token)) { + Throwable failure = request.failure(); + DefaultRetryToken refreshedToken = + token.toBuilder() + .capacityRemaining(acquireResponse.capacityRemaining()) + .capacityAcquired(acquireResponse.capacityAcquired()) + .state(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) + .addFailure(failure) + .build(); + String message = maxAttemptsReachedMessage(refreshedToken); + LOG.error(() -> message, failure); + throw new TokenAcquisitionFailedException(message, refreshedToken, failure); + } + } + + private void throwOnNonRetryableException(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { + DefaultRetryToken token = asStandardRetryToken(request.token()); + Throwable failure = request.failure(); + if (isNonRetryableException(request)) { + String message = nonRetryableExceptionMessage(token); + LOG.error(() -> message, failure); + DefaultRetryToken refreshedToken = + token.toBuilder() + .capacityRemaining(acquireResponse.capacityRemaining()) + .capacityAcquired(acquireResponse.capacityAcquired()) + .state(DefaultRetryToken.TokenState.NON_RETRYABLE_EXCEPTION) + .addFailure(failure) + .build(); + throw new TokenAcquisitionFailedException(message, refreshedToken, failure); + } + int attempt = token.attempt(); + LOG.warn(() -> String.format("Request attempt %d encountered retryable failure.", attempt), failure); + } + + private String nonRetryableExceptionMessage(DefaultRetryToken token) { + return String.format("Request attempt %d encountered non-retryable failure", token.attempt()); + } + + private String maxAttemptsReachedMessage(DefaultRetryToken token) { + return String.format("Request will not be retried. 
Retries have been exhausted " + + "(cost: 0, capacity: %d/%d)", + token.capacityAcquired(), + token.capacityRemaining()); + } + + private String acquisitionFailedMessage(AcquireResponse acquireResponse) { + return String.format("Request will not be retried to protect the caller and downstream service. " + + "The cost of retrying (%d) " + + "exceeds the available retry capacity (%d/%d).", + acquireResponse.capacityRequested(), + acquireResponse.capacityRemaining(), + acquireResponse.maxCapacity()); + } + + private void logAcquireInitialToken(AcquireInitialTokenRequest request) { + // Request attempt 1 token acquired (backoff: 0ms, cost: 0, capacity: 500/500) + TokenBucket tokenBucket = tokenBucketStore.tokenBucketForScope(request.scope()); + LOG.debug(() -> String.format("Request attempt 1 token acquired " + + "(backoff: 0ms, cost: 0, capacity: %d/%d)", + tokenBucket.currentCapacity(), tokenBucket.maxCapacity())); + } + + private void logRefreshTokenSuccess(DefaultRetryToken token, AcquireResponse acquireResponse, Duration delay) { + LOG.debug(() -> String.format("Request attempt %d token acquired " + + "(backoff: %dms, cost: %d, capacity: %d/%d)", + token.attempt(), delay.toMillis(), + acquireResponse.capacityAcquired(), + acquireResponse.capacityRemaining(), + acquireResponse.maxCapacity())); + } + + private void logRecordSuccess(DefaultRetryToken token, ReleaseResponse release) { + LOG.debug(() -> String.format("Request attempt %d succeeded (cost: -%d, capacity: %d/%d)", + token.attempt(), release.capacityReleased(), + release.currentCapacity(), release.maxCapacity())); + + } + + private boolean maxAttemptsReached(DefaultRetryToken token) { + return token.attempt() >= maxAttempts; + } + + private boolean isNonRetryableException(RefreshRetryTokenRequest request) { + Throwable failure = request.failure(); + for (Predicate predicate : predicates) { + if (predicate.test(failure)) { + return false; + } + } + return true; + } + + static DefaultRetryToken asStandardRetryToken(RetryToken token) { + return Validate.isInstanceOf(DefaultRetryToken.class, token, + "RetryToken is of unexpected class (%s), " + + "This token was not created by this retry strategy.", + token.getClass().getName()); + } + + private AcquireResponse requestAcquireCapacity(RefreshRetryTokenRequest request, DefaultRetryToken token) { + TokenBucket tokenBucket = tokenBucketStore.tokenBucketForScope(token.scope()); + if (!circuitBreakerEnabled) { + return tokenBucket.tryAcquire(0); + } + return tokenBucket.tryAcquire(exceptionCost); + } + + private DefaultRetryToken refreshToken(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { + DefaultRetryToken token = asStandardRetryToken(request.token()); + return token.toBuilder() + .increaseAttempt() + .state(DefaultRetryToken.TokenState.IN_PROGRESS) + .capacityAcquired(acquireResponse.capacityAcquired()) + .capacityRemaining(acquireResponse.capacityRemaining()) + .addFailure(request.failure()) + .build(); + } + + public static class Builder implements StandardRetryStrategy.Builder { + private static final int DEFAULT_EXCEPTION_TOKEN_COST = 5; + private static final int DEFAULT_TOKEN_BUCKET_SIZE = 500; + private List> predicates; + private int maxAttempts; + private boolean circuitBreakerEnabled; + private int exceptionCost; + private BackoffStrategy backoffStrategy; + private TokenBucketStore tokenBucketStore; + + Builder() { + predicates = new ArrayList<>(); + exceptionCost = DEFAULT_EXCEPTION_TOKEN_COST; + circuitBreakerEnabled = true; + tokenBucketStore = 
TokenBucketStore.builder() + .tokenBucketMaxCapacity(DEFAULT_TOKEN_BUCKET_SIZE) + .build(); + } + + Builder(StandardRetryStrategyImpl strategy) { + this.predicates = new ArrayList<>(strategy.predicates); + this.maxAttempts = strategy.maxAttempts; + this.circuitBreakerEnabled = strategy.circuitBreakerEnabled; + this.exceptionCost = strategy.exceptionCost; + this.backoffStrategy = strategy.backoffStrategy; + this.tokenBucketStore = strategy.tokenBucketStore; + } + + @Override + public Builder retryOnException(Predicate shouldRetry) { + this.predicates.add(shouldRetry); + return this; + } + + @Override + public Builder maxAttempts(int maxAttempts) { + this.maxAttempts = maxAttempts; + return this; + } + + @Override + public Builder circuitBreakerEnabled(Boolean circuitBreakerEnabled) { + if (circuitBreakerEnabled == null) { + this.circuitBreakerEnabled = true; + } else { + this.circuitBreakerEnabled = circuitBreakerEnabled; + } + return this; + } + + @Override + public Builder backoffStrategy(BackoffStrategy backoffStrategy) { + this.backoffStrategy = backoffStrategy; + return this; + } + + public Builder tokenBucketExceptionCost(int exceptionCost) { + this.exceptionCost = exceptionCost; + return this; + } + + public Builder tokenBucketStore(TokenBucketStore tokenBucketStore) { + this.tokenBucketStore = tokenBucketStore; + return this; + } + + @Override + public StandardRetryStrategyImpl build() { + return new StandardRetryStrategyImpl(this); + } + } +} diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/circuitbreaker/AcquireResponse.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/circuitbreaker/AcquireResponse.java new file mode 100644 index 000000000000..134e8e655f96 --- /dev/null +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/circuitbreaker/AcquireResponse.java @@ -0,0 +1,134 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.internal.circuitbreaker; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * The number of tokens in the token bucket after a specific token acquisition succeeds. 
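+ * <p>
+ * If the acquisition fails, {@link #acquisitionFailed()} returns {@code true}, {@link #capacityAcquired()} is zero and no
+ * capacity is deducted from the bucket.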
+ */ +@SdkInternalApi +public final class AcquireResponse implements ToCopyableBuilder { + private final int maxCapacity; + private final int capacityRequested; + private final int capacityAcquired; + private final int capacityRemaining; + private final boolean acquisitionFailed; + + private AcquireResponse(Builder builder) { + this.maxCapacity = Validate.notNull(builder.maxCapacity, "maxCapacity"); + this.capacityRequested = Validate.notNull(builder.capacityRequested, "capacityRequested"); + this.capacityAcquired = Validate.notNull(builder.capacityAcquired, "capacityAcquired"); + this.capacityRemaining = Validate.notNull(builder.capacityRemaining, "capacityRemaining"); + this.acquisitionFailed = Validate.notNull(builder.acquisitionFailed, "acquisitionFailed"); + } + + public static Builder builder() { + return new Builder(); + } + + /** + * The max capacity. + */ + public int maxCapacity() { + return maxCapacity; + } + + /** + * The numbers of token requested by the last token acquisition. + */ + public int capacityRequested() { + return capacityRequested; + } + + /** + * The number of tokens acquired by the last token acquisition. + */ + public int capacityAcquired() { + return capacityAcquired; + } + + /** + * The number of tokens in the token bucket. + */ + public int capacityRemaining() { + return capacityRemaining; + } + + /** + * Returns {@code true} if the requested capacity was not successfully acquired. + */ + public boolean acquisitionFailed() { + return acquisitionFailed; + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + public static class Builder implements CopyableBuilder { + private Integer maxCapacity; + private Integer capacityRequested; + private Integer capacityAcquired; + private Integer capacityRemaining; + private Boolean acquisitionFailed; + + private Builder() { + } + + private Builder(AcquireResponse instance) { + this.maxCapacity = instance.maxCapacity; + this.capacityRequested = instance.capacityRequested; + this.capacityAcquired = instance.capacityAcquired; + this.capacityRemaining = instance.capacityRemaining; + this.acquisitionFailed = instance.acquisitionFailed; + } + + public Builder maxCapacity(Integer maxCapacity) { + this.maxCapacity = maxCapacity; + return this; + } + + public Builder capacityRequested(Integer capacityRequested) { + this.capacityRequested = capacityRequested; + return this; + } + + public Builder capacityAcquired(Integer capacityAcquired) { + this.capacityAcquired = capacityAcquired; + return this; + } + + public Builder capacityRemaining(Integer capacityRemaining) { + this.capacityRemaining = capacityRemaining; + return this; + } + + public Builder acquisitionFailed(Boolean acquisitionFailed) { + this.acquisitionFailed = acquisitionFailed; + return this; + } + + @Override + public AcquireResponse build() { + return new AcquireResponse(this); + } + } +} diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/circuitbreaker/ReleaseResponse.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/circuitbreaker/ReleaseResponse.java new file mode 100644 index 000000000000..f63ee4cb4ac0 --- /dev/null +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/circuitbreaker/ReleaseResponse.java @@ -0,0 +1,102 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.internal.circuitbreaker; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +@SdkInternalApi +public final class ReleaseResponse implements ToCopyableBuilder { + private final int capacityReleased; + private final int currentCapacity; + private final int maxCapacity; + + private ReleaseResponse(Builder builder) { + this.capacityReleased = Validate.paramNotNull(builder.capacityReleased, "capacityReleased"); + this.currentCapacity = Validate.paramNotNull(builder.currentCapacity, "currentCapacity"); + this.maxCapacity = Validate.paramNotNull(builder.maxCapacity, "maxCapacity"); + } + + /** + * Returns the capacity released from the request. + */ + public int capacityReleased() { + return capacityReleased; + } + + /** + * Returns the capacity of the token bucket after the release. + */ + public int currentCapacity() { + return currentCapacity; + } + + /** + * Returns the max capacity for the token bucket. + */ + public int maxCapacity() { + return maxCapacity; + } + + /** + * Creates a new builder to build a {@link ReleaseResponse} instance. + */ + public static Builder builder() { + return new Builder(); + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + public static class Builder implements CopyableBuilder { + private Integer capacityReleased; + private Integer currentCapacity; + private Integer maxCapacity; + + Builder(ReleaseResponse releaseResponse) { + this.capacityReleased = releaseResponse.capacityReleased; + this.currentCapacity = releaseResponse.currentCapacity; + this.maxCapacity = releaseResponse.maxCapacity; + } + + Builder() { + } + + public Builder capacityReleased(Integer capacityReleased) { + this.capacityReleased = capacityReleased; + return this; + } + + public Builder currentCapacity(Integer currentCapacity) { + this.currentCapacity = currentCapacity; + return this; + } + + public Builder maxCapacity(Integer maxCapacity) { + this.maxCapacity = maxCapacity; + return this; + } + + public ReleaseResponse build() { + return new ReleaseResponse(this); + } + + } +} diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/circuitbreaker/TokenBucket.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/circuitbreaker/TokenBucket.java new file mode 100644 index 000000000000..8abaf5082959 --- /dev/null +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/circuitbreaker/TokenBucket.java @@ -0,0 +1,130 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.internal.circuitbreaker; + +import java.util.concurrent.atomic.AtomicInteger; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.ToString; +import software.amazon.awssdk.utils.Validate; + +/** + * A lock-free implementation of a token bucket. Tokens can be acquired from the bucket as long as there is sufficient capacity in + * the bucket. + */ +@SdkInternalApi +public final class TokenBucket { + private final int maxCapacity; + private final AtomicInteger capacity; + + /** + * Create a bucket containing the specified number of tokens. + */ + TokenBucket(int maxCapacity) { + this.maxCapacity = maxCapacity; + this.capacity = new AtomicInteger(maxCapacity); + } + + /** + * Try to acquire a certain number of tokens from this bucket. If there aren't sufficient tokens in this bucket then + * {@link AcquireResponse#acquisitionFailed()} returns {@code true}. + */ + public AcquireResponse tryAcquire(int amountToAcquire) { + Validate.isNotNegative(amountToAcquire, "amountToAcquire"); + AcquireResponse.Builder responseBuilder = AcquireResponse.builder() + .maxCapacity(maxCapacity) + .capacityRequested(amountToAcquire); + + if (amountToAcquire == 0) { + return responseBuilder + .acquisitionFailed(false) + .capacityAcquired(0) + .capacityRemaining(capacity.get()) + .build(); + } + + int currentCapacity; + int newCapacity; + do { + currentCapacity = capacity.get(); + newCapacity = currentCapacity - amountToAcquire; + if (newCapacity < 0) { + return responseBuilder + .acquisitionFailed(true) + .capacityAcquired(0) + .capacityRemaining(capacity.get()) + .build(); + + } + } while (!capacity.compareAndSet(currentCapacity, newCapacity)); + + return responseBuilder + .acquisitionFailed(false) + .capacityAcquired(amountToAcquire) + .capacityRemaining(newCapacity) + .build(); + } + + /** + * Release a certain number of tokens back to this bucket. If this number of tokens would exceed the maximum number of tokens + * configured for the bucket, the bucket is instead set to the maximum value and the additional tokens are discarded. + */ + public ReleaseResponse release(int amountToRelease) { + Validate.isTrue(amountToRelease >= 0, "Amount must not be negative."); + ReleaseResponse.Builder builder = + ReleaseResponse.builder() + .capacityReleased(amountToRelease) + .maxCapacity(maxCapacity); + + if (amountToRelease == 0) { + return builder.currentCapacity(capacity.get()) + .build(); + } + + int currentCapacity; + int newCapacity; + do { + currentCapacity = capacity.get(); + newCapacity = Math.min(currentCapacity + amountToRelease, maxCapacity); + } while (!capacity.compareAndSet(currentCapacity, newCapacity)); + + return builder.currentCapacity(newCapacity) + .build(); + } + + /** + * Retrieve a snapshot of the current number of tokens in the bucket. Because this number is constantly changing, it's + * recommended to refer to the {@link AcquireResponse#capacityRemaining()} returned by the {@link #tryAcquire(int)} method + * whenever possible. + */ + public int currentCapacity() { + return capacity.get(); + } + + /** + * Retrieve the maximum capacity of the bucket configured when the bucket was created. 
+ */ + public int maxCapacity() { + return maxCapacity; + } + + @Override + public String toString() { + return ToString.builder("TokenBucket") + .add("maxCapacity", maxCapacity) + .add("capacity", capacity) + .build(); + } +} diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/circuitbreaker/TokenBucketStore.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/circuitbreaker/TokenBucketStore.java new file mode 100644 index 000000000000..f949d8903fe9 --- /dev/null +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/circuitbreaker/TokenBucketStore.java @@ -0,0 +1,94 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.internal.circuitbreaker; + +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Validate; + +/** + * A store to keep token buckets per scope. + */ +@SdkInternalApi +public final class TokenBucketStore { + private static final int DEFAULT_MAX_TOKENS = 500; + private static final int MAX_ENTRIES = 128; + private final int tokenBucketMaxCapacity; + private final Map scopeToTokenBucket; + + @SuppressWarnings("serial") + private TokenBucketStore(Builder builder) { + this.tokenBucketMaxCapacity = builder.tokenBucketMaxCapacity; + this.scopeToTokenBucket = new ConcurrentHashMap<>(new LruMap<>()); + } + + /** + * Returns the {@link TokenBucket} for the given scope. + */ + public TokenBucket tokenBucketForScope(String scope) { + Validate.paramNotNull(scope, "scope"); + return scopeToTokenBucket.computeIfAbsent(scope, + key -> new TokenBucket(tokenBucketMaxCapacity)); + } + + /** + * Returns a new builder to create a new store. + */ + public static TokenBucketStore.Builder builder() { + return new Builder(); + } + + /** + * A map that limits the number of entries it holds to at most {@link TokenBucketStore#MAX_ENTRIES}. If the limit is exceeded + * then the last recently used entry is removed to make room for the new one. 
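+ * <p>
+ * Eviction relies on {@link LinkedHashMap}'s access-order mode (enabled in the constructor) together with
+ * {@code removeEldestEntry}, so reading an entry refreshes it and the eldest entry is dropped once the size limit is hit.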
+ */ + @SuppressWarnings("serial") + static final class LruMap extends LinkedHashMap { + private static final long serialVersionUID = 885024284016559479L; + + LruMap() { + super(MAX_ENTRIES, 1.0f, true); + } + + @Override + protected boolean removeEldestEntry(Map.Entry eldest) { + return size() > MAX_ENTRIES; + } + } + + public static class Builder { + private int tokenBucketMaxCapacity; + + Builder() { + tokenBucketMaxCapacity = DEFAULT_MAX_TOKENS; + } + + Builder(TokenBucketStore store) { + this.tokenBucketMaxCapacity = store.tokenBucketMaxCapacity; + } + + public Builder tokenBucketMaxCapacity(int tokenBucketMaxCapacity) { + this.tokenBucketMaxCapacity = tokenBucketMaxCapacity; + return this; + } + + public TokenBucketStore build() { + return new TokenBucketStore(this); + } + } +} diff --git a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyMiscTest.java b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyMiscTest.java new file mode 100644 index 000000000000..3beac79074aa --- /dev/null +++ b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyMiscTest.java @@ -0,0 +1,72 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.internal; + +import static software.amazon.awssdk.retries.internal.StandardRetryStrategyTest.TestCase; +import static org.assertj.core.api.Assertions.assertThat; + +import java.time.Duration; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.retries.StandardRetryStrategy; +import software.amazon.awssdk.retries.api.BackoffStrategy; +import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore; + +/** + * Tests that the circuit breaker remembers its previous state for separated + * requests. + */ +public class StandardRetryStrategyMiscTest { + static final int TEST_EXCEPTION_COST = 5; + static final int TEST_MAX = 50; + static final IllegalArgumentException IAE = new IllegalArgumentException(); + static final RuntimeException RTE = new RuntimeException(); + + @Test + public void circuitBreakerRemembersState() { + BackoffStrategy backoff = BackoffStrategy.exponentialDelay(Duration.ofMillis(10), Duration.ofSeconds(25)); + TestCase testCase = new TestCase("circuit breaker remembers state") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .configure(b -> b.backoffStrategy(backoff)) + .fineTune(b -> b.tokenBucketExceptionCost(TEST_EXCEPTION_COST)) + .fineTune(b -> b.tokenBucketStore(TokenBucketStore + .builder() + .tokenBucketMaxCapacity(TEST_MAX) + .build())) + .givenExceptions(IAE, IAE); + + // The test case will throw twice and then succeed, so each run will withdraw 2 * TEST_EXCEPTION_COST and deposit back + // TEST_EXCEPTION_COST. 
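+ // Walk-through with the constants above (TEST_MAX = 50, TEST_EXCEPTION_COST = 5): every round has a net cost of
+ // 2 * 5 - 5 = 5 tokens, so after the 9 rounds below 50 - 9 * 5 = 5 tokens remain. The next run then needs 5 tokens
+ // for each of its two retries: the first acquisition drains the bucket to 0 and the second fails, which is what the
+ // final assertions check.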
+ StandardRetryStrategy strategy = testCase.builder.build(); + int total = TEST_MAX; + for (int idx = 0; idx < 9; idx++) { + String name = testCase.name + " round " + idx; + TestCase.runTestCase(testCase, strategy); + assertThat(testCase.thrown).as(name).isNull(); + assertThat(testCase.succeeded).as(name).isTrue(); + assertThat(testCase.token.capacityRemaining()).as(name).isEqualTo(total - TEST_EXCEPTION_COST); + assertThat(testCase.token.state()).as(name).isEqualTo(DefaultRetryToken.TokenState.SUCCEEDED); + total -= TEST_EXCEPTION_COST; + } + // The tokens have been exhausted, assert that the next call will fail. + String name = testCase.name + " no more tokens available"; + TestCase.runTestCase(testCase, strategy); + assertThat(testCase.thrown).as(name).isNotNull(); + assertThat(testCase.succeeded).as(name).isFalse(); + assertThat(testCase.token.capacityRemaining()).as(name).isZero(); + assertThat(testCase.token.state()).as(name).isEqualTo(DefaultRetryToken.TokenState.TOKEN_ACQUISITION_FAILED); + } +} diff --git a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyTest.java b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyTest.java new file mode 100644 index 000000000000..a87b200389cc --- /dev/null +++ b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyTest.java @@ -0,0 +1,272 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.retries.internal; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.function.Function; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.retries.DefaultRetryStrategy; +import software.amazon.awssdk.retries.StandardRetryStrategy; +import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse; +import software.amazon.awssdk.retries.api.BackoffStrategy; +import software.amazon.awssdk.retries.api.RecordSuccessRequest; +import software.amazon.awssdk.retries.api.RecordSuccessResponse; +import software.amazon.awssdk.retries.api.RefreshRetryTokenRequest; +import software.amazon.awssdk.retries.api.RefreshRetryTokenResponse; +import software.amazon.awssdk.retries.api.RetryToken; +import software.amazon.awssdk.retries.api.TokenAcquisitionFailedException; +import software.amazon.awssdk.retries.api.internal.AcquireInitialTokenRequestImpl; +import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore; + +public class StandardRetryStrategyTest { + static final int TEST_BUCKET_CAPACITY = 100; + static final int TEST_EXCEPTION_COST = 5; + static final IllegalArgumentException IAE = new IllegalArgumentException(); + static final RuntimeException RTE = new RuntimeException(); + + @ParameterizedTest + @MethodSource("parameters") + public void testCase(TestCase testCase) { + testCase.run(); + if (testCase.shouldSucceed) { + assertThat(testCase.thrown) + .as(testCase.name) + .isNull(); + } else { + assertThat(testCase.thrown) + .as(testCase.name) + .isNotNull(); + } + assertThat(testCase.succeeded).as(testCase.name).isEqualTo(testCase.shouldSucceed); + assertThat(testCase.token.capacityRemaining()).as(testCase.name).isEqualTo(testCase.expectedCapacity); + assertThat(testCase.token.state()).as(testCase.name).isEqualTo(testCase.expectedState); + + } + + public static Collection parameters() { + BackoffStrategy backoff = BackoffStrategy.exponentialDelay(Duration.ofMillis(10), Duration.ofSeconds(25)); + return Arrays.asList( + new TestCase("Succeeds when no exceptions are thrown") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .configure(b -> b.backoffStrategy(backoff)) + .expectCapacity(TEST_BUCKET_CAPACITY) + .expectState(DefaultRetryToken.TokenState.SUCCEEDED) + .expectSuccess() + , new TestCase("Succeeds when 1 exception is thrown out max of 3") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .configure(b -> b.backoffStrategy(backoff)) + .givenExceptions(IAE) + // Acquire cost and then return cost + .expectCapacity(TEST_BUCKET_CAPACITY) + .expectState(DefaultRetryToken.TokenState.SUCCEEDED) + .expectSuccess() + , new TestCase("Succeeds when 2 exceptions are thrown out max of 3") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .configure(b -> b.backoffStrategy(backoff)) + .givenExceptions(IAE, IAE) + // Acquire (cost * 2) and then return cost + .expectCapacity(TEST_BUCKET_CAPACITY - TEST_EXCEPTION_COST) + .expectState(DefaultRetryToken.TokenState.SUCCEEDED) + .expectSuccess() + , new TestCase("Fails when 3 exceptions are thrown out max of 3") + 
.configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .configure(b -> b.backoffStrategy(backoff)) + .givenExceptions(IAE, IAE, IAE) + // Acquire (cost * 3) and then return zero + .expectCapacity(TEST_BUCKET_CAPACITY - (TEST_EXCEPTION_COST * 3)) + .expectState(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) + .expectThrows() + , new TestCase("Fails when 4 exceptions are thrown out max of 3") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .configure(b -> b.backoffStrategy(backoff)) + .givenExceptions(IAE, IAE, IAE, IAE) + // Acquire (cost * 3) and then return zero + .expectCapacity(TEST_BUCKET_CAPACITY - (TEST_EXCEPTION_COST * 3)) + .expectState(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) + .expectThrows() + , new TestCase("Fails when non-retryable exception throw in the 1st attempt") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .configure(b -> b.backoffStrategy(backoff)) + .givenExceptions(RTE) + // Acquire (cost * 1) and then return zero + .expectCapacity(TEST_BUCKET_CAPACITY - TEST_EXCEPTION_COST) + .expectState(DefaultRetryToken.TokenState.NON_RETRYABLE_EXCEPTION) + .expectThrows() + , new TestCase("Fails when non-retryable exception throw in the 2nd attempt") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .configure(b -> b.backoffStrategy(backoff)) + .givenExceptions(IAE, RTE) + // Acquire (cost * 1) and then return zero + .expectCapacity(TEST_BUCKET_CAPACITY - (TEST_EXCEPTION_COST * 2)) + .expectState(DefaultRetryToken.TokenState.NON_RETRYABLE_EXCEPTION) + .expectThrows() + , new TestCase("Exhausts the token bucket.") + .configure(b -> b.maxAttempts(5)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .configure(b -> b.backoffStrategy(backoff)) + .fineTune(b -> b.tokenBucketStore(TokenBucketStore + .builder() + .tokenBucketMaxCapacity(10) + .build())) + .givenExceptions(IAE, IAE, IAE) + .expectCapacity(0) + .expectState(DefaultRetryToken.TokenState.TOKEN_ACQUISITION_FAILED) + .expectThrows() + , new TestCase("Succeeds when 2 exceptions are thrown out max of 3") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .configure(b -> b.backoffStrategy(backoff)) + // Setting exception cost to ZERO disables the circuit-breaker + .fineTune(b -> b.tokenBucketExceptionCost(0)) + .givenExceptions(IAE, IAE) + // Acquired zero, capacity must be unchanged. 
+ .expectCapacity(TEST_BUCKET_CAPACITY) + .expectState(DefaultRetryToken.TokenState.SUCCEEDED) + .expectSuccess() + ); + } + + + static class TestCase { + final String name; + int attempts = 0; + String scope = "none"; + List exceptions = new ArrayList<>(); + StandardRetryStrategyImpl.Builder builder = + (StandardRetryStrategyImpl.Builder) + DefaultRetryStrategy.standardStrategyBuilder(); + Throwable thrown; + boolean shouldSucceed = false; + boolean succeeded; + Integer expectedCapacity; + DefaultRetryToken.TokenState expectedState; + DefaultRetryToken token; + + TestCase(String name) { + this.name = name; + builder = builder.tokenBucketExceptionCost(TEST_EXCEPTION_COST) + .tokenBucketStore(TokenBucketStore + .builder() + .tokenBucketMaxCapacity(TEST_BUCKET_CAPACITY) + .build()); + } + + public TestCase fineTune(Function configurator) { + this.builder = configurator.apply(this.builder); + return this; + } + + public TestCase configure(Function configurator) { + this.builder = (StandardRetryStrategyImpl.Builder) configurator.apply(this.builder); + return this; + } + + public TestCase givenExceptions(Exception... exceptions) { + Collections.addAll(this.exceptions, exceptions); + return this; + } + + public TestCase expectSuccess() { + this.shouldSucceed = true; + return this; + } + + public TestCase expectThrows() { + this.shouldSucceed = false; + return this; + } + + public TestCase expectCapacity(Integer expectedCapacity) { + this.expectedCapacity = expectedCapacity; + return this; + } + + public TestCase expectState(DefaultRetryToken.TokenState expectedState) { + this.expectedState = expectedState; + return this; + } + + public void run() { + StandardRetryStrategy strategy = builder.build(); + runTestCase(this, strategy); + } + + public static void runTestCase(TestCase testCase, StandardRetryStrategy strategy) { + AcquireInitialTokenResponse res = strategy.acquireInitialToken(AcquireInitialTokenRequestImpl.create(testCase.scope)); + RetryToken token = res.token(); + testCase.succeeded = false; + BusinessLogic logic = new BusinessLogic(testCase.exceptions); + try { + while (!testCase.succeeded) { + try { + logic.call(); + testCase.succeeded = true; + RecordSuccessResponse response = strategy.recordSuccess(RecordSuccessRequest.create(token)); + token = response.token(); + testCase.token = (DefaultRetryToken) token; + } catch (Exception e) { + RefreshRetryTokenResponse refreshResponse = + strategy.refreshRetryToken(RefreshRetryTokenRequest.builder() + .token(token) + .failure(e) + .build()); + token = refreshResponse.token(); + } + } + } catch (TokenAcquisitionFailedException e) { + testCase.thrown = e; + testCase.succeeded = false; + testCase.token = (DefaultRetryToken) e.token(); + } + } + } + + static class BusinessLogic implements Callable { + List exceptions; + int invocation = 0; + + BusinessLogic(List exceptions) { + this.exceptions = exceptions; + } + + @Override + public Integer call() throws Exception { + if (invocation < exceptions.size()) { + throw exceptions.get(invocation++); + } + invocation++; + return invocation; + } + } +} diff --git a/test/tests-coverage-reporting/pom.xml b/test/tests-coverage-reporting/pom.xml index 052841d55391..5ac732e4b87d 100644 --- a/test/tests-coverage-reporting/pom.xml +++ b/test/tests-coverage-reporting/pom.xml @@ -42,6 +42,11 @@ software.amazon.awssdk ${awsjavasdk.version} + + retries + software.amazon.awssdk + ${awsjavasdk.version} + utils software.amazon.awssdk From dd63b907f7b0172e2413e0dd8f2dc72d45cc0b68 Mon Sep 17 00:00:00 2001 From: Manuel 
Sugawara Date: Tue, 9 May 2023 10:12:16 -0700 Subject: [PATCH 04/32] Add adaptive retry strategy (#3975) * Add adaptive retry strategy * Address pull request comments * Address PR comments * Address PR comments --- .../awssdk/retries/AdaptiveRetryStrategy.java | 91 +++++ .../awssdk/retries/DefaultRetryStrategy.java | 26 +- .../awssdk/retries/StandardRetryStrategy.java | 10 +- .../internal/AdaptiveRetryStrategyImpl.java | 380 ++++++++++++++++++ .../circuitbreaker/TokenBucketStore.java | 15 +- .../RateLimiterAcquireResponse.java | 37 ++ .../ratelimiter/RateLimiterClock.java | 32 ++ .../ratelimiter/RateLimiterTokenBucket.java | 311 ++++++++++++++ .../RateLimiterTokenBucketStore.java | 78 ++++ .../RateLimiterUpdateResponse.java | 64 +++ .../internal/ratelimiter/SystemClock.java | 29 ++ ...eRetryStrategyResourceConstrainedTest.java | 302 ++++++++++++++ .../internal/AdaptiveRetryStrategyTest.java | 279 +++++++++++++ .../RateLimiterTokenBucketTest.java | 191 +++++++++ 14 files changed, 1833 insertions(+), 12 deletions(-) create mode 100644 core/retries/src/main/java/software/amazon/awssdk/retries/AdaptiveRetryStrategy.java create mode 100644 core/retries/src/main/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyImpl.java create mode 100644 core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterAcquireResponse.java create mode 100644 core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterClock.java create mode 100644 core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterTokenBucket.java create mode 100644 core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterTokenBucketStore.java create mode 100644 core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterUpdateResponse.java create mode 100644 core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/SystemClock.java create mode 100644 core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyResourceConstrainedTest.java create mode 100644 core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyTest.java create mode 100644 core/retries/src/test/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterTokenBucketTest.java diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/AdaptiveRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/AdaptiveRetryStrategy.java new file mode 100644 index 000000000000..14ac2da01117 --- /dev/null +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/AdaptiveRetryStrategy.java @@ -0,0 +1,91 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.retries; + +import java.util.function.Predicate; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.retries.api.AcquireInitialTokenRequest; +import software.amazon.awssdk.retries.api.BackoffStrategy; +import software.amazon.awssdk.retries.api.RetryStrategy; +import software.amazon.awssdk.retries.internal.AdaptiveRetryStrategyImpl; +import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore; +import software.amazon.awssdk.retries.internal.ratelimiter.RateLimiterTokenBucketStore; + +/** + * The adaptive retry strategy is a {@link RetryStrategy} when executing against a very resource-constrained set of resources. + *

+ * Unlike {@link StandardRetryStrategy}, care should be taken when using this strategy. Specifically, it should be used:
+ * <ol>
+ *     <li>When the availability of downstream resources is mostly affected by callers that are also using
+ *     the {@link AdaptiveRetryStrategy}.</li>
+ *     <li>When the scope (either the whole strategy or the {@link AcquireInitialTokenRequest#scope}) of the strategy is
+ *     constrained to a target "resource", so that availability issues in one resource cannot delay other, unrelated
+ *     resources' availability.</li>
+ * </ol>
+ * <p>
+ * The adaptive retry strategy by default:
+ * <ol>
+ *     <li>Retries on the conditions configured in the {@link Builder}.</li>
+ *     <li>Retries 2 times (3 total attempts). Adjust with {@link Builder#maxAttempts}.</li>
+ *     <li>Uses a dynamic backoff delay based on the load currently perceived against the downstream resource.</li>
+ *     <li>Applies circuit breaking (disabling retries) in the event of high downstream failures within an individual scope.
+ *     Circuit breaking may prevent a first attempt in outage scenarios to protect the downstream service.</li>
+ * </ol>
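+ * <p>
+ * Whatever the configuration, callers drive the strategy the same way on every attempt. The following is only a sketch:
+ * {@code strategy} is any {@link RetryStrategy} instance, the construction of the initial request ({@code acquireRequest})
+ * is elided, and {@code callDownstreamService} and {@code sleepFor} are placeholder helpers.
+ * <pre>
+ * RetryToken token = strategy.acquireInitialToken(acquireRequest).token();
+ * while (true) {
+ *     try {
+ *         callDownstreamService();
+ *         token = strategy.recordSuccess(RecordSuccessRequest.create(token)).token();
+ *         break;
+ *     } catch (Exception e) {
+ *         RefreshRetryTokenResponse refreshed = strategy.refreshRetryToken(
+ *             RefreshRetryTokenRequest.builder().token(token).failure(e).build());
+ *         sleepFor(refreshed.delay());
+ *         token = refreshed.token();
+ *     }
+ * }
+ * </pre>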
    + * + * @see StandardRetryStrategy + */ +@SdkPublicApi +@ThreadSafe +public interface AdaptiveRetryStrategy extends RetryStrategy { + + /** + * Create a new {@link AdaptiveRetryStrategy.Builder}. + * + *

+     * <p>Example Usage
+     * <pre>
+     * AdaptiveRetryStrategy retryStrategy =
+     *     AdaptiveRetryStrategy.builder()
+     *                          .retryOnExceptionInstanceOf(IllegalArgumentException.class)
+     *                          .retryOnExceptionInstanceOf(IllegalStateException.class)
+     *                          .build();
+     * </pre>
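+     * <p>
+     * The adaptive behaviour can additionally be told which failures to treat as throttling; a sketch, using an
+     * illustrative predicate only:
+     * <pre>
+     * AdaptiveRetryStrategy retryStrategy =
+     *     AdaptiveRetryStrategy.builder()
+     *                          .retryOnExceptionInstanceOf(IllegalStateException.class)
+     *                          .treatAsThrottling(t -> t instanceof IllegalStateException)
+     *                          .build();
+     * </pre>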
    + */ + static AdaptiveRetryStrategy.Builder builder() { + return AdaptiveRetryStrategyImpl + .builder() + .maxAttempts(DefaultRetryStrategy.Standard.MAX_ATTEMPTS) + .tokenBucketStore(TokenBucketStore.builder() + .tokenBucketMaxCapacity(DefaultRetryStrategy.Standard.TOKEN_BUCKET_SIZE) + .build()) + .tokenBucketExceptionCost(DefaultRetryStrategy.Standard.TOKEN_BUCKET_SIZE) + .backoffStrategy(BackoffStrategy.exponentialDelay(DefaultRetryStrategy.Standard.BASE_DELAY, + DefaultRetryStrategy.Standard.MAX_BACKOFF)) + .rateLimiterTokenBucketStore(RateLimiterTokenBucketStore.builder().build()); + } + + @Override + Builder toBuilder(); + + interface Builder extends RetryStrategy.Builder { + /** + * Configure the predicate to allow the strategy categorize a Throwable as throttling exception. + */ + Builder treatAsThrottling(Predicate treatAsThrottling); + + + @Override + AdaptiveRetryStrategy build(); + } +} diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java index f49545125553..e90277edc69d 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java @@ -44,13 +44,35 @@ private DefaultRetryStrategy() { public static StandardRetryStrategy.Builder standardStrategyBuilder() { return StandardRetryStrategy.builder() .maxAttempts(Standard.MAX_ATTEMPTS) - .backoffStrategy(BackoffStrategy.exponentialDelay(Standard.BASE_DELAY, Standard.MAX_BACKOFF)) - .circuitBreakerEnabled(true); + .backoffStrategy(BackoffStrategy.exponentialDelay(Standard.BASE_DELAY, Standard.MAX_BACKOFF)); + } + + /** + * Create a new builder for a {@code AdaptiveRetryStrategy}. + * + *

+     * <p>Example Usage
+     * <pre>
+     * AdaptiveRetryStrategy retryStrategy =
+     *     DefaultRetryStrategy.adaptiveRetryStrategyBuilder()
+     *                         .retryOnExceptionInstanceOf(IllegalArgumentException.class)
+     *                         .retryOnExceptionInstanceOf(IllegalStateException.class)
+     *                         .build();
+     * </pre>
    + */ + public static AdaptiveRetryStrategy.Builder adaptiveRetryStrategyBuilder() { + return AdaptiveRetryStrategy.builder() + .maxAttempts(Adaptive.MAX_ATTEMPTS); } static final class Standard { static final int MAX_ATTEMPTS = 3; static final Duration BASE_DELAY = Duration.ofSeconds(1); static final Duration MAX_BACKOFF = Duration.ofSeconds(20); + static final int TOKEN_BUCKET_SIZE = 500; + static final int DEFAULT_EXCEPTION_TOKEN_COST = 5; + } + + static final class Adaptive { + static final int MAX_ATTEMPTS = 3; } } diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/StandardRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/StandardRetryStrategy.java index 4cf8a4d20768..1e9db6f9f7c3 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/StandardRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/StandardRetryStrategy.java @@ -20,6 +20,7 @@ import software.amazon.awssdk.retries.api.BackoffStrategy; import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.retries.internal.StandardRetryStrategyImpl; +import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore; /** * The standard retry strategy is the recommended {@link RetryStrategy} for normal use-cases. @@ -55,7 +56,14 @@ public interface StandardRetryStrategy extends RetryStrategy */ static Builder builder() { - return StandardRetryStrategyImpl.builder(); + return StandardRetryStrategyImpl + .builder() + .maxAttempts(DefaultRetryStrategy.Standard.MAX_ATTEMPTS) + .tokenBucketStore(TokenBucketStore + .builder() + .tokenBucketMaxCapacity(DefaultRetryStrategy.Standard.TOKEN_BUCKET_SIZE) + .build()) + .tokenBucketExceptionCost(DefaultRetryStrategy.Standard.TOKEN_BUCKET_SIZE); } @Override diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyImpl.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyImpl.java new file mode 100644 index 000000000000..e29e73114a06 --- /dev/null +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyImpl.java @@ -0,0 +1,380 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.retries.internal; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.function.Predicate; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.retries.AdaptiveRetryStrategy; +import software.amazon.awssdk.retries.api.AcquireInitialTokenRequest; +import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse; +import software.amazon.awssdk.retries.api.BackoffStrategy; +import software.amazon.awssdk.retries.api.RecordSuccessRequest; +import software.amazon.awssdk.retries.api.RecordSuccessResponse; +import software.amazon.awssdk.retries.api.RefreshRetryTokenRequest; +import software.amazon.awssdk.retries.api.RefreshRetryTokenResponse; +import software.amazon.awssdk.retries.api.RetryToken; +import software.amazon.awssdk.retries.api.TokenAcquisitionFailedException; +import software.amazon.awssdk.retries.api.internal.RefreshRetryTokenResponseImpl; +import software.amazon.awssdk.retries.internal.circuitbreaker.AcquireResponse; +import software.amazon.awssdk.retries.internal.circuitbreaker.ReleaseResponse; +import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucket; +import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore; +import software.amazon.awssdk.retries.internal.ratelimiter.RateLimiterAcquireResponse; +import software.amazon.awssdk.retries.internal.ratelimiter.RateLimiterTokenBucket; +import software.amazon.awssdk.retries.internal.ratelimiter.RateLimiterTokenBucketStore; +import software.amazon.awssdk.retries.internal.ratelimiter.RateLimiterUpdateResponse; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; + +/** + * Implementation of the {@link AdaptiveRetryStrategy} interface. 
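+ * <p>
+ * On top of the standard per-scope circuit-breaker token bucket, this implementation keeps a per-scope
+ * {@link RateLimiterTokenBucket}: failures matching {@code treatAsThrottling} trigger {@code updateRateAfterThrottling()},
+ * successes trigger {@code updateRateAfterSuccess()}, and the delay returned by the rate limiter's {@code tryAcquire()}
+ * is added on top of the computed backoff delay.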
+ */ +@SdkInternalApi +public final class AdaptiveRetryStrategyImpl implements AdaptiveRetryStrategy { + private static final Logger LOG = Logger.loggerFor(AdaptiveRetryStrategyImpl.class); + private final List> retryPredicates; + private final int maxAttempts; + private final boolean circuitBreakerEnabled; + private final BackoffStrategy backoffStrategy; + private final int tokenBucketMaxCapacity; + private final int exceptionCost; + private final Predicate treatAsThrottling; + private final TokenBucketStore tokenBucketStore; + private final RateLimiterTokenBucketStore rateLimiterTokenBucketStore; + + private AdaptiveRetryStrategyImpl(Builder builder) { + this.retryPredicates = Collections.unmodifiableList(Validate.paramNotNull(builder.retryPredicates, "retryPredicates")); + this.maxAttempts = Validate.isPositive(builder.maxAttempts, "maxAttempts"); + this.circuitBreakerEnabled = builder.circuitBreakerEnabled; + this.backoffStrategy = Validate.paramNotNull(builder.backoffStrategy, "backoffStrategy"); + this.exceptionCost = builder.exceptionCost; + this.tokenBucketMaxCapacity = builder.tokenBucketMaxCapacity; + this.treatAsThrottling = Validate.paramNotNull(builder.treatAsThrottling, "treatAsThrottling"); + this.tokenBucketStore = Validate.paramNotNull(builder.tokenBucketStore, "tokenBucketStore"); + this.rateLimiterTokenBucketStore = Validate.paramNotNull(builder.rateLimiterTokenBucketStore, + "rateLimiterTokenBucketStore"); + } + + @Override + public AcquireInitialTokenResponse acquireInitialToken(AcquireInitialTokenRequest request) { + logAcquireInitialToken(request); + return AcquireInitialTokenResponse.create( + DefaultRetryToken.builder().scope(request.scope()).build(), Duration.ZERO); + } + + @Override + public RefreshRetryTokenResponse refreshRetryToken(RefreshRetryTokenRequest request) { + DefaultRetryToken token = asStandardRetryToken(request.token()); + AcquireResponse acquireResponse = requestAcquireCapacity(request, token); + + // Check if we meet the preconditions needed for retrying. These will throw if the expected condition is not meet. + // 1) is retryable? + throwOnNonRetryableException(request, acquireResponse); + // 2) max attempts reached? + throwOnMaxAttemptsReached(request, acquireResponse); + // 3) can we acquire a token? + throwOnAcquisitionFailure(request, acquireResponse); + + // All the conditions required to retry were meet, update the send rate if the error is categorized as throttling. + Throwable failure = request.failure(); + RateLimiterTokenBucket rateLimiterTokenBucket = rateLimiterTokenBucketStore.tokenBucketForScope(token.scope()); + if (this.treatAsThrottling.test(failure)) { + rateLimiterTokenBucket.updateRateAfterThrottling(); + } + + // Refresh the retry token and compute the backoff delay. + DefaultRetryToken refreshedToken = refreshToken(request, acquireResponse); + Duration backoff = backoffStrategy.computeDelay(refreshedToken.attempt()); + + // Acquire capacity from the adaptive token. + RateLimiterAcquireResponse rateLimiterAcquireResponse = rateLimiterTokenBucket.tryAcquire(); + + // Take the max delay between the suggested delay, the backoff delay and the delay of the adaptive strategy. 
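+ // Note: the adaptive (rate limiter) delay is added on top of max(suggestedDelay, backoff) rather than folded into
+ // the max, so the resulting wait is never shorter than the delay the rate limiter asks for.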
+ Duration adaptiveDelay = rateLimiterAcquireResponse.delay(); + Duration suggested = request.suggestedDelay().orElse(Duration.ZERO); + Duration finalDelay = maxOf(suggested, backoff).plus(adaptiveDelay); + + logRefreshTokenSuccess(refreshedToken, acquireResponse, finalDelay); + return RefreshRetryTokenResponseImpl.create(refreshedToken, finalDelay); + } + + @Override + public RecordSuccessResponse recordSuccess(RecordSuccessRequest request) { + DefaultRetryToken token = asStandardRetryToken(request.token()); + + // Update the adaptive token bucket. + updateAdaptiveTokenBucket(token); + + // Update the circuit breaker token bucket. + ReleaseResponse releaseResponse = updateCircuitBreakerTokenBucket(token); + + // Refresh the retry token and return + DefaultRetryToken refreshedToken = refreshRetryTokenAfterSuccess(token, releaseResponse); + + // Log success and return. + logRecordSuccess(token, releaseResponse); + return RecordSuccessResponse.create(refreshedToken); + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + /** + * Returns a builder to fine-tune this retry strategy. + * + * @return a builder for this retry strategy. + */ + public static Builder builder() { + return new Builder(); + } + + private Duration maxOf(Duration left, Duration right) { + if (left.compareTo(right) >= 0) { + return left; + } + return right; + } + + private RateLimiterUpdateResponse updateAdaptiveTokenBucket(DefaultRetryToken token) { + RateLimiterTokenBucket rateLimiterTokenBucket = rateLimiterTokenBucketStore.tokenBucketForScope(token.scope()); + return rateLimiterTokenBucket.updateRateAfterSuccess(); + } + + private ReleaseResponse updateCircuitBreakerTokenBucket(DefaultRetryToken token) { + TokenBucket bucket = tokenBucketStore.tokenBucketForScope(token.scope()); + int capacityReleased = token.capacityAcquired(); + return bucket.release(capacityReleased); + } + + private DefaultRetryToken refreshRetryTokenAfterSuccess(DefaultRetryToken token, ReleaseResponse releaseResponse) { + return token.toBuilder() + .capacityAcquired(0) + .capacityRemaining(releaseResponse.currentCapacity()) + .state(DefaultRetryToken.TokenState.SUCCEEDED) + .build(); + } + + private void throwOnAcquisitionFailure(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { + DefaultRetryToken token = asStandardRetryToken(request.token()); + if (acquireResponse.acquisitionFailed()) { + Throwable failure = request.failure(); + DefaultRetryToken refreshedToken = + token.toBuilder() + .capacityRemaining(acquireResponse.capacityRemaining()) + .capacityAcquired(acquireResponse.capacityAcquired()) + .state(DefaultRetryToken.TokenState.TOKEN_ACQUISITION_FAILED) + .addFailure(failure) + .build(); + String message = acquisitionFailedMessage(acquireResponse); + LOG.error(() -> message, failure); + throw new TokenAcquisitionFailedException(message, refreshedToken, failure); + } + } + + private void throwOnMaxAttemptsReached(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { + DefaultRetryToken token = asStandardRetryToken(request.token()); + if (maxAttemptsReached(token)) { + Throwable failure = request.failure(); + DefaultRetryToken refreshedToken = + token.toBuilder() + .capacityRemaining(acquireResponse.capacityRemaining()) + .capacityAcquired(acquireResponse.capacityAcquired()) + .state(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) + .addFailure(failure) + .build(); + String message = maxAttemptsReachedMessage(refreshedToken); + LOG.error(() -> message, failure); + throw new 
TokenAcquisitionFailedException(message, refreshedToken, failure); + } + } + + private void throwOnNonRetryableException(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { + DefaultRetryToken token = asStandardRetryToken(request.token()); + Throwable failure = request.failure(); + if (isNonRetryableException(request)) { + String message = nonRetryableExceptionMessage(token); + LOG.error(() -> message, failure); + DefaultRetryToken refreshedToken = + token.toBuilder() + .capacityRemaining(acquireResponse.capacityRemaining()) + .capacityAcquired(acquireResponse.capacityAcquired()) + .state(DefaultRetryToken.TokenState.NON_RETRYABLE_EXCEPTION) + .addFailure(failure) + .build(); + throw new TokenAcquisitionFailedException(message, refreshedToken, failure); + } + int attempt = token.attempt(); + LOG.warn(() -> String.format("Request attempt %d encountered retryable failure.", attempt), failure); + } + + private String nonRetryableExceptionMessage(DefaultRetryToken token) { + return String.format("Request attempt %d encountered non-retryable failure", token.attempt()); + } + + private String maxAttemptsReachedMessage(DefaultRetryToken token) { + return String.format("Request will not be retried. Retries have been exhausted " + + "(cost: 0, capacity: %d/%d)", + token.capacityAcquired(), + token.capacityRemaining()); + } + + private String acquisitionFailedMessage(AcquireResponse acquireResponse) { + return String.format("Request will not be retried to protect the caller and downstream service. " + + "The cost of retrying (%d) " + + "exceeds the available retry capacity (%d/%d).", + acquireResponse.capacityRequested(), + acquireResponse.capacityRemaining(), + acquireResponse.maxCapacity()); + } + + private void logAcquireInitialToken(AcquireInitialTokenRequest request) { + // Request attempt 1 token acquired (backoff: 0ms, cost: 0, capacity: 500/500) + TokenBucket tokenBucket = tokenBucketStore.tokenBucketForScope(request.scope()); + LOG.debug(() -> String.format("Request attempt 1 token acquired " + + "(backoff: 0ms, cost: 0, capacity: %d/%d)", + tokenBucket.currentCapacity(), tokenBucket.maxCapacity())); + } + + private void logRefreshTokenSuccess(DefaultRetryToken token, AcquireResponse acquireResponse, Duration delay) { + LOG.debug(() -> String.format("Request attempt %d token acquired " + + "(backoff: %dms, cost: %d, capacity: %d/%d)", + token.attempt(), delay.toMillis(), + acquireResponse.capacityAcquired(), + acquireResponse.capacityRemaining(), + acquireResponse.maxCapacity())); + } + + private void logRecordSuccess(DefaultRetryToken token, ReleaseResponse release) { + LOG.debug(() -> String.format("Request attempt %d succeeded (cost: -%d, capacity: %d/%d)", + token.attempt(), release.capacityReleased(), + release.currentCapacity(), release.maxCapacity())); + + } + + private boolean maxAttemptsReached(DefaultRetryToken token) { + return token.attempt() >= maxAttempts; + } + + private boolean isNonRetryableException(RefreshRetryTokenRequest request) { + Throwable failure = request.failure(); + for (Predicate retryPredicate : retryPredicates) { + if (retryPredicate.test(failure)) { + return false; + } + } + return true; + } + + static DefaultRetryToken asStandardRetryToken(RetryToken token) { + return Validate.isInstanceOf(DefaultRetryToken.class, token, + "RetryToken is of unexpected class (%s), " + + "This token was not created by this retry strategy.", + token.getClass().getName()); + } + + private AcquireResponse requestAcquireCapacity(RefreshRetryTokenRequest request, 
DefaultRetryToken token) { + TokenBucket tokenBucket = tokenBucketStore.tokenBucketForScope(token.scope()); + return tokenBucket.tryAcquire(exceptionCost); + } + + private DefaultRetryToken refreshToken(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { + DefaultRetryToken token = asStandardRetryToken(request.token()); + return token.toBuilder() + .increaseAttempt() + .state(DefaultRetryToken.TokenState.IN_PROGRESS) + .capacityAcquired(acquireResponse.capacityAcquired()) + .capacityRemaining(acquireResponse.capacityRemaining()) + .addFailure(request.failure()) + .build(); + } + + public static class Builder implements AdaptiveRetryStrategy.Builder { + private List> retryPredicates; + private int maxAttempts; + private boolean circuitBreakerEnabled; + private int tokenBucketMaxCapacity; + private int exceptionCost; + private Predicate treatAsThrottling; + private BackoffStrategy backoffStrategy; + private TokenBucketStore tokenBucketStore; + private RateLimiterTokenBucketStore rateLimiterTokenBucketStore; + + Builder() { + retryPredicates = new ArrayList<>(); + } + + Builder(AdaptiveRetryStrategyImpl strategy) { + this.retryPredicates = new ArrayList<>(strategy.retryPredicates); + this.maxAttempts = strategy.maxAttempts; + this.circuitBreakerEnabled = strategy.circuitBreakerEnabled; + this.tokenBucketMaxCapacity = strategy.tokenBucketMaxCapacity; + this.exceptionCost = strategy.exceptionCost; + this.treatAsThrottling = strategy.treatAsThrottling; + this.backoffStrategy = strategy.backoffStrategy; + this.tokenBucketStore = strategy.tokenBucketStore; + this.rateLimiterTokenBucketStore = strategy.rateLimiterTokenBucketStore; + } + + @Override + public Builder retryOnException(Predicate shouldRetry) { + this.retryPredicates.add(shouldRetry); + return this; + } + + @Override + public Builder maxAttempts(int maxAttempts) { + this.maxAttempts = maxAttempts; + return this; + } + + @Override + public Builder treatAsThrottling(Predicate treatAsThrottling) { + this.treatAsThrottling = treatAsThrottling; + return this; + } + + public Builder tokenBucketStore(TokenBucketStore tokenBucketStore) { + this.tokenBucketStore = tokenBucketStore; + return this; + } + + public Builder rateLimiterTokenBucketStore(RateLimiterTokenBucketStore rateLimiterTokenBucketStore) { + this.rateLimiterTokenBucketStore = rateLimiterTokenBucketStore; + return this; + } + + public Builder backoffStrategy(BackoffStrategy backoffStrategy) { + this.backoffStrategy = backoffStrategy; + return this; + } + + public Builder tokenBucketExceptionCost(int exceptionCost) { + this.exceptionCost = exceptionCost; + return this; + } + + @Override + public AdaptiveRetryStrategyImpl build() { + return new AdaptiveRetryStrategyImpl(this); + } + } +} diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/circuitbreaker/TokenBucketStore.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/circuitbreaker/TokenBucketStore.java index f949d8903fe9..534a39e84889 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/circuitbreaker/TokenBucketStore.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/circuitbreaker/TokenBucketStore.java @@ -17,9 +17,9 @@ import java.util.LinkedHashMap; import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.cache.lru.LruCache; /** * A store to keep token 
buckets per scope. @@ -29,12 +29,14 @@ public final class TokenBucketStore { private static final int DEFAULT_MAX_TOKENS = 500; private static final int MAX_ENTRIES = 128; private final int tokenBucketMaxCapacity; - private final Map scopeToTokenBucket; + private final LruCache scopeToTokenBucket; @SuppressWarnings("serial") private TokenBucketStore(Builder builder) { this.tokenBucketMaxCapacity = builder.tokenBucketMaxCapacity; - this.scopeToTokenBucket = new ConcurrentHashMap<>(new LruMap<>()); + this.scopeToTokenBucket = LruCache.builder(x -> new TokenBucket(tokenBucketMaxCapacity)) + .maxSize(MAX_ENTRIES) + .build(); } /** @@ -42,8 +44,7 @@ private TokenBucketStore(Builder builder) { */ public TokenBucket tokenBucketForScope(String scope) { Validate.paramNotNull(scope, "scope"); - return scopeToTokenBucket.computeIfAbsent(scope, - key -> new TokenBucket(tokenBucketMaxCapacity)); + return scopeToTokenBucket.get(scope); } /** @@ -78,10 +79,6 @@ public static class Builder { tokenBucketMaxCapacity = DEFAULT_MAX_TOKENS; } - Builder(TokenBucketStore store) { - this.tokenBucketMaxCapacity = store.tokenBucketMaxCapacity; - } - public Builder tokenBucketMaxCapacity(int tokenBucketMaxCapacity) { this.tokenBucketMaxCapacity = tokenBucketMaxCapacity; return this; diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterAcquireResponse.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterAcquireResponse.java new file mode 100644 index 000000000000..58c58b47fefa --- /dev/null +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterAcquireResponse.java @@ -0,0 +1,37 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.internal.ratelimiter; + +import java.time.Duration; +import software.amazon.awssdk.annotations.SdkInternalApi; + +@SdkInternalApi +public final class RateLimiterAcquireResponse { + private final Duration delay; + + private RateLimiterAcquireResponse(Duration delay) { + this.delay = delay; + } + + public Duration delay() { + return delay; + } + + public static RateLimiterAcquireResponse create(Duration waitTime) { + return new RateLimiterAcquireResponse(waitTime); + } + +} diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterClock.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterClock.java new file mode 100644 index 000000000000..b6c509db6408 --- /dev/null +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterClock.java @@ -0,0 +1,32 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.internal.ratelimiter; + +import software.amazon.awssdk.annotations.SdkInternalApi; + +@SdkInternalApi +public interface RateLimiterClock { + /** + * Returns the current time in seconds, and should include sub second resolution. This clock need not be related to the + * actual wall-clock time of the system, as it is only used to measure elapsed time. + * + * <p>For instance, if the current time is {@code PT2M8.067S}, i.e., 2 minutes with 8 seconds and 67 milliseconds, this + * method will return {@code 128.067}
    . + * + * @return the current time in seconds, and should include sub second resolution + */ + double time(); +} diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterTokenBucket.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterTokenBucket.java new file mode 100644 index 000000000000..2d8743af9924 --- /dev/null +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterTokenBucket.java @@ -0,0 +1,311 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.internal.ratelimiter; + +import java.time.Duration; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * The {@link RateLimiterTokenBucket} keeps track of past throttling responses and adapts to slow down the send rate to adapt to + * the service. It does this by suggesting a delay amount as result of a {@link #tryAcquire()} call. Callers must update its + * internal state by calling {@link #updateRateAfterThrottling()} when getting a throttling response or + * {@link #updateRateAfterSuccess()} when getting successful response. + * + *

<p>This class is thread-safe; its internal current state is kept in the inner class {@link PersistentState} which is stored + * using an {@link AtomicReference}. The state is converted to a {@link TransientState} when it needs to be mutated and + * converted back to a {@link PersistentState} and stored using {@link AtomicReference#compareAndSet(Object, Object)}. + * + * <p>
    The algorithm used is adapted from the network congestion avoidance algorithm + * CUBIC. + */ +@SdkInternalApi +public class RateLimiterTokenBucket { + private final AtomicReference stateReference; + private final RateLimiterClock clock; + + RateLimiterTokenBucket(RateLimiterClock clock) { + this.clock = clock; + this.stateReference = new AtomicReference<>(new PersistentState()); + } + + /** + * Acquire tokens from the bucket. If the bucket contains enough capacity to satisfy the request, this method will return in + * {@link RateLimiterAcquireResponse#delay()} a {@link Duration#ZERO} value, otherwise it will return the amount of time the + * callers need to wait until enough tokens are refilled. + */ + public RateLimiterAcquireResponse tryAcquire() { + StateUpdate update = updateState(ts -> ts.tokenBucketAcquire(clock, 1.0)); + return RateLimiterAcquireResponse.create(update.result); + } + + /** + * Updates the estimated send rate after a throttling response. + */ + public RateLimiterUpdateResponse updateRateAfterThrottling() { + StateUpdate update = consumeState(ts -> ts.updateClientSendingRate(clock, true)); + return RateLimiterUpdateResponse.builder() + .measuredTxRate(update.newState.measuredTxRate()) + .fillRate(update.newState.fillRate()) + .build(); + } + + /** + * Updates the estimated send rate after a successful response. + */ + public RateLimiterUpdateResponse updateRateAfterSuccess() { + StateUpdate update = consumeState(ts -> ts.updateClientSendingRate(clock, false)); + return RateLimiterUpdateResponse.builder() + .measuredTxRate(update.newState.measuredTxRate()) + .fillRate(update.newState.fillRate()) + .build(); + } + + /** + * Similar to {@link #updateState} but used when the caller only cares about the side effects of the {@link Consumer} but not + * for the value returned. + */ + private StateUpdate consumeState(Consumer mutator) { + return updateState(ts -> { + mutator.accept(ts); + return null; + }); + } + + /** + * Converts the stored persistent state into a transient one and transforms it using the provided function. The provided + * function is expected to update the transient state in-place and return a value that will be returned to the caller in the + * {@link StateUpdate#result} field. The mutated transient value is converted back to a persistent one and stored in the + * atomic reference if no changes were made in-between. If another thread changes the value in-between, the operation is + * retried until succeeded. 
+ */ + private StateUpdate updateState(Function mutator) { + PersistentState current; + PersistentState updated; + T result; + do { + current = stateReference.get(); + TransientState transientState = current.toTransient(); + result = mutator.apply(transientState); + updated = transientState.toPersistent(); + } while (!stateReference.compareAndSet(current, updated)); + + return new StateUpdate<>(updated, result); + } + + static class StateUpdate { + private final PersistentState newState; + private final T result; + + StateUpdate(PersistentState newState, T result) { + this.newState = newState; + this.result = result; + } + } + + static final class TransientState { + private static final double MIN_FILL_RATE = 0.5; + private static final double MIN_CAPACITY = 1.0; + private static final double SMOOTH = 0.8; + private static final double BETA = 0.7; + private static final double SCALE_CONSTANT = 0.4; + private double fillRate; + private double maxCapacity; + private double currentCapacity; + private boolean lastTimestampIsSet; + private double lastTimestamp; + private boolean enabled; + private double measuredTxRate; + private double lastTxRateBucket; + private long requestCount; + private double lastMaxRate; + private double lastThrottleTime; + private double timeWindow; + private double newTokenBucketRate; + + private TransientState(PersistentState state) { + this.fillRate = state.fillRate; + this.maxCapacity = state.maxCapacity; + this.currentCapacity = state.currentCapacity; + this.lastTimestampIsSet = state.lastTimestampIsSet; + this.lastTimestamp = state.lastTimestamp; + this.enabled = state.enabled; + this.measuredTxRate = state.measuredTxRate; + this.lastTxRateBucket = state.lastTxRateBucket; + this.requestCount = state.requestCount; + this.lastMaxRate = state.lastMaxRate; + this.lastThrottleTime = state.lastThrottleTime; + this.timeWindow = state.timeWindow; + this.newTokenBucketRate = state.newTokenBucketRate; + } + + PersistentState toPersistent() { + return new PersistentState(this); + } + + /** + * Acquire tokens from the bucket. If the bucket contains enough capacity to satisfy the request, this method will return + * a {@link Duration#ZERO} value, otherwise it will return the amount of time the callers need to wait until enough tokens + * are refilled. + */ + Duration tokenBucketAcquire(RateLimiterClock clock, double amount) { + if (!this.enabled) { + return Duration.ZERO; + } + refill(clock); + double waitTime = 0.0; + if (this.currentCapacity < amount) { + waitTime = (amount - this.currentCapacity) / this.fillRate; + } + this.currentCapacity -= amount; + return Duration.ofNanos((long) (waitTime * 1_000_000_000.0)); + } + + /** + * Updates the sending rate depending on whether the response was successful or + * we got a throttling response. 
+ */ + void updateClientSendingRate(RateLimiterClock clock, boolean throttlingResponse) { + updateMeasuredRate(clock); + double calculatedRate; + if (throttlingResponse) { + double rateToUse; + if (!this.enabled) { + rateToUse = this.measuredTxRate; + } else { + rateToUse = Math.min(this.measuredTxRate, this.fillRate); + } + + this.lastMaxRate = rateToUse; + calculateTimeWindow(); + this.lastThrottleTime = clock.time(); + calculatedRate = cubicThrottle(rateToUse); + this.enabled = true; + } else { + calculateTimeWindow(); + calculatedRate = cubicSuccess(clock.time()); + } + + double newRate = Math.min(calculatedRate, 2 * this.measuredTxRate); + updateRate(clock, newRate); + } + + void refill(RateLimiterClock clock) { + double timestamp = clock.time(); + if (this.lastTimestampIsSet) { + double fillAmount = (timestamp - this.lastTimestamp) * this.fillRate; + this.currentCapacity = Math.min(this.maxCapacity, this.currentCapacity + fillAmount); + } + this.lastTimestamp = timestamp; + this.lastTimestampIsSet = true; + } + + void updateRate(RateLimiterClock clock, double newRps) { + refill(clock); + this.fillRate = Math.max(newRps, MIN_FILL_RATE); + this.maxCapacity = Math.max(newRps, MIN_CAPACITY); + this.currentCapacity = Math.min(this.currentCapacity, this.maxCapacity); + this.newTokenBucketRate = newRps; + } + + void updateMeasuredRate(RateLimiterClock clock) { + double time = clock.time(); + this.requestCount += 1; + double timeBucket = Math.floor(time * 2) / 2; + if (timeBucket > this.lastTxRateBucket) { + double currentRate = this.requestCount / (timeBucket - this.lastTxRateBucket); + this.measuredTxRate = (currentRate * SMOOTH) + (this.measuredTxRate * (1 - SMOOTH)); + this.requestCount = 0; + this.lastTxRateBucket = timeBucket; + } + } + + void calculateTimeWindow() { + this.timeWindow = Math.pow((this.lastMaxRate * (1 - BETA)) / SCALE_CONSTANT, 1.0 / 3); + } + + double cubicSuccess(double timestamp) { + double delta = timestamp - this.lastThrottleTime; + return (SCALE_CONSTANT * Math.pow(delta - this.timeWindow, 3)) + this.lastMaxRate; + } + + double cubicThrottle(double rateToUse) { + return rateToUse * BETA; + } + } + + static final class PersistentState { + private final double fillRate; + private final double maxCapacity; + private final double currentCapacity; + private final boolean lastTimestampIsSet; + private final double lastTimestamp; + private final boolean enabled; + private final double measuredTxRate; + private final double lastTxRateBucket; + private final long requestCount; + private final double lastMaxRate; + private final double lastThrottleTime; + private final double timeWindow; + private final double newTokenBucketRate; + + private PersistentState() { + this.fillRate = 0; + this.maxCapacity = 0; + this.currentCapacity = 0; + this.lastTimestampIsSet = false; + this.lastTimestamp = 0; + this.enabled = false; + this.measuredTxRate = 0; + this.lastTxRateBucket = 0; + this.requestCount = 0; + this.lastMaxRate = 0; + this.lastThrottleTime = 0; + this.timeWindow = 0; + this.newTokenBucketRate = 0; + } + + PersistentState(TransientState state) { + this.fillRate = state.fillRate; + this.maxCapacity = state.maxCapacity; + this.currentCapacity = state.currentCapacity; + this.lastTimestampIsSet = state.lastTimestampIsSet; + this.lastTimestamp = state.lastTimestamp; + this.enabled = state.enabled; + this.measuredTxRate = state.measuredTxRate; + this.lastTxRateBucket = state.lastTxRateBucket; + this.requestCount = state.requestCount; + this.lastMaxRate = state.lastMaxRate; + 
this.lastThrottleTime = state.lastThrottleTime; + this.timeWindow = state.timeWindow; + this.newTokenBucketRate = state.newTokenBucketRate; + } + + TransientState toTransient() { + return new TransientState(this); + } + + public double fillRate() { + return fillRate; + } + + public double measuredTxRate() { + return measuredTxRate; + } + } +} diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterTokenBucketStore.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterTokenBucketStore.java new file mode 100644 index 000000000000..303e6d61d7f5 --- /dev/null +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterTokenBucketStore.java @@ -0,0 +1,78 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.internal.ratelimiter; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ToBuilderIgnoreField; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; +import software.amazon.awssdk.utils.cache.lru.LruCache; + +/** + * A store to keep token buckets per scope. 
+ */ +@SdkInternalApi +public final class RateLimiterTokenBucketStore + implements ToCopyableBuilder { + private static final int MAX_ENTRIES = 128; + private static final RateLimiterClock DEFAULT_CLOCK = new SystemClock(); + private final LruCache scopeToTokenBucket; + private final RateLimiterClock clock; + + private RateLimiterTokenBucketStore(Builder builder) { + this.clock = Validate.paramNotNull(builder.clock, "clock"); + this.scopeToTokenBucket = LruCache.builder(x -> new RateLimiterTokenBucket(clock)) + .maxSize(MAX_ENTRIES) + .build(); + } + + public RateLimiterTokenBucket tokenBucketForScope(String scope) { + return scopeToTokenBucket.get(scope); + } + + @Override + @ToBuilderIgnoreField("scopeToTokenBucket") + public Builder toBuilder() { + return new Builder(this); + } + + public static RateLimiterTokenBucketStore.Builder builder() { + return new Builder(); + } + + public static class Builder implements CopyableBuilder { + private RateLimiterClock clock; + + Builder() { + this.clock = DEFAULT_CLOCK; + } + + Builder(RateLimiterTokenBucketStore store) { + this.clock = store.clock; + } + + public Builder clock(RateLimiterClock clock) { + this.clock = clock; + return this; + } + + @Override + public RateLimiterTokenBucketStore build() { + return new RateLimiterTokenBucketStore(this); + } + } +} diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterUpdateResponse.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterUpdateResponse.java new file mode 100644 index 000000000000..cbf711ab3523 --- /dev/null +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterUpdateResponse.java @@ -0,0 +1,64 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.retries.internal.ratelimiter; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Validate; + +@SdkInternalApi +public class RateLimiterUpdateResponse { + private double measuredTxRate; + private double fillRate; + + private RateLimiterUpdateResponse(Builder builder) { + this.measuredTxRate = Validate.paramNotNull(builder.measuredTxRate, "measuredTxRate"); + this.fillRate = Validate.paramNotNull(builder.fillRate, "fillRate"); + } + + public double measuredTxRate() { + return measuredTxRate; + } + + public double fillRate() { + return fillRate; + } + + public static Builder builder() { + return new Builder(); + } + + static class Builder { + private Double measuredTxRate; + private Double fillRate; + + Builder() { + } + + public Builder measuredTxRate(double measuredTxRate) { + this.measuredTxRate = measuredTxRate; + return this; + } + + public Builder fillRate(double fillRate) { + this.fillRate = fillRate; + return this; + } + + public RateLimiterUpdateResponse build() { + return new RateLimiterUpdateResponse(this); + } + } +} diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/SystemClock.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/SystemClock.java new file mode 100644 index 000000000000..b12a7c97c37c --- /dev/null +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/ratelimiter/SystemClock.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.internal.ratelimiter; + +import software.amazon.awssdk.annotations.SdkInternalApi; + +@SdkInternalApi +class SystemClock implements RateLimiterClock { + @Override + public double time() { + // The value returned by this method is expected to + // be in seconds with fractional value. We make the + // conversion here. + return System.nanoTime() / 1_000_000_000.0; + } +} diff --git a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyResourceConstrainedTest.java b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyResourceConstrainedTest.java new file mode 100644 index 000000000000..59a9614ec361 --- /dev/null +++ b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyResourceConstrainedTest.java @@ -0,0 +1,302 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.withinPercentage; + +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.retries.AdaptiveRetryStrategy; +import software.amazon.awssdk.retries.api.AcquireInitialTokenRequest; +import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse; +import software.amazon.awssdk.retries.api.BackoffStrategy; +import software.amazon.awssdk.retries.api.RecordSuccessRequest; +import software.amazon.awssdk.retries.api.RefreshRetryTokenRequest; +import software.amazon.awssdk.retries.api.RefreshRetryTokenResponse; +import software.amazon.awssdk.retries.api.RetryToken; +import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore; +import software.amazon.awssdk.retries.internal.ratelimiter.RateLimiterTokenBucketStore; + +/** + * Test for the AdaptiveRetryStrategy correctness and thread safety. This test creates a producer consumer scenario with a limited + * set of consumer workers (server) and a larger amount of producers (client) that create random pairs of matrices of fixed size + * for the server to multiply them. This scenario simulates the expected use of the adaptive retry strategy where all the calls + * are made to a very resource-constrained set of resources, and we expect that the delays created by the rate limiter allows the + * perceived availability for the clients to be close to 1.0. + */ +public class AdaptiveRetryStrategyResourceConstrainedTest { + + @Test + public void seemsToBeCorrectAndThreadSafe() { + // Arrange the test. We allocate a single thread for each server worker + // and for each client worker. + int serverWorkers = 1; + int clientWorkers = 5; + int parallelism = serverWorkers + clientWorkers; + ExecutorService executor = Executors.newFixedThreadPool(parallelism); + Server server = new Server(serverWorkers, executor); + RateLimiterTokenBucketStore store = RateLimiterTokenBucketStore.builder().build(); + AdaptiveRetryStrategy strategy = AdaptiveRetryStrategyImpl + .builder() + // We don't care about how many attempts we allow to, that logic is tested somewhere else. + // so we give the strategy plenty of room for retries. + .maxAttempts(20) + .tokenBucketStore(TokenBucketStore.builder().tokenBucketMaxCapacity(10_000).build()) + // Just wait for the rate limiter delays. + .backoffStrategy(BackoffStrategy.retryImmediately()) + .rateLimiterTokenBucketStore(RateLimiterTokenBucketStore.builder().build()) + .retryOnExceptionInstanceOf(ThrottlingException.class) + .treatAsThrottling(x -> x instanceof ThrottlingException) + .build(); + List clients = createClients(server, strategy, clientWorkers, 8); + + // Start the clients and wait for all of them to complete. 
+ CompletableFuture.allOf(clients.stream() + .map(client -> CompletableFuture.runAsync(client::processAllJobs, executor)) + .toArray(CompletableFuture[]::new)) + .join(); + + server.stop(); + // Assert here that the average of the perceived availability, that is the (number of jobs / attempts) is close to + // 1.0 within 20%. + double total = clients.stream().mapToDouble(Client::perceivedAvailability).sum(); + double avg = total / clients.size(); + + assertThat(avg).isCloseTo(1.0, withinPercentage(20)); + executor.shutdown(); + } + + public static List createClients(Server server, AdaptiveRetryStrategy strategy, int amount, int jobsPerClient) { + return IntStream.range(0, amount) + .mapToObj(idx -> createClient(server, strategy, jobsPerClient)) + .collect(Collectors.toCollection(() -> new ArrayList<>(amount))); + } + + private static Client createClient(Server server, AdaptiveRetryStrategy strategy, int jobs) { + return new Client(createJobs(jobs), server, strategy); + } + + public static List createJobs(int amount) { + // We use a non-small but fixed size here instead of random ones to have a more predictable workload. + int rows = 256; + int cols = 256 + 128; + return IntStream.range(0, amount) + .mapToObj(idx -> createRandomJob(rows, cols)) + .collect(Collectors.toCollection(() -> new ArrayList<>(amount))); + } + + private static Job createRandomJob(int rows, int cols) { + double[][] left = generateMatrix(rows, cols); + double[][] right = generateMatrix(cols, rows); + return new Job(left, right); + } + + public static double[][] generateMatrix(int rows, int cols) { + int bias = 16777619; + Random rand = ThreadLocalRandom.current(); + double[][] matrix = new double[rows][cols]; + for (int row = 0; row < rows; row++) { + for (int col = 0; col < cols; col++) { + matrix[row][col] = bias * rand.nextDouble(); + } + } + return matrix; + } + + static class Client { + private final List jobs; + private final Server server; + private final AdaptiveRetryStrategy strategy; + private int attempts = 0; + + Client(List jobs, Server server, AdaptiveRetryStrategy strategy) { + this.jobs = jobs; + this.server = server; + this.strategy = strategy; + } + + public void processAllJobs() { + for (Job job : jobs) { + process(job); + } + } + + public void process(Job job) { + // submit job + AcquireInitialTokenResponse response = strategy.acquireInitialToken(AcquireInitialTokenRequest.create("client")); + RetryToken token = response.token(); + sleep(response.delay()); + do { + try { + ++attempts; + server.accept(job); + break; + } catch (Throwable e) { + RefreshRetryTokenResponse refreshResponse = + strategy.refreshRetryToken(RefreshRetryTokenRequest.builder() + .token(token) + .failure(e) + .build()); + token = refreshResponse.token(); + sleep(refreshResponse.delay()); + } + } while (true); + + // Block until the job is completed. 
+ synchronized (job.guard) { + while (true) { + try { + job.guard.wait(5); + } catch (InterruptedException ignored) { + } + if (job.isDone()) { + strategy.recordSuccess(RecordSuccessRequest.create(token)); + break; + } + } + } + } + + void sleep(Duration duration) { + if (!duration.isZero()) { + try { + TimeUnit.MILLISECONDS.sleep(duration.toMillis()); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + + double perceivedAvailability() { + return jobs.size() / (double) attempts; + } + } + + static class Server { + private static final Job POISON_PILL = new Job(new double[0][0], new double[0][0]); + private final BlockingQueue jobQueue; + private final List> workers; + + Server(int totalWorkers, ExecutorService executor) { + this.jobQueue = new ArrayBlockingQueue<>(totalWorkers * 2); + this.workers = IntStream.range(0, totalWorkers) + .mapToObj(idx -> CompletableFuture.runAsync(new ServerWorker(jobQueue), executor)) + .collect(Collectors.toCollection(() -> new ArrayList<>(totalWorkers))); + } + + void stop() { + try { + for (int idx = 0; idx < workers.size(); idx++) { + jobQueue.put(POISON_PILL); + } + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + CompletableFuture.allOf(workers.toArray(new CompletableFuture[0])).join(); + } + + public void accept(Job job) { + if (!jobQueue.offer(job)) { + // No space left in the queue to take this job, throw a ThrottlingException to notify the + // client about it and let him retry at a later time. + throw new ThrottlingException(); + } + } + } + + static class ServerWorker implements Runnable { + private final BlockingQueue jobQueue; + + ServerWorker(BlockingQueue jobQueue) { + this.jobQueue = jobQueue; + } + + public void run() { + while (true) { + try { + Job job = jobQueue.poll(10, TimeUnit.MILLISECONDS); + if (job == Server.POISON_PILL) { + // Break from the loop, this signals that the work is done. + break; + } + if (job != null) { + synchronized (job.guard) { + // Process the request and notify the client when the result is ready. + job.setResult(multiplyMatrices(job.left, job.right)); + job.guard.notifyAll(); + } + } + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + + // Actual logic. We use a matrix multiplication instead of sleeping for a random + // amount of time to get some predictable workload. 
+ static double[][] multiplyMatrices(double[][] firstMatrix, double[][] secondMatrix) { + double[][] result = new double[firstMatrix.length][secondMatrix[0].length]; + for (int row = 0; row < result.length; row++) { + for (int col = 0; col < result[row].length; col++) { + result[row][col] = multiplyMatricesCell(firstMatrix, secondMatrix, row, col); + } + } + return result; + } + + static double multiplyMatricesCell(double[][] firstMatrix, double[][] secondMatrix, int row, int col) { + return IntStream.range(0, secondMatrix.length).mapToDouble(i -> firstMatrix[row][i] * secondMatrix[i][col]).sum(); + } + } + + static class Job { + final Object guard = new Object(); + final double[][] left; + final double[][] right; + double[][] result; + + Job(double[][] left, double[][] right) { + this.left = left; + this.right = right; + this.result = null; + } + + boolean isDone() { + return result != null; + } + + void setResult(double[][] result) { + this.result = result; + } + } + + static class ThrottlingException extends RuntimeException { + } +} diff --git a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyTest.java b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyTest.java new file mode 100644 index 000000000000..d452f31f6f91 --- /dev/null +++ b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyTest.java @@ -0,0 +1,279 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.retries.internal; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.function.Function; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.retries.AdaptiveRetryStrategy; +import software.amazon.awssdk.retries.DefaultRetryStrategy; +import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse; +import software.amazon.awssdk.retries.api.BackoffStrategy; +import software.amazon.awssdk.retries.api.RecordSuccessResponse; +import software.amazon.awssdk.retries.api.RefreshRetryTokenResponse; +import software.amazon.awssdk.retries.api.RetryToken; +import software.amazon.awssdk.retries.api.TokenAcquisitionFailedException; +import software.amazon.awssdk.retries.api.internal.AcquireInitialTokenRequestImpl; +import software.amazon.awssdk.retries.api.internal.RecordSuccessRequestImpl; +import software.amazon.awssdk.retries.api.internal.RefreshRetryTokenRequestImpl; +import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore; +import software.amazon.awssdk.retries.internal.ratelimiter.RateLimiterTokenBucketStore; + +// The tests here are the same set of test from the StandardRetryStrategy, both should be passing the same battery of tests. +// Unfortunately It's not possible to create a single parametrized test on RetryStrategy given the different types required to +// configure each strategy. +public class AdaptiveRetryStrategyTest { + static final int TEST_BUCKET_CAPACITY = 100; + static final int TEST_EXCEPTION_COST = 5; + static final IllegalArgumentException IAE = new IllegalArgumentException(); + static final RuntimeException RTE = new RuntimeException(); + + @ParameterizedTest + @MethodSource("parameters") + public void testCase(TestCase testCase) { + testCase.run(); + if (testCase.shouldSucceed) { + assertThat(testCase.thrown) + .as(testCase.name) + .isNull(); + } else { + assertThat(testCase.thrown) + .as(testCase.name) + .isNotNull(); + } + assertThat(testCase.succeeded).as(testCase.name).isEqualTo(testCase.shouldSucceed); + assertThat(testCase.token.capacityRemaining()).as(testCase.name).isEqualTo(testCase.expectedCapacity); + assertThat(testCase.token.state()).as(testCase.name).isEqualTo(testCase.expectedState); + } + + public static Collection parameters() { + BackoffStrategy backoff = BackoffStrategy.exponentialDelay(Duration.ofMillis(10), Duration.ofSeconds(25)); + return Arrays.asList( + new TestCase("Succeeds when no exceptions are thrown") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .fineTune(b -> b.backoffStrategy(backoff)) + .expectCapacity(TEST_BUCKET_CAPACITY) + .expectState(DefaultRetryToken.TokenState.SUCCEEDED) + .expectSuccess() + , new TestCase("Succeeds when 1 exception is thrown out max of 3") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .fineTune(b -> b.backoffStrategy(backoff)) + .givenExceptions(IAE) + // Acquire cost and then return cost + .expectCapacity(TEST_BUCKET_CAPACITY) + .expectState(DefaultRetryToken.TokenState.SUCCEEDED) + .expectSuccess() + , new TestCase("Succeeds when 2 exceptions are thrown out max of 3") + .configure(b -> b.maxAttempts(3)) + 
.configure(b -> b.retryOnException(IllegalArgumentException.class)) + .fineTune(b -> b.backoffStrategy(backoff)) + .givenExceptions(IAE, IAE) + // Acquire (cost * 2) and then return cost + .expectCapacity(TEST_BUCKET_CAPACITY - TEST_EXCEPTION_COST) + .expectState(DefaultRetryToken.TokenState.SUCCEEDED) + .expectSuccess() + , new TestCase("Fails when 3 exceptions are thrown out max of 3") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .fineTune(b -> b.backoffStrategy(backoff)) + .givenExceptions(IAE, IAE, IAE) + // Acquire (cost * 3) and then return zero + .expectCapacity(TEST_BUCKET_CAPACITY - (TEST_EXCEPTION_COST * 3)) + .expectState(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) + .expectThrows() + , new TestCase("Fails when 4 exceptions are thrown out max of 3") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .fineTune(b -> b.backoffStrategy(backoff)) + .givenExceptions(IAE, IAE, IAE, IAE) + // Acquire (cost * 3) and then return zero + .expectCapacity(TEST_BUCKET_CAPACITY - (TEST_EXCEPTION_COST * 3)) + .expectState(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) + .expectThrows() + , new TestCase("Fails when non-retryable exception throw in the 1st attempt") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .fineTune(b -> b.backoffStrategy(backoff)) + .givenExceptions(RTE) + // Acquire (cost * 1) and then return zero + .expectCapacity(TEST_BUCKET_CAPACITY - TEST_EXCEPTION_COST) + .expectState(DefaultRetryToken.TokenState.NON_RETRYABLE_EXCEPTION) + .expectThrows() + , new TestCase("Fails when non-retryable exception throw in the 2nd attempt") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .fineTune(b -> b.backoffStrategy(backoff)) + .givenExceptions(IAE, RTE) + // Acquire (cost * 1) and then return zero + .expectCapacity(TEST_BUCKET_CAPACITY - (TEST_EXCEPTION_COST * 2)) + .expectState(DefaultRetryToken.TokenState.NON_RETRYABLE_EXCEPTION) + .expectThrows() + , new TestCase("Exhausts the token bucket.") + .configure(b -> b.maxAttempts(5)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .fineTune(b -> b.backoffStrategy(backoff)) + .fineTune(b -> b.tokenBucketStore( TokenBucketStore + .builder() + .tokenBucketMaxCapacity(10) + .build())) + .givenExceptions(IAE, IAE, IAE) + .expectCapacity(0) + .expectState(DefaultRetryToken.TokenState.TOKEN_ACQUISITION_FAILED) + .expectThrows() + , new TestCase("Succeeds when 2 exceptions are thrown out max of 3") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .fineTune(b -> b.backoffStrategy(backoff)) + // Setting exception cost to ZERO disables the circuit-breaker + .fineTune(b -> b.tokenBucketExceptionCost(0)) + .givenExceptions(IAE, IAE) + // Acquired zero, capacity must be unchanged. 
+ .expectCapacity(TEST_BUCKET_CAPACITY) + .expectState(DefaultRetryToken.TokenState.SUCCEEDED) + .expectSuccess() + + ); + } + + static class TestCase { + final String name; + int attempts = 0; + String scope = "none"; + List exceptions = new ArrayList<>(); + AdaptiveRetryStrategyImpl.Builder builder = + (AdaptiveRetryStrategyImpl.Builder) + DefaultRetryStrategy.adaptiveRetryStrategyBuilder(); + Throwable thrown; + boolean shouldSucceed = false; + boolean succeeded; + Integer expectedCapacity; + DefaultRetryToken.TokenState expectedState; + DefaultRetryToken token; + + TestCase(String name) { + this.name = name; + builder = builder.tokenBucketExceptionCost(TEST_EXCEPTION_COST) + .treatAsThrottling(t -> false) + .tokenBucketStore(TokenBucketStore + .builder() + .tokenBucketMaxCapacity(TEST_BUCKET_CAPACITY) + .build()) + .rateLimiterTokenBucketStore(RateLimiterTokenBucketStore + .builder() + .build()); + } + + public TestCase configure(Function configurator) { + this.builder = (AdaptiveRetryStrategyImpl.Builder) configurator.apply(this.builder); + return this; + } + + public TestCase fineTune(Function configurator) { + this.builder = configurator.apply(this.builder); + return this; + } + + public TestCase givenExceptions(Exception... exceptions) { + Collections.addAll(this.exceptions, exceptions); + return this; + } + + public TestCase expectSuccess() { + this.shouldSucceed = true; + return this; + } + + public TestCase expectThrows() { + this.shouldSucceed = false; + return this; + } + + public TestCase expectCapacity(Integer expectedCapacity) { + this.expectedCapacity = expectedCapacity; + return this; + } + + public TestCase expectState(DefaultRetryToken.TokenState expectedState) { + this.expectedState = expectedState; + return this; + } + + public void run() { + AdaptiveRetryStrategy strategy = builder.build(); + runTestCase(this, strategy); + } + + public static void runTestCase(TestCase testCase, AdaptiveRetryStrategy strategy) { + AcquireInitialTokenResponse res = strategy.acquireInitialToken(AcquireInitialTokenRequestImpl.create(testCase.scope)); + RetryToken token = res.token(); + testCase.succeeded = false; + BusinessLogic logic = new BusinessLogic(testCase.exceptions); + try { + while (!testCase.succeeded) { + try { + logic.call(); + testCase.succeeded = true; + RecordSuccessResponse response = strategy.recordSuccess(RecordSuccessRequestImpl.create(token)); + token = response.token(); + testCase.token = (DefaultRetryToken) token; + } catch (Exception e) { + RefreshRetryTokenResponse refreshResponse = + strategy.refreshRetryToken(RefreshRetryTokenRequestImpl.builder() + .token(token) + .failure(e) + .build()); + token = refreshResponse.token(); + } + } + } catch (TokenAcquisitionFailedException e) { + testCase.thrown = e; + testCase.succeeded = false; + testCase.token = (DefaultRetryToken) e.token(); + } + } + } + + static class BusinessLogic implements Callable { + List exceptions; + int invocation = 0; + + BusinessLogic(List exceptions) { + this.exceptions = exceptions; + } + + @Override + public Integer call() throws Exception { + if (invocation < exceptions.size()) { + throw exceptions.get(invocation++); + } + invocation++; + return invocation; + } + } +} diff --git a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterTokenBucketTest.java b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterTokenBucketTest.java new file mode 100644 index 000000000000..fc91c6bcfa9f --- /dev/null +++ 
b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterTokenBucketTest.java @@ -0,0 +1,191 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.internal.ratelimiter; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.AssertionsForClassTypes.within; + +import java.util.Arrays; +import java.util.Collection; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +public class RateLimiterTokenBucketTest { + private static MutableClock clock = null; + private static RateLimiterTokenBucket tokenBucket = null; + private static final double EPSILON = 0.0001; + + @BeforeAll + static void setup() { + clock = new MutableClock(); + tokenBucket = new RateLimiterTokenBucket(clock); + } + + @ParameterizedTest + @MethodSource("parameters") + public void testCase(TestCase testCase) { + clock.setCurrent(testCase.givenTimestamp); + RateLimiterUpdateResponse res; + tokenBucket.tryAcquire(); + if (testCase.throttleResponse) { + res = tokenBucket.updateRateAfterThrottling(); + } else { + res = tokenBucket.updateRateAfterSuccess(); + } + double measuredTxRate = res.measuredTxRate(); + assertThat(measuredTxRate).isCloseTo(testCase.expectMeasuredTxRate, within(EPSILON)); + double fillRate = res.fillRate(); + assertThat(fillRate).isCloseTo(testCase.expectFillRate, within(EPSILON)); + } + + + public static Collection parameters() { + return Arrays.asList( + new TestCase() + .givenSuccessResponse() + .givenTimestamp(0.2) + .expectMeasuredTxRate(0.000000) + .expectFillRate(0.500000) + , new TestCase() + .givenSuccessResponse() + .givenTimestamp(0.4) + .expectMeasuredTxRate(0.000000) + .expectFillRate(0.500000) + , new TestCase() + .givenSuccessResponse() + .givenTimestamp(0.6) + .expectMeasuredTxRate(4.800000) + .expectFillRate(0.500000) + , new TestCase() + .givenSuccessResponse() + .givenTimestamp(0.8) + .expectMeasuredTxRate(4.800000) + .expectFillRate(0.500000) + , new TestCase() + .givenSuccessResponse() + .givenTimestamp(1.0) + .expectMeasuredTxRate(4.160000) + .expectFillRate(0.500000) + , new TestCase() + .givenSuccessResponse() + .givenTimestamp(1.2) + .expectMeasuredTxRate(4.160000) + .expectFillRate(0.691200) + , new TestCase() + .givenSuccessResponse() + .givenTimestamp(1.4) + .expectMeasuredTxRate(4.160000) + .expectFillRate(1.097600) + , new TestCase() + .givenSuccessResponse() + .givenTimestamp(1.6) + .expectMeasuredTxRate(5.632000) + .expectFillRate(1.638400) + , new TestCase() + .givenSuccessResponse() + .givenTimestamp(1.8) + .expectMeasuredTxRate(5.632000) + .expectFillRate(2.332800) + , new TestCase() + .givenThrottleResponse() + .givenTimestamp(2.0) + .expectMeasuredTxRate(4.326400) + .expectFillRate(3.028480) + , new TestCase() + .givenSuccessResponse() + .givenTimestamp(2.2) + .expectMeasuredTxRate(4.326400) + .expectFillRate(3.486639) + , new 
TestCase() + .givenSuccessResponse() + .givenTimestamp(2.4) + .expectMeasuredTxRate(4.326400) + .expectFillRate(3.821874) + , new TestCase() + .givenSuccessResponse() + .givenTimestamp(2.6) + .expectMeasuredTxRate(5.665280) + .expectFillRate(4.053386) + , new TestCase() + .givenSuccessResponse() + .givenTimestamp(2.8) + .expectMeasuredTxRate(5.665280) + .expectFillRate(4.200373) + , new TestCase() + .givenSuccessResponse() + .givenTimestamp(3.0) + .expectMeasuredTxRate(4.333056) + .expectFillRate(4.282037) + , new TestCase() + .givenThrottleResponse() + .givenTimestamp(3.2) + .expectMeasuredTxRate(4.333056) + .expectFillRate(2.997426) + , new TestCase() + .givenSuccessResponse() + .givenTimestamp(3.4) + .expectMeasuredTxRate(4.333056) + .expectFillRate(3.452226) + ); + } + + static class TestCase { + private boolean throttleResponse; + private double givenTimestamp; + private double expectMeasuredTxRate; + private double expectFillRate; + + TestCase givenSuccessResponse() { + this.throttleResponse = false; + return this; + } + + TestCase givenThrottleResponse() { + this.throttleResponse = true; + return this; + } + + TestCase givenTimestamp(double givenTimestamp) { + this.givenTimestamp = givenTimestamp; + return this; + } + + TestCase expectMeasuredTxRate(double expectMeasuredTxRate) { + this.expectMeasuredTxRate = expectMeasuredTxRate; + return this; + } + + TestCase expectFillRate(double expectFillRate) { + this.expectFillRate = expectFillRate; + return this; + } + + } + + static class MutableClock implements RateLimiterClock { + private double current; + + @Override + public double time() { + return current; + } + + public void setCurrent(double current) { + this.current = current; + } + } +} From 1e4dfc4690651f9f61b58df4ad37b2badc0f2547 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20Sugawara=20=28=E2=88=A9=EF=BD=80-=C2=B4=29?= =?UTF-8?q?=E2=8A=83=E2=94=81=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E?= Date: Tue, 9 May 2023 14:30:17 -0700 Subject: [PATCH 05/32] Update retries and retries-api to snapshot version: 2.20.64-SNAPSHOT --- core/retries-api/pom.xml | 2 +- core/retries/pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/retries-api/pom.xml b/core/retries-api/pom.xml index 5dc96c1e9042..b964b018da89 100644 --- a/core/retries-api/pom.xml +++ b/core/retries-api/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.7-SNAPSHOT + 2.20.64-SNAPSHOT 4.0.0 diff --git a/core/retries/pom.xml b/core/retries/pom.xml index bbdeb4f0c342..895bef8c022f 100644 --- a/core/retries/pom.xml +++ b/core/retries/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.7-SNAPSHOT + 2.20.64-SNAPSHOT 4.0.0 From 840a2bf3bbc16b615309ac91d049612b0921f214 Mon Sep 17 00:00:00 2001 From: Manuel Sugawara Date: Wed, 10 May 2023 16:26:12 -0700 Subject: [PATCH 06/32] Fix SonarCloud code smells (#3991) * Fix SonarCloud code smells * Move AdaptiveRetryStrategyResourceConstrainedTest to an integration test This change is to workaround the SonarCloud code smell of the Sleep usage in this test --- .../api/TokenAcquisitionFailedException.java | 2 +- .../retries/api/RetryStrategyBuilderTest.java | 6 +++--- .../backoff/ExponentialDelayWithJitterTest.java | 6 +++--- .../backoff/FixedDelayWithJitterTest.java | 6 +++--- ...ptiveRetryStrategyResourceConstrainedTest.java | 15 ++++++++------- .../internal/AdaptiveRetryStrategyImpl.java | 4 ++-- .../internal/StandardRetryStrategyImpl.java | 4 ++-- .../internal/AdaptiveRetryStrategyTest.java | 6 +++--- 
.../internal/StandardRetryStrategyMiscTest.java | 4 ++-- .../internal/StandardRetryStrategyTest.java | 6 +++--- .../ratelimiter/RateLimiterTokenBucketTest.java | 6 +++--- 11 files changed, 33 insertions(+), 32 deletions(-) rename core/retries/src/{test => it}/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyResourceConstrainedTest.java (96%) diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/TokenAcquisitionFailedException.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/TokenAcquisitionFailedException.java index b86e68c1964f..1815288bf1d6 100644 --- a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/TokenAcquisitionFailedException.java +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/TokenAcquisitionFailedException.java @@ -22,7 +22,7 @@ */ @SdkPublicApi public final class TokenAcquisitionFailedException extends RuntimeException { - private final RetryToken token; + private final transient RetryToken token; /** * Exception construction accepting message with no root cause. diff --git a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java index cea417cd06db..025d867d9263 100644 --- a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java +++ b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java @@ -24,9 +24,9 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; -public class RetryStrategyBuilderTest { +class RetryStrategyBuilderTest { - public static Collection parameters() { + static Collection parameters() { return Arrays.asList( new TestCase() .configure(b -> b.retryOnException(IllegalArgumentException.class)) @@ -105,7 +105,7 @@ public static Collection parameters() { @ParameterizedTest @MethodSource("parameters") - public void testCase(TestCase testCase) { + void testCase(TestCase testCase) { assertThat(testCase.run()).isEqualTo(testCase.expected()); } diff --git a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitterTest.java b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitterTest.java index db35b9653994..20ca56db426e 100644 --- a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitterTest.java +++ b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitterTest.java @@ -25,14 +25,14 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; -public class ExponentialDelayWithJitterTest { +class ExponentialDelayWithJitterTest { static final ComputedNextInt MIN_VALUE_RND = new ComputedNextInt(bound -> 0); static final ComputedNextInt MID_VALUE_RND = new ComputedNextInt(bound -> bound / 2); static final ComputedNextInt MAX_VALUE_RND = new ComputedNextInt(bound -> bound - 1); static final Duration BASE_DELAY = Duration.ofMillis(23); static final Duration MAX_DELAY = Duration.ofSeconds(20); - public static Collection parameters() { + static Collection parameters() { return Arrays.asList( // --- Using random that returns: bound - 1 new TestCase() @@ -126,7 +126,7 @@ public static Collection parameters() { @ParameterizedTest 
@MethodSource("parameters") - public void testCase(TestCase testCase) { + void testCase(TestCase testCase) { assertThat(testCase.run()).isEqualTo(testCase.expected()); } diff --git a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitterTest.java b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitterTest.java index e144d4e8af92..56c0e18ad15e 100644 --- a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitterTest.java +++ b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitterTest.java @@ -25,13 +25,13 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; -public class FixedDelayWithJitterTest { +class FixedDelayWithJitterTest { static final ComputedNextInt MIN_VALUE_RND = new ComputedNextInt(bound -> 0); static final ComputedNextInt MID_VALUE_RND = new ComputedNextInt(bound -> bound / 2); static final ComputedNextInt MAX_VALUE_RND = new ComputedNextInt(bound -> bound - 1); static final Duration BASE_DELAY = Duration.ofMillis(23); - public static Collection parameters() { + static Collection parameters() { return Arrays.asList( // --- Using random that returns: bound - 1 new TestCase() @@ -126,7 +126,7 @@ public static Collection parameters() { @ParameterizedTest @MethodSource("parameters") - public void testCase(TestCase testCase) { + void testCase(TestCase testCase) { assertThat(testCase.run()).isEqualTo(testCase.expected()); } diff --git a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyResourceConstrainedTest.java b/core/retries/src/it/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyResourceConstrainedTest.java similarity index 96% rename from core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyResourceConstrainedTest.java rename to core/retries/src/it/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyResourceConstrainedTest.java index 59a9614ec361..7c369c928a94 100644 --- a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyResourceConstrainedTest.java +++ b/core/retries/src/it/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyResourceConstrainedTest.java @@ -51,10 +51,10 @@ * are made to a very resource-constrained set of resources, and we expect that the delays created by the rate limiter allows the * perceived availability for the clients to be close to 1.0. */ -public class AdaptiveRetryStrategyResourceConstrainedTest { +class AdaptiveRetryStrategyResourceConstrainedTest { @Test - public void seemsToBeCorrectAndThreadSafe() { + void seemsToBeCorrectAndThreadSafe() { // Arrange the test. We allocate a single thread for each server worker // and for each client worker. 
int serverWorkers = 1; @@ -93,7 +93,7 @@ public void seemsToBeCorrectAndThreadSafe() { executor.shutdown(); } - public static List createClients(Server server, AdaptiveRetryStrategy strategy, int amount, int jobsPerClient) { + private static List createClients(Server server, AdaptiveRetryStrategy strategy, int amount, int jobsPerClient) { return IntStream.range(0, amount) .mapToObj(idx -> createClient(server, strategy, jobsPerClient)) .collect(Collectors.toCollection(() -> new ArrayList<>(amount))); @@ -103,7 +103,7 @@ private static Client createClient(Server server, AdaptiveRetryStrategy strategy return new Client(createJobs(jobs), server, strategy); } - public static List createJobs(int amount) { + private static List createJobs(int amount) { // We use a non-small but fixed size here instead of random ones to have a more predictable workload. int rows = 256; int cols = 256 + 128; @@ -142,13 +142,13 @@ static class Client { this.strategy = strategy; } - public void processAllJobs() { + void processAllJobs() { for (Job job : jobs) { process(job); } } - public void process(Job job) { + void process(Job job) { // submit job AcquireInitialTokenResponse response = strategy.acquireInitialToken(AcquireInitialTokenRequest.create("client")); RetryToken token = response.token(); @@ -222,7 +222,7 @@ void stop() { CompletableFuture.allOf(workers.toArray(new CompletableFuture[0])).join(); } - public void accept(Job job) { + void accept(Job job) { if (!jobQueue.offer(job)) { // No space left in the queue to take this job, throw a ThrottlingException to notify the // client about it and let him retry at a later time. @@ -238,6 +238,7 @@ static class ServerWorker implements Runnable { this.jobQueue = jobQueue; } + @Override public void run() { while (true) { try { diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyImpl.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyImpl.java index e29e73114a06..ad509a262194 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyImpl.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyImpl.java @@ -82,7 +82,7 @@ public AcquireInitialTokenResponse acquireInitialToken(AcquireInitialTokenReques @Override public RefreshRetryTokenResponse refreshRetryToken(RefreshRetryTokenRequest request) { DefaultRetryToken token = asStandardRetryToken(request.token()); - AcquireResponse acquireResponse = requestAcquireCapacity(request, token); + AcquireResponse acquireResponse = requestAcquireCapacity(token); // Check if we meet the preconditions needed for retrying. These will throw if the expected condition is not meet. // 1) is retryable? 
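The hunk above shows how refreshRetryToken() first asks the circuit-breaker token bucket for capacity and then runs the retry preconditions. For orientation only, here is a rough caller-side sketch of the token lifecycle these strategies expect: acquire an initial token for a scope, refresh it after each retryable failure, and record success at the end. This sketch is not part of the patch; the RetryLoopSketch class, the AttemptCallable helper and the "my-scope" value are made up for illustration, and the create/builder factory shapes on the request and response objects are assumed where the patch does not show them.

import java.time.Duration;
import software.amazon.awssdk.retries.api.AcquireInitialTokenRequest;
import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse;
import software.amazon.awssdk.retries.api.RecordSuccessRequest;
import software.amazon.awssdk.retries.api.RefreshRetryTokenRequest;
import software.amazon.awssdk.retries.api.RefreshRetryTokenResponse;
import software.amazon.awssdk.retries.api.RetryStrategy;
import software.amazon.awssdk.retries.api.RetryToken;
import software.amazon.awssdk.retries.api.TokenAcquisitionFailedException;

public final class RetryLoopSketch {
    // Hypothetical callable representing a single attempt of the wrapped operation.
    public interface AttemptCallable<T> {
        T call() throws Exception;
    }

    public static <T> T callWithRetries(RetryStrategy<?, ?> strategy, AttemptCallable<T> attempt) throws Exception {
        // 1) Acquire the initial token for this scope; the strategy may also suggest an initial delay.
        AcquireInitialTokenResponse acquired =
            strategy.acquireInitialToken(AcquireInitialTokenRequest.create("my-scope"));
        RetryToken token = acquired.token();
        sleep(acquired.delay());
        while (true) {
            try {
                T result = attempt.call();
                // 3) On success, report back so the strategy can release circuit-breaker capacity.
                strategy.recordSuccess(RecordSuccessRequest.create(token));
                return result;
            } catch (Exception failure) {
                RefreshRetryTokenResponse refreshed;
                try {
                    // 2) On failure, ask for permission to retry; this is where the retryable,
                    //    max-attempts and token-bucket preconditions from the hunk above are enforced.
                    refreshed = strategy.refreshRetryToken(RefreshRetryTokenRequest.builder()
                                                                                   .token(token)
                                                                                   .failure(failure)
                                                                                   .build());
                } catch (TokenAcquisitionFailedException retriesExhausted) {
                    throw failure;
                }
                token = refreshed.token();
                sleep(refreshed.delay());
            }
        }
    }

    private static void sleep(Duration delay) {
        try {
            Thread.sleep(delay.toMillis());
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
        }
    }
}

TokenAcquisitionFailedException is what the strategies in this patch throw when retries are exhausted or the bucket has no capacity left, so the sketch simply rethrows the original failure in that case.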
@@ -291,7 +291,7 @@ static DefaultRetryToken asStandardRetryToken(RetryToken token) { token.getClass().getName()); } - private AcquireResponse requestAcquireCapacity(RefreshRetryTokenRequest request, DefaultRetryToken token) { + private AcquireResponse requestAcquireCapacity(DefaultRetryToken token) { TokenBucket tokenBucket = tokenBucketStore.tokenBucketForScope(token.scope()); return tokenBucket.tryAcquire(exceptionCost); } diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyImpl.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyImpl.java index 4e8ce225d09f..7e86cb50ce93 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyImpl.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyImpl.java @@ -74,7 +74,7 @@ public AcquireInitialTokenResponse acquireInitialToken(AcquireInitialTokenReques @Override public RefreshRetryTokenResponse refreshRetryToken(RefreshRetryTokenRequest request) { DefaultRetryToken token = asStandardRetryToken(request.token()); - AcquireResponse acquireResponse = requestAcquireCapacity(request, token); + AcquireResponse acquireResponse = requestAcquireCapacity(token); // Check if we meet the preconditions needed for retrying. These will throw if the expected condition is not meet. // 1) is retryable? @@ -263,7 +263,7 @@ static DefaultRetryToken asStandardRetryToken(RetryToken token) { token.getClass().getName()); } - private AcquireResponse requestAcquireCapacity(RefreshRetryTokenRequest request, DefaultRetryToken token) { + private AcquireResponse requestAcquireCapacity(DefaultRetryToken token) { TokenBucket tokenBucket = tokenBucketStore.tokenBucketForScope(token.scope()); if (!circuitBreakerEnabled) { return tokenBucket.tryAcquire(0); diff --git a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyTest.java b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyTest.java index d452f31f6f91..115cd57085ef 100644 --- a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyTest.java +++ b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyTest.java @@ -44,7 +44,7 @@ // The tests here are the same set of test from the StandardRetryStrategy, both should be passing the same battery of tests. // Unfortunately It's not possible to create a single parametrized test on RetryStrategy given the different types required to // configure each strategy. 
-public class AdaptiveRetryStrategyTest { +class AdaptiveRetryStrategyTest { static final int TEST_BUCKET_CAPACITY = 100; static final int TEST_EXCEPTION_COST = 5; static final IllegalArgumentException IAE = new IllegalArgumentException(); @@ -52,7 +52,7 @@ public class AdaptiveRetryStrategyTest { @ParameterizedTest @MethodSource("parameters") - public void testCase(TestCase testCase) { + void testCase(TestCase testCase) { testCase.run(); if (testCase.shouldSucceed) { assertThat(testCase.thrown) @@ -68,7 +68,7 @@ public void testCase(TestCase testCase) { assertThat(testCase.token.state()).as(testCase.name).isEqualTo(testCase.expectedState); } - public static Collection parameters() { + static Collection parameters() { BackoffStrategy backoff = BackoffStrategy.exponentialDelay(Duration.ofMillis(10), Duration.ofSeconds(25)); return Arrays.asList( new TestCase("Succeeds when no exceptions are thrown") diff --git a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyMiscTest.java b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyMiscTest.java index 3beac79074aa..946ff2f5fa3b 100644 --- a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyMiscTest.java +++ b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyMiscTest.java @@ -28,14 +28,14 @@ * Tests that the circuit breaker remembers its previous state for separated * requests. */ -public class StandardRetryStrategyMiscTest { +class StandardRetryStrategyMiscTest { static final int TEST_EXCEPTION_COST = 5; static final int TEST_MAX = 50; static final IllegalArgumentException IAE = new IllegalArgumentException(); static final RuntimeException RTE = new RuntimeException(); @Test - public void circuitBreakerRemembersState() { + void circuitBreakerRemembersState() { BackoffStrategy backoff = BackoffStrategy.exponentialDelay(Duration.ofMillis(10), Duration.ofSeconds(25)); TestCase testCase = new TestCase("circuit breaker remembers state") .configure(b -> b.maxAttempts(3)) diff --git a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyTest.java b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyTest.java index a87b200389cc..3c3584e61771 100644 --- a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyTest.java +++ b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyTest.java @@ -40,7 +40,7 @@ import software.amazon.awssdk.retries.api.internal.AcquireInitialTokenRequestImpl; import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore; -public class StandardRetryStrategyTest { +class StandardRetryStrategyTest { static final int TEST_BUCKET_CAPACITY = 100; static final int TEST_EXCEPTION_COST = 5; static final IllegalArgumentException IAE = new IllegalArgumentException(); @@ -48,7 +48,7 @@ public class StandardRetryStrategyTest { @ParameterizedTest @MethodSource("parameters") - public void testCase(TestCase testCase) { + void testCase(TestCase testCase) { testCase.run(); if (testCase.shouldSucceed) { assertThat(testCase.thrown) @@ -65,7 +65,7 @@ public void testCase(TestCase testCase) { } - public static Collection parameters() { + static Collection parameters() { BackoffStrategy backoff = BackoffStrategy.exponentialDelay(Duration.ofMillis(10), Duration.ofSeconds(25)); return Arrays.asList( new TestCase("Succeeds when no exceptions are 
thrown") diff --git a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterTokenBucketTest.java b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterTokenBucketTest.java index fc91c6bcfa9f..14021ebde812 100644 --- a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterTokenBucketTest.java +++ b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/ratelimiter/RateLimiterTokenBucketTest.java @@ -24,7 +24,7 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; -public class RateLimiterTokenBucketTest { +class RateLimiterTokenBucketTest { private static MutableClock clock = null; private static RateLimiterTokenBucket tokenBucket = null; private static final double EPSILON = 0.0001; @@ -37,7 +37,7 @@ static void setup() { @ParameterizedTest @MethodSource("parameters") - public void testCase(TestCase testCase) { + void testCase(TestCase testCase) { clock.setCurrent(testCase.givenTimestamp); RateLimiterUpdateResponse res; tokenBucket.tryAcquire(); @@ -53,7 +53,7 @@ public void testCase(TestCase testCase) { } - public static Collection parameters() { + static Collection parameters() { return Arrays.asList( new TestCase() .givenSuccessResponse() From 3b1b7320b26ce3c3d482767d75c0df1afa19071a Mon Sep 17 00:00:00 2001 From: Manuel Sugawara Date: Wed, 24 May 2023 14:25:09 -0700 Subject: [PATCH 07/32] Add legacy retry strategy (#3988) * Add legacy retry strategy * Remove public modifiers from test classes to make SonarCloud happy * Fix another SonarCloud code smell * WIP * Address PR comments * Rename all the strategies to use Default prefix instead of Impl suffix * Address PR comments --- ...eRetryStrategyResourceConstrainedTest.java | 3 +- .../awssdk/retries/AdaptiveRetryStrategy.java | 7 +- .../awssdk/retries/DefaultRetryStrategy.java | 58 ++- .../awssdk/retries/LegacyRetryStrategy.java | 114 +++++ .../awssdk/retries/StandardRetryStrategy.java | 6 +- ...java => DefaultAdaptiveRetryStrategy.java} | 24 +- .../internal/DefaultLegacyRetryStrategy.java | 377 +++++++++++++++++ ...java => DefaultStandardRetryStrategy.java} | 28 +- .../internal/AdaptiveRetryStrategyTest.java | 12 +- .../internal/LegacyRetryStrategyTest.java | 106 +++++ ...ategyCircuitBreakerRemembersStateTest.java | 78 ++++ .../internal/RetryStrategyCommonTest.java | 395 ++++++++++++++++++ .../internal/StandardRetryStrategyTest.java | 10 +- 13 files changed, 1162 insertions(+), 56 deletions(-) create mode 100644 core/retries/src/main/java/software/amazon/awssdk/retries/LegacyRetryStrategy.java rename core/retries/src/main/java/software/amazon/awssdk/retries/internal/{AdaptiveRetryStrategyImpl.java => DefaultAdaptiveRetryStrategy.java} (95%) create mode 100644 core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultLegacyRetryStrategy.java rename core/retries/src/main/java/software/amazon/awssdk/retries/internal/{StandardRetryStrategyImpl.java => DefaultStandardRetryStrategy.java} (93%) create mode 100644 core/retries/src/test/java/software/amazon/awssdk/retries/internal/LegacyRetryStrategyTest.java create mode 100644 core/retries/src/test/java/software/amazon/awssdk/retries/internal/RetryStrategyCircuitBreakerRemembersStateTest.java create mode 100644 core/retries/src/test/java/software/amazon/awssdk/retries/internal/RetryStrategyCommonTest.java diff --git 
a/core/retries/src/it/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyResourceConstrainedTest.java b/core/retries/src/it/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyResourceConstrainedTest.java index 7c369c928a94..8ff75edb9411 100644 --- a/core/retries/src/it/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyResourceConstrainedTest.java +++ b/core/retries/src/it/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyResourceConstrainedTest.java @@ -19,7 +19,6 @@ import static org.assertj.core.api.Assertions.withinPercentage; import java.time.Duration; -import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.Random; @@ -63,7 +62,7 @@ void seemsToBeCorrectAndThreadSafe() { ExecutorService executor = Executors.newFixedThreadPool(parallelism); Server server = new Server(serverWorkers, executor); RateLimiterTokenBucketStore store = RateLimiterTokenBucketStore.builder().build(); - AdaptiveRetryStrategy strategy = AdaptiveRetryStrategyImpl + AdaptiveRetryStrategy strategy = DefaultAdaptiveRetryStrategy .builder() // We don't care about how many attempts we allow to, that logic is tested somewhere else. // so we give the strategy plenty of room for retries. diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/AdaptiveRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/AdaptiveRetryStrategy.java index 14ac2da01117..5606c66b5d12 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/AdaptiveRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/AdaptiveRetryStrategy.java @@ -21,7 +21,7 @@ import software.amazon.awssdk.retries.api.AcquireInitialTokenRequest; import software.amazon.awssdk.retries.api.BackoffStrategy; import software.amazon.awssdk.retries.api.RetryStrategy; -import software.amazon.awssdk.retries.internal.AdaptiveRetryStrategyImpl; +import software.amazon.awssdk.retries.internal.DefaultAdaptiveRetryStrategy; import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore; import software.amazon.awssdk.retries.internal.ratelimiter.RateLimiterTokenBucketStore; @@ -63,9 +63,9 @@ public interface AdaptiveRetryStrategy extends RetryStrategy */ static AdaptiveRetryStrategy.Builder builder() { - return AdaptiveRetryStrategyImpl + return DefaultAdaptiveRetryStrategy .builder() - .maxAttempts(DefaultRetryStrategy.Standard.MAX_ATTEMPTS) + .maxAttempts(DefaultRetryStrategy.Adaptive.MAX_ATTEMPTS) .tokenBucketStore(TokenBucketStore.builder() .tokenBucketMaxCapacity(DefaultRetryStrategy.Standard.TOKEN_BUCKET_SIZE) .build()) @@ -84,7 +84,6 @@ interface Builder extends RetryStrategy.Builder */ Builder treatAsThrottling(Predicate treatAsThrottling); - @Override AdaptiveRetryStrategy build(); } diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java index e90277edc69d..7de8cfc66722 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java @@ -30,16 +30,16 @@ private DefaultRetryStrategy() { } /** - * Create a new builder for a {@code StandardRetryStrategy}. + * Create a new builder for a {@link StandardRetryStrategy}. * *

      * <p>Example Usage
-     * <pre>
+     * {@snippet
      * StandardRetryStrategy retryStrategy =
-     *     RetryStrategies.adaptiveStrategyBuilder()
+     *     DefaultRetryStrategy.standardStrategyBuilder()
      *                    .retryOnExceptionInstanceOf(IllegalArgumentException.class)
      *                    .retryOnExceptionInstanceOf(IllegalStateException.class)
      *                    .build();
-     * </pre>
+     * }
      */
     public static StandardRetryStrategy.Builder standardStrategyBuilder() {
         return StandardRetryStrategy.builder()
@@ -48,18 +48,38 @@ public static StandardRetryStrategy.Builder standardStrategyBuilder() {
     }
 
     /**
-     * Create a new builder for a {@code AdaptiveRetryStrategy}.
+     * Create a new builder for a {@link LegacyRetryStrategy}.
      *
      * <p>Example Usage
-     * <pre>
+     * {@snippet
    +     * {@snippet
    +     * LegacyRetryStrategy retryStrategy =
    +     *     DefaultRetryStrategy.legacyStrategyBuilder()
    +     *                    .retryOnExceptionInstanceOf(IllegalArgumentException.class)
    +     *                    .retryOnExceptionInstanceOf(IllegalStateException.class)
    +     *                    .build();
    +     * }
    +     */
    +    public static LegacyRetryStrategy.Builder legacyStrategyBuilder() {
    +        return LegacyRetryStrategy.builder()
    +                                  .maxAttempts(Legacy.MAX_ATTEMPTS)
    +                                  .backoffStrategy(BackoffStrategy.exponentialDelay(Legacy.BASE_DELAY, Legacy.MAX_BACKOFF))
    +                                  .throttlingBackoffStrategy(BackoffStrategy.exponentialDelay(Legacy.THROTTLED_BASE_DELAY,
    +                                                                                              Legacy.MAX_BACKOFF));
    +    }
    +
    +    /**
    +     * Create a new builder for a {@link AdaptiveRetryStrategy}.
+     * <p>Example Usage
+     * {@snippet
      * AdaptiveRetryStrategy retryStrategy =
-     *     RetryStrategies.adaptiveStrategyBuilder()
+     *     DefaultRetryStrategy.adaptiveStrategyBuilder()
      *                    .retryOnExceptionInstanceOf(IllegalArgumentException.class)
      *                    .retryOnExceptionInstanceOf(IllegalStateException.class)
      *                    .build();
-     * </pre>
    + * } */ - public static AdaptiveRetryStrategy.Builder adaptiveRetryStrategyBuilder() { + public static AdaptiveRetryStrategy.Builder adaptiveStrategyBuilder() { return AdaptiveRetryStrategy.builder() .maxAttempts(Adaptive.MAX_ATTEMPTS); } @@ -70,9 +90,29 @@ static final class Standard { static final Duration MAX_BACKOFF = Duration.ofSeconds(20); static final int TOKEN_BUCKET_SIZE = 500; static final int DEFAULT_EXCEPTION_TOKEN_COST = 5; + + private Standard() { + } } static final class Adaptive { static final int MAX_ATTEMPTS = 3; + + private Adaptive() { + } + } + + static final class Legacy { + static final int MAX_ATTEMPTS = 4; + static final Duration BASE_DELAY = Duration.ofMillis(100); + static final Duration THROTTLED_BASE_DELAY = Duration.ofMillis(500); + + static final Duration MAX_BACKOFF = Duration.ofSeconds(20); + static final int TOKEN_BUCKET_SIZE = 500; + static final int DEFAULT_EXCEPTION_TOKEN_COST = 5; + static final int THROTTLE_EXCEPTION_TOKEN_COST = 0; + + private Legacy() { + } } } diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/LegacyRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/LegacyRetryStrategy.java new file mode 100644 index 000000000000..4ca93f8482c5 --- /dev/null +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/LegacyRetryStrategy.java @@ -0,0 +1,114 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries; + +import java.util.function.Predicate; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.retries.api.BackoffStrategy; +import software.amazon.awssdk.retries.api.RetryStrategy; +import software.amazon.awssdk.retries.internal.DefaultLegacyRetryStrategy; +import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore; + +/** + * The legacy retry strategy is a {@link RetryStrategy} for normal use-cases. + *

+ * <p>The legacy retry strategy by default:
+ * <ol>
+ *     <li>Retries on the conditions configured in the {@link Builder}.
+ *     <li>Retries 3 times (4 total attempts). Adjust with {@link Builder#maxAttempts(int)}
+ *     <li>For non-throttling exceptions uses the {@link BackoffStrategy#exponentialDelay} backoff strategy, with a base delay
+ *     of 100 milliseconds and max delay of 20 seconds. Adjust with {@link Builder#backoffStrategy}
+ *     <li>For throttling exceptions uses the {@link BackoffStrategy#exponentialDelay} backoff strategy, with a base delay of
+ *     500 milliseconds and max delay of 20 seconds. Adjust with {@link LegacyRetryStrategy.Builder#throttlingBackoffStrategy}
+ *     <li>Circuit breaking (disabling retries) in the event of high downstream failures across the scope of
+ *     the strategy. The circuit breaking will never prevent a successful first attempt. Adjust with
+ *     {@link Builder#circuitBreakerEnabled}
+ *     <li>The state of the circuit breaker is not affected by throttling exceptions
+ * </ol>
    + * + * @see StandardRetryStrategy + * @see AdaptiveRetryStrategy + */ +@SdkPublicApi +@ThreadSafe +public interface LegacyRetryStrategy extends RetryStrategy { + /** + * Create a new {@link LegacyRetryStrategy.Builder}. + * + *

+     * <p>Example Usage
+     * <pre>
+     * LegacyRetryStrategy retryStrategy =
+     *     LegacyRetryStrategy.builder()
+     *                          .retryOnExceptionInstanceOf(IllegalArgumentException.class)
+     *                          .retryOnExceptionInstanceOf(IllegalStateException.class)
+     *                          .build();
+     * </pre>
    + */ + static Builder builder() { + return DefaultLegacyRetryStrategy + .builder() + .maxAttempts(DefaultRetryStrategy.Legacy.MAX_ATTEMPTS) + .tokenBucketStore(TokenBucketStore + .builder() + .tokenBucketMaxCapacity(DefaultRetryStrategy.Legacy.TOKEN_BUCKET_SIZE) + .build()) + .tokenBucketExceptionCost(DefaultRetryStrategy.Legacy.DEFAULT_EXCEPTION_TOKEN_COST) + .tokenBucketThrottlingExceptionCost(DefaultRetryStrategy.Legacy.THROTTLE_EXCEPTION_TOKEN_COST); + } + + @Override + Builder toBuilder(); + + interface Builder extends RetryStrategy.Builder { + /** + * Configure the backoff strategy used by this strategy. + * + *

+     * <p>By default, this uses jittered exponential backoff.
+     */
+    Builder backoffStrategy(BackoffStrategy backoffStrategy);
+
+    /**
+     * Configure the backoff strategy used for throttling exceptions by this strategy.
+     *
+     * <p>By default, this uses jittered exponential backoff.
+     */
+    Builder throttlingBackoffStrategy(BackoffStrategy throttlingBackoffStrategy);
+
+    /**
+     * Whether circuit breaking is enabled for this strategy.
+     *
+     * <p>The circuit breaker will prevent attempts (even below the {@link #maxAttempts(int)}) if a large number of
+     * failures are observed by this executor.
+     *
+     * <p>Note: The circuit breaker scope is local to the created {@link RetryStrategy},
+     * and will therefore not be effective unless the {@link RetryStrategy} is used for more than one call. It's recommended
+     * that a {@link RetryStrategy} be reused for all calls to a single unreliable resource. It's also recommended that
+     * separate {@link RetryStrategy}s be used for calls to unrelated resources.
+     *
+     * <p>
    By default, this is {@code true}. + */ + Builder circuitBreakerEnabled(Boolean circuitBreakerEnabled); + + /** + * Configure the predicate to allow the strategy categorize a Throwable as throttling exception. + */ + Builder treatAsThrottling(Predicate treatAsThrottling); + + @Override + LegacyRetryStrategy build(); + } +} diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/StandardRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/StandardRetryStrategy.java index 1e9db6f9f7c3..ed7069162e2a 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/StandardRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/StandardRetryStrategy.java @@ -19,7 +19,7 @@ import software.amazon.awssdk.annotations.ThreadSafe; import software.amazon.awssdk.retries.api.BackoffStrategy; import software.amazon.awssdk.retries.api.RetryStrategy; -import software.amazon.awssdk.retries.internal.StandardRetryStrategyImpl; +import software.amazon.awssdk.retries.internal.DefaultStandardRetryStrategy; import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore; /** @@ -56,14 +56,14 @@ public interface StandardRetryStrategy extends RetryStrategy */ static Builder builder() { - return StandardRetryStrategyImpl + return DefaultStandardRetryStrategy .builder() .maxAttempts(DefaultRetryStrategy.Standard.MAX_ATTEMPTS) .tokenBucketStore(TokenBucketStore .builder() .tokenBucketMaxCapacity(DefaultRetryStrategy.Standard.TOKEN_BUCKET_SIZE) .build()) - .tokenBucketExceptionCost(DefaultRetryStrategy.Standard.TOKEN_BUCKET_SIZE); + .tokenBucketExceptionCost(DefaultRetryStrategy.Standard.DEFAULT_EXCEPTION_TOKEN_COST); } @Override diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyImpl.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultAdaptiveRetryStrategy.java similarity index 95% rename from core/retries/src/main/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyImpl.java rename to core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultAdaptiveRetryStrategy.java index ad509a262194..40d8820b1297 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyImpl.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultAdaptiveRetryStrategy.java @@ -47,8 +47,8 @@ * Implementation of the {@link AdaptiveRetryStrategy} interface. 
*/ @SdkInternalApi -public final class AdaptiveRetryStrategyImpl implements AdaptiveRetryStrategy { - private static final Logger LOG = Logger.loggerFor(AdaptiveRetryStrategyImpl.class); +public final class DefaultAdaptiveRetryStrategy implements AdaptiveRetryStrategy { + private static final Logger LOG = Logger.loggerFor(DefaultAdaptiveRetryStrategy.class); private final List> retryPredicates; private final int maxAttempts; private final boolean circuitBreakerEnabled; @@ -59,10 +59,10 @@ public final class AdaptiveRetryStrategyImpl implements AdaptiveRetryStrategy { private final TokenBucketStore tokenBucketStore; private final RateLimiterTokenBucketStore rateLimiterTokenBucketStore; - private AdaptiveRetryStrategyImpl(Builder builder) { + private DefaultAdaptiveRetryStrategy(Builder builder) { this.retryPredicates = Collections.unmodifiableList(Validate.paramNotNull(builder.retryPredicates, "retryPredicates")); this.maxAttempts = Validate.isPositive(builder.maxAttempts, "maxAttempts"); - this.circuitBreakerEnabled = builder.circuitBreakerEnabled; + this.circuitBreakerEnabled = builder.circuitBreakerEnabled == null || builder.circuitBreakerEnabled; this.backoffStrategy = Validate.paramNotNull(builder.backoffStrategy, "backoffStrategy"); this.exceptionCost = builder.exceptionCost; this.tokenBucketMaxCapacity = builder.tokenBucketMaxCapacity; @@ -293,6 +293,9 @@ static DefaultRetryToken asStandardRetryToken(RetryToken token) { private AcquireResponse requestAcquireCapacity(DefaultRetryToken token) { TokenBucket tokenBucket = tokenBucketStore.tokenBucketForScope(token.scope()); + if (!circuitBreakerEnabled) { + return tokenBucket.tryAcquire(0); + } return tokenBucket.tryAcquire(exceptionCost); } @@ -310,7 +313,7 @@ private DefaultRetryToken refreshToken(RefreshRetryTokenRequest request, Acquire public static class Builder implements AdaptiveRetryStrategy.Builder { private List> retryPredicates; private int maxAttempts; - private boolean circuitBreakerEnabled; + private Boolean circuitBreakerEnabled; private int tokenBucketMaxCapacity; private int exceptionCost; private Predicate treatAsThrottling; @@ -322,7 +325,7 @@ public static class Builder implements AdaptiveRetryStrategy.Builder { retryPredicates = new ArrayList<>(); } - Builder(AdaptiveRetryStrategyImpl strategy) { + Builder(DefaultAdaptiveRetryStrategy strategy) { this.retryPredicates = new ArrayList<>(strategy.retryPredicates); this.maxAttempts = strategy.maxAttempts; this.circuitBreakerEnabled = strategy.circuitBreakerEnabled; @@ -372,9 +375,14 @@ public Builder tokenBucketExceptionCost(int exceptionCost) { return this; } + public Builder circuitBreakerEnabled(Boolean circuitBreakerEnabled) { + this.circuitBreakerEnabled = circuitBreakerEnabled; + return this; + } + @Override - public AdaptiveRetryStrategyImpl build() { - return new AdaptiveRetryStrategyImpl(this); + public DefaultAdaptiveRetryStrategy build() { + return new DefaultAdaptiveRetryStrategy(this); } } } diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultLegacyRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultLegacyRetryStrategy.java new file mode 100644 index 000000000000..45cd30646a69 --- /dev/null +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultLegacyRetryStrategy.java @@ -0,0 +1,377 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.internal; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.function.Predicate; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.retries.LegacyRetryStrategy; +import software.amazon.awssdk.retries.api.AcquireInitialTokenRequest; +import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse; +import software.amazon.awssdk.retries.api.BackoffStrategy; +import software.amazon.awssdk.retries.api.RecordSuccessRequest; +import software.amazon.awssdk.retries.api.RecordSuccessResponse; +import software.amazon.awssdk.retries.api.RefreshRetryTokenRequest; +import software.amazon.awssdk.retries.api.RefreshRetryTokenResponse; +import software.amazon.awssdk.retries.api.RetryToken; +import software.amazon.awssdk.retries.api.TokenAcquisitionFailedException; +import software.amazon.awssdk.retries.api.internal.AcquireInitialTokenResponseImpl; +import software.amazon.awssdk.retries.api.internal.RefreshRetryTokenResponseImpl; +import software.amazon.awssdk.retries.internal.circuitbreaker.AcquireResponse; +import software.amazon.awssdk.retries.internal.circuitbreaker.ReleaseResponse; +import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucket; +import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; + +/** + * Implementation of the {@link LegacyRetryStrategy} interface. 
+ */ +@SdkInternalApi +public final class DefaultLegacyRetryStrategy implements LegacyRetryStrategy { + private static final Logger LOG = Logger.loggerFor(DefaultLegacyRetryStrategy.class); + + private final List> predicates; + private final int maxAttempts; + private final boolean circuitBreakerEnabled; + private final BackoffStrategy backoffStrategy; + private final BackoffStrategy throttlingBackoffStrategy; + private final int exceptionCost; + private final int throttlingExceptionCost; + private final Predicate treatAsThrottling; + private final TokenBucketStore tokenBucketStore; + + private DefaultLegacyRetryStrategy(Builder builder) { + this.predicates = Collections.unmodifiableList(Validate.paramNotNull(builder.predicates, "predicates")); + this.maxAttempts = Validate.isPositive(Validate.paramNotNull(builder.maxAttempts, "maxAttempts"), "maxAttempts"); + this.circuitBreakerEnabled = builder.circuitBreakerEnabled == null || builder.circuitBreakerEnabled; + this.backoffStrategy = Validate.paramNotNull(builder.backoffStrategy, "backoffStrategy"); + this.throttlingBackoffStrategy = Validate.paramNotNull(builder.throttlingBackoffStrategy, "throttlingBackoffStrategy"); + this.exceptionCost = Validate.paramNotNull(builder.exceptionCost, "exceptionCost"); + this.throttlingExceptionCost = Validate.paramNotNull(builder.throttlingExceptionCost, "throttlingExceptionCost"); + this.treatAsThrottling = Validate.paramNotNull(builder.treatAsThrottling, "treatAsThrottling"); + this.tokenBucketStore = Validate.paramNotNull(builder.tokenBucketStore, "tokenBucketStore"); + } + + @Override + public AcquireInitialTokenResponse acquireInitialToken(AcquireInitialTokenRequest request) { + logAcquireInitialToken(request); + return AcquireInitialTokenResponseImpl.create( + DefaultRetryToken.builder().scope(request.scope()).build(), Duration.ZERO); + } + + @Override + public RefreshRetryTokenResponse refreshRetryToken(RefreshRetryTokenRequest request) { + DefaultRetryToken token = asStandardRetryToken(request.token()); + AcquireResponse acquireResponse = requestAcquireCapacity(request, token); + + // Check if we meet the preconditions needed for retrying. These will throw if the expected condition is not meet. + // 1) is retryable? + throwOnNonRetryableException(request, acquireResponse); + // 2) max attempts reached? + throwOnMaxAttemptsReached(request, acquireResponse); + // 3) can we acquire a token? + throwOnAcquisitionFailure(request, acquireResponse); + + // Refresh the retry token and compute the backoff delay. + DefaultRetryToken refreshedToken = refreshToken(request, acquireResponse); + Duration finalDelay = computeBackoff(request, refreshedToken); + logRefreshTokenSuccess(refreshedToken, acquireResponse, finalDelay); + return RefreshRetryTokenResponseImpl.create(refreshedToken, finalDelay); + } + + @Override + public RecordSuccessResponse recordSuccess(RecordSuccessRequest request) { + DefaultRetryToken token = asStandardRetryToken(request.token()); + ReleaseResponse releaseResponse = updateCircuitBreakerTokenBucket(token); + DefaultRetryToken refreshedToken = refreshRetryTokenAfterSuccess(token, releaseResponse); + logRecordSuccess(token, releaseResponse); + return RecordSuccessResponse.create(refreshedToken); + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + /** + * Returns a builder to update this retry strategy. 
+ */ + public static Builder builder() { + return new Builder(); + } + + private Duration computeBackoff(RefreshRetryTokenRequest request, DefaultRetryToken token) { + Duration backoff; + if (treatAsThrottling.test(request.failure())) { + backoff = throttlingBackoffStrategy.computeDelay(token.attempt()); + } else { + backoff = backoffStrategy.computeDelay(token.attempt()); + } + // Take the max delay between the suggested delay and the backoff delay. + Duration suggested = request.suggestedDelay().orElse(Duration.ZERO); + return maxOf(suggested, backoff); + } + + private Duration maxOf(Duration left, Duration right) { + if (left.compareTo(right) >= 0) { + return left; + } + return right; + } + + private ReleaseResponse updateCircuitBreakerTokenBucket(DefaultRetryToken token) { + TokenBucket bucket = tokenBucketStore.tokenBucketForScope(token.scope()); + int capacityReleased = token.capacityAcquired(); + return bucket.release(capacityReleased); + } + + private DefaultRetryToken refreshRetryTokenAfterSuccess(DefaultRetryToken token, ReleaseResponse releaseResponse) { + return token.toBuilder() + .capacityAcquired(0) + .capacityRemaining(releaseResponse.currentCapacity()) + .state(DefaultRetryToken.TokenState.SUCCEEDED) + .build(); + } + + private void throwOnAcquisitionFailure(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { + DefaultRetryToken token = asStandardRetryToken(request.token()); + if (acquireResponse.acquisitionFailed()) { + Throwable failure = request.failure(); + DefaultRetryToken refreshedToken = + token.toBuilder() + .capacityRemaining(acquireResponse.capacityRemaining()) + .capacityAcquired(acquireResponse.capacityAcquired()) + .state(DefaultRetryToken.TokenState.TOKEN_ACQUISITION_FAILED) + .addFailure(failure) + .build(); + String message = acquisitionFailedMessage(acquireResponse); + LOG.debug(() -> message, failure); + throw new TokenAcquisitionFailedException(message, refreshedToken, failure); + } + } + + private void throwOnMaxAttemptsReached(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { + DefaultRetryToken token = asStandardRetryToken(request.token()); + if (maxAttemptsReached(token)) { + Throwable failure = request.failure(); + DefaultRetryToken refreshedToken = + token.toBuilder() + .capacityRemaining(acquireResponse.capacityRemaining()) + .capacityAcquired(acquireResponse.capacityAcquired()) + .state(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) + .addFailure(failure) + .build(); + String message = maxAttemptsReachedMessage(refreshedToken); + LOG.debug(() -> message, failure); + throw new TokenAcquisitionFailedException(message, refreshedToken, failure); + } + } + + private void throwOnNonRetryableException(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { + DefaultRetryToken token = asStandardRetryToken(request.token()); + Throwable failure = request.failure(); + if (isNonRetryableException(request)) { + String message = nonRetryableExceptionMessage(token); + LOG.error(() -> message, failure); + DefaultRetryToken refreshedToken = + token.toBuilder() + .capacityRemaining(acquireResponse.capacityRemaining()) + .capacityAcquired(acquireResponse.capacityAcquired()) + .state(DefaultRetryToken.TokenState.NON_RETRYABLE_EXCEPTION) + .addFailure(failure) + .build(); + throw new TokenAcquisitionFailedException(message, refreshedToken, failure); + } + LOG.debug(() -> nonRetryableExceptionMessage(token), failure); + } + + private String nonRetryableExceptionMessage(DefaultRetryToken token) { + return 
String.format("Request attempt %d encountered non-retryable failure", token.attempt()); + } + + private String maxAttemptsReachedMessage(DefaultRetryToken token) { + return String.format("Request will not be retried. Retries have been exhausted " + + "(cost: 0, capacity: %d/%d)", + token.capacityAcquired(), + token.capacityRemaining()); + } + + private String acquisitionFailedMessage(AcquireResponse acquireResponse) { + return String.format("Request will not be retried to protect the caller and downstream service. " + + "The cost of retrying (%d) " + + "exceeds the available retry capacity (%d/%d).", + acquireResponse.capacityRequested(), + acquireResponse.capacityRemaining(), + acquireResponse.maxCapacity()); + } + + private void logAcquireInitialToken(AcquireInitialTokenRequest request) { + // Request attempt 1 token acquired (backoff: 0ms, cost: 0, capacity: 500/500) + TokenBucket tokenBucket = tokenBucketStore.tokenBucketForScope(request.scope()); + LOG.debug(() -> String.format("Request attempt 1 token acquired " + + "(backoff: 0ms, cost: 0, capacity: %d/%d)", + tokenBucket.currentCapacity(), tokenBucket.maxCapacity())); + } + + private void logRefreshTokenSuccess(DefaultRetryToken token, AcquireResponse acquireResponse, Duration delay) { + LOG.debug(() -> String.format("Request attempt %d token acquired " + + "(backoff: %dms, cost: %d, capacity: %d/%d)", + token.attempt(), delay.toMillis(), + acquireResponse.capacityAcquired(), + acquireResponse.capacityRemaining(), + acquireResponse.maxCapacity())); + } + + private void logRecordSuccess(DefaultRetryToken token, ReleaseResponse release) { + LOG.debug(() -> String.format("Request attempt %d succeeded (cost: -%d, capacity: %d/%d)", + token.attempt(), release.capacityReleased(), + release.currentCapacity(), release.maxCapacity())); + + } + + private boolean maxAttemptsReached(DefaultRetryToken token) { + return token.attempt() >= maxAttempts; + } + + private boolean isNonRetryableException(RefreshRetryTokenRequest request) { + Throwable failure = request.failure(); + for (Predicate predicate : predicates) { + if (predicate.test(failure)) { + return false; + } + } + return true; + } + + static DefaultRetryToken asStandardRetryToken(RetryToken token) { + return Validate.isInstanceOf(DefaultRetryToken.class, token, + "RetryToken is of unexpected class (%s), " + + "This token was not created by this retry strategy.", + token.getClass().getName()); + } + + private AcquireResponse requestAcquireCapacity(RefreshRetryTokenRequest request, DefaultRetryToken token) { + TokenBucket tokenBucket = tokenBucketStore.tokenBucketForScope(token.scope()); + int amountToAcquire = 0; + if (circuitBreakerEnabled) { + if (treatAsThrottling.test(request.failure())) { + amountToAcquire = throttlingExceptionCost; + } else { + amountToAcquire = exceptionCost; + } + } + return tokenBucket.tryAcquire(amountToAcquire); + } + + private DefaultRetryToken refreshToken(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { + DefaultRetryToken token = asStandardRetryToken(request.token()); + return token.toBuilder() + .increaseAttempt() + .state(DefaultRetryToken.TokenState.IN_PROGRESS) + .capacityAcquired(acquireResponse.capacityAcquired()) + .capacityRemaining(acquireResponse.capacityRemaining()) + .addFailure(request.failure()) + .build(); + } + + public static class Builder implements LegacyRetryStrategy.Builder { + private List> predicates; + private Integer maxAttempts; + private Boolean circuitBreakerEnabled; + private Integer exceptionCost; + 
private Integer throttlingExceptionCost; + private Predicate treatAsThrottling; + private BackoffStrategy backoffStrategy; + private BackoffStrategy throttlingBackoffStrategy; + private TokenBucketStore tokenBucketStore; + + Builder() { + predicates = new ArrayList<>(); + } + + Builder(DefaultLegacyRetryStrategy strategy) { + this.predicates = new ArrayList<>(strategy.predicates); + this.maxAttempts = strategy.maxAttempts; + this.circuitBreakerEnabled = strategy.circuitBreakerEnabled; + this.exceptionCost = strategy.exceptionCost; + this.throttlingExceptionCost = strategy.throttlingExceptionCost; + this.treatAsThrottling = strategy.treatAsThrottling; + this.backoffStrategy = strategy.backoffStrategy; + this.throttlingBackoffStrategy = strategy.throttlingBackoffStrategy; + this.tokenBucketStore = strategy.tokenBucketStore; + } + + @Override + public Builder retryOnException(Predicate shouldRetry) { + this.predicates.add(shouldRetry); + return this; + } + + @Override + public Builder maxAttempts(int maxAttempts) { + this.maxAttempts = maxAttempts; + return this; + } + + @Override + public Builder circuitBreakerEnabled(Boolean circuitBreakerEnabled) { + this.circuitBreakerEnabled = circuitBreakerEnabled; + return this; + } + + @Override + public Builder backoffStrategy(BackoffStrategy backoffStrategy) { + this.backoffStrategy = backoffStrategy; + return this; + } + + @Override + public Builder treatAsThrottling(Predicate treatAsThrottling) { + this.treatAsThrottling = treatAsThrottling; + return this; + } + + @Override + public Builder throttlingBackoffStrategy(BackoffStrategy throttlingBackoffStrategy) { + this.throttlingBackoffStrategy = throttlingBackoffStrategy; + return this; + } + + public Builder tokenBucketExceptionCost(int exceptionCost) { + this.exceptionCost = exceptionCost; + return this; + } + + public Builder tokenBucketThrottlingExceptionCost(int throttlingExceptionCost) { + this.throttlingExceptionCost = throttlingExceptionCost; + return this; + } + + public Builder tokenBucketStore(TokenBucketStore tokenBucketStore) { + this.tokenBucketStore = tokenBucketStore; + return this; + } + + @Override + public DefaultLegacyRetryStrategy build() { + return new DefaultLegacyRetryStrategy(this); + } + } +} diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyImpl.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultStandardRetryStrategy.java similarity index 93% rename from core/retries/src/main/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyImpl.java rename to core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultStandardRetryStrategy.java index 7e86cb50ce93..abee804a4def 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyImpl.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultStandardRetryStrategy.java @@ -45,8 +45,8 @@ * Implementation of the {@link StandardRetryStrategy} interface. 
*/ @SdkInternalApi -public final class StandardRetryStrategyImpl implements StandardRetryStrategy { - private static final Logger LOG = Logger.loggerFor(StandardRetryStrategyImpl.class); +public final class DefaultStandardRetryStrategy implements StandardRetryStrategy { + private static final Logger LOG = Logger.loggerFor(DefaultStandardRetryStrategy.class); private final List> predicates; private final int maxAttempts; @@ -55,10 +55,10 @@ public final class StandardRetryStrategyImpl implements StandardRetryStrategy { private final int exceptionCost; private final TokenBucketStore tokenBucketStore; - private StandardRetryStrategyImpl(Builder builder) { + private DefaultStandardRetryStrategy(Builder builder) { this.predicates = Collections.unmodifiableList(Validate.paramNotNull(builder.predicates, "predicates")); this.maxAttempts = Validate.isPositive(builder.maxAttempts, "maxAttempts"); - this.circuitBreakerEnabled = builder.circuitBreakerEnabled; + this.circuitBreakerEnabled = builder.circuitBreakerEnabled == null || builder.circuitBreakerEnabled; this.backoffStrategy = Validate.paramNotNull(builder.backoffStrategy, "backoffStrategy"); this.exceptionCost = builder.exceptionCost; this.tokenBucketStore = Validate.paramNotNull(builder.tokenBucketStore, "tokenBucketStore"); @@ -283,25 +283,19 @@ private DefaultRetryToken refreshToken(RefreshRetryTokenRequest request, Acquire } public static class Builder implements StandardRetryStrategy.Builder { - private static final int DEFAULT_EXCEPTION_TOKEN_COST = 5; private static final int DEFAULT_TOKEN_BUCKET_SIZE = 500; private List> predicates; private int maxAttempts; - private boolean circuitBreakerEnabled; + private Boolean circuitBreakerEnabled; private int exceptionCost; private BackoffStrategy backoffStrategy; private TokenBucketStore tokenBucketStore; Builder() { predicates = new ArrayList<>(); - exceptionCost = DEFAULT_EXCEPTION_TOKEN_COST; - circuitBreakerEnabled = true; - tokenBucketStore = TokenBucketStore.builder() - .tokenBucketMaxCapacity(DEFAULT_TOKEN_BUCKET_SIZE) - .build(); } - Builder(StandardRetryStrategyImpl strategy) { + Builder(DefaultStandardRetryStrategy strategy) { this.predicates = new ArrayList<>(strategy.predicates); this.maxAttempts = strategy.maxAttempts; this.circuitBreakerEnabled = strategy.circuitBreakerEnabled; @@ -324,11 +318,7 @@ public Builder maxAttempts(int maxAttempts) { @Override public Builder circuitBreakerEnabled(Boolean circuitBreakerEnabled) { - if (circuitBreakerEnabled == null) { - this.circuitBreakerEnabled = true; - } else { - this.circuitBreakerEnabled = circuitBreakerEnabled; - } + this.circuitBreakerEnabled = circuitBreakerEnabled; return this; } @@ -349,8 +339,8 @@ public Builder tokenBucketStore(TokenBucketStore tokenBucketStore) { } @Override - public StandardRetryStrategyImpl build() { - return new StandardRetryStrategyImpl(this); + public DefaultStandardRetryStrategy build() { + return new DefaultStandardRetryStrategy(this); } } } diff --git a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyTest.java b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyTest.java index 115cd57085ef..41acc911cf24 100644 --- a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyTest.java +++ b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyTest.java @@ -164,9 +164,9 @@ static class TestCase { int attempts = 0; String scope = "none"; List exceptions = new 
ArrayList<>(); - AdaptiveRetryStrategyImpl.Builder builder = - (AdaptiveRetryStrategyImpl.Builder) - DefaultRetryStrategy.adaptiveRetryStrategyBuilder(); + DefaultAdaptiveRetryStrategy.Builder builder = + (DefaultAdaptiveRetryStrategy.Builder) + DefaultRetryStrategy.adaptiveStrategyBuilder(); Throwable thrown; boolean shouldSucceed = false; boolean succeeded; @@ -189,12 +189,12 @@ static class TestCase { public TestCase configure(Function configurator) { - this.builder = (AdaptiveRetryStrategyImpl.Builder) configurator.apply(this.builder); + this.builder = (DefaultAdaptiveRetryStrategy.Builder) configurator.apply(this.builder); return this; } - public TestCase fineTune(Function configurator) { + public TestCase fineTune(Function configurator) { this.builder = configurator.apply(this.builder); return this; } diff --git a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/LegacyRetryStrategyTest.java b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/LegacyRetryStrategyTest.java new file mode 100644 index 000000000000..05168677240d --- /dev/null +++ b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/LegacyRetryStrategyTest.java @@ -0,0 +1,106 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.retries.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.retries.internal.RetryStrategyCommonTest.IAE; +import static software.amazon.awssdk.retries.internal.RetryStrategyCommonTest.TEST_BUCKET_CAPACITY; +import static software.amazon.awssdk.retries.internal.RetryStrategyCommonTest.TEST_EXCEPTION_COST; + +import java.time.Duration; +import java.util.Arrays; +import java.util.List; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.retries.api.BackoffStrategy; +import software.amazon.awssdk.retries.internal.RetryStrategyCommonTest.TestCase; +import software.amazon.awssdk.retries.internal.RetryStrategyCommonTest.TestCaseForLegacy; + +class LegacyRetryStrategyTest { + static final ThrottlingException THROTTLING = new ThrottlingException(); + static final long THROTTLING_BACKOFF_BASE = 17; + static final long BACKOFF_BASE = 23; + + @ParameterizedTest + @MethodSource("parameters") + void testCase(TestCase testCase) { + testCase.run(); + if (testCase.shouldSucceed) { + assertThat(testCase.thrown) + .as(testCase.name) + .isNull(); + } else { + assertThat(testCase.thrown) + .as(testCase.name) + .isNotNull(); + } + assertThat(testCase.succeeded).as(testCase.name).isEqualTo(testCase.shouldSucceed); + assertThat(testCase.token.capacityRemaining()).as(testCase.name).isEqualTo(testCase.expectedCapacity); + assertThat(testCase.token.state()).as(testCase.name).isEqualTo(testCase.expectedState); + if (testCase.expectedLastRecordedDelay != null) { + assertThat(testCase.lastRecordedDelay).as(testCase.name).isEqualTo(testCase.expectedLastRecordedDelay); + } + } + + static List parameters() { + return Arrays.asList( + legacyTestCase("Does not withdraws capacity for throttling exception") + .givenExceptions(THROTTLING, THROTTLING, THROTTLING) + .expectCapacity(TEST_BUCKET_CAPACITY) + .expectState(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) + .expectLastRecordedDelay(Duration.ofSeconds(THROTTLING_BACKOFF_BASE * 3)) + .expectThrows() + , legacyTestCase("Only withdraws capacity for non-throttling exception") + .givenExceptions(THROTTLING, IAE, THROTTLING) + .expectCapacity(TEST_BUCKET_CAPACITY - TEST_EXCEPTION_COST) + .expectState(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) + .expectLastRecordedDelay(Duration.ofSeconds(BACKOFF_BASE * 3)) + .expectThrows() + , legacyTestCase("Uses throttling backoff strategy for throttling exceptions") + .givenExceptions(THROTTLING, THROTTLING, THROTTLING) + .expectCapacity(TEST_BUCKET_CAPACITY) + .expectState(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) + .expectLastRecordedDelay(Duration.ofSeconds(THROTTLING_BACKOFF_BASE * 3)) + .expectThrows() + , legacyTestCase("Uses regular backoff strategy for non-throttling exceptions") + .givenExceptions(THROTTLING, IAE, THROTTLING) + .expectCapacity(TEST_BUCKET_CAPACITY - TEST_EXCEPTION_COST) + .expectState(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) + .expectLastRecordedDelay(Duration.ofSeconds(BACKOFF_BASE * 3)) + .expectThrows() + ); + } + + static TestCaseForLegacy legacyTestCase(String name) { + TestCaseForLegacy testCase = new TestCaseForLegacy(name); + testCase.configureTreatAsThrottling(t -> t instanceof ThrottlingException) + .configureThrottlingBackoffStrategy(ofBaseTimesAttempt(THROTTLING_BACKOFF_BASE)) + .configureBackoffStrategy(ofBaseTimesAttempt(BACKOFF_BASE)) + .configureTokenBucketExceptionCost(TEST_EXCEPTION_COST) 
+ .configureTokenBucketMaxCapacity(TEST_BUCKET_CAPACITY) + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(ThrottlingException.class)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)); + return testCase; + } + + static BackoffStrategy ofBaseTimesAttempt(long base) { + return attempt -> Duration.ofSeconds(base * attempt); + } + + static class ThrottlingException extends Exception { + } +} diff --git a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/RetryStrategyCircuitBreakerRemembersStateTest.java b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/RetryStrategyCircuitBreakerRemembersStateTest.java new file mode 100644 index 000000000000..92ca99e1c9d5 --- /dev/null +++ b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/RetryStrategyCircuitBreakerRemembersStateTest.java @@ -0,0 +1,78 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.retries.internal; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.time.Duration; +import java.util.Arrays; +import java.util.List; +import java.util.function.Function; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.retries.api.BackoffStrategy; +import software.amazon.awssdk.retries.api.RetryStrategy; +import software.amazon.awssdk.retries.internal.RetryStrategyCommonTest.TestCase; + +/** + * Tests that the configured circuit breaker for each of the strategies remembers + * state across requests. + */ +class RetryStrategyCircuitBreakerRemembersStateTest { + static final int TEST_EXCEPTION_COST = 5; + static final int TEST_MAX = 50; + static final IllegalArgumentException IAE = new IllegalArgumentException(); + + @ParameterizedTest + @MethodSource("parameters") + void circuitBreakerRemembersState(Function defaultTestCaseSupplier) { + BackoffStrategy backoff = BackoffStrategy.exponentialDelay(Duration.ofMillis(10), Duration.ofSeconds(25)); + TestCase testCase = defaultTestCaseSupplier.apply("circuit breaker remembers state") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .configureTokenBucketExceptionCost(TEST_EXCEPTION_COST) + .configureTokenBucketMaxCapacity(TEST_MAX) + .givenExceptions(IAE, IAE); + + // The test case will throw twice and then succeed, so each run will withdraw 2 * TEST_EXCEPTION_COST and deposit back + // TEST_EXCEPTION_COST. 
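+ // With TEST_MAX = 50 and TEST_EXCEPTION_COST = 5 that is a net drain of 5 tokens per run: after the nine
+ // runs below the bucket holds 50 - 9 * 5 = 5 tokens, enough to pay for only one more failure, which is why
+ // the final run ends in TOKEN_ACQUISITION_FAILED with zero capacity remaining.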
+ RetryStrategy strategy = testCase.builder.build(); + int total = TEST_MAX; + for (int idx = 0; idx < 9; idx++) { + String name = testCase.name + " round " + idx; + TestCase.runTestCase(testCase, strategy); + assertThat(testCase.thrown).as(name).isNull(); + assertThat(testCase.succeeded).as(name).isTrue(); + assertThat(testCase.token.capacityRemaining()).as(name).isEqualTo(total - TEST_EXCEPTION_COST); + assertThat(testCase.token.state()).as(name).isEqualTo(DefaultRetryToken.TokenState.SUCCEEDED); + total -= TEST_EXCEPTION_COST; + } + // The tokens have been exhausted, assert that the next call will fail. + String name = testCase.name + " no more tokens available"; + TestCase.runTestCase(testCase, strategy); + assertThat(testCase.thrown).as(name).isNotNull(); + assertThat(testCase.succeeded).as(name).isFalse(); + assertThat(testCase.token.capacityRemaining()).as(name).isZero(); + assertThat(testCase.token.state()).as(name).isEqualTo(DefaultRetryToken.TokenState.TOKEN_ACQUISITION_FAILED); + } + + static List> parameters() { + return Arrays.asList( + RetryStrategyCommonTest.TestCaseForLegacy::new, + RetryStrategyCommonTest.TestCaseForStandard::new, + RetryStrategyCommonTest.TestCaseForAdaptive::new); + } +} diff --git a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/RetryStrategyCommonTest.java b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/RetryStrategyCommonTest.java new file mode 100644 index 000000000000..6ec0dfcdf6cf --- /dev/null +++ b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/RetryStrategyCommonTest.java @@ -0,0 +1,395 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+
+package software.amazon.awssdk.retries.internal;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import java.util.stream.Stream;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+import software.amazon.awssdk.retries.DefaultRetryStrategy;
+import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse;
+import software.amazon.awssdk.retries.api.BackoffStrategy;
+import software.amazon.awssdk.retries.api.RecordSuccessRequest;
+import software.amazon.awssdk.retries.api.RecordSuccessResponse;
+import software.amazon.awssdk.retries.api.RefreshRetryTokenRequest;
+import software.amazon.awssdk.retries.api.RefreshRetryTokenResponse;
+import software.amazon.awssdk.retries.api.RetryStrategy;
+import software.amazon.awssdk.retries.api.RetryToken;
+import software.amazon.awssdk.retries.api.TokenAcquisitionFailedException;
+import software.amazon.awssdk.retries.api.internal.AcquireInitialTokenRequestImpl;
+import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore;
+
+/**
+ * Common test cases that all retry strategies should satisfy.
+ */
+class RetryStrategyCommonTest {
+
+    static final int TEST_BUCKET_CAPACITY = 100;
+    static final int TEST_EXCEPTION_COST = 5;
+    static final IllegalArgumentException IAE = new IllegalArgumentException();
+    static final RuntimeException RTE = new RuntimeException();
+
+    @ParameterizedTest
+    @MethodSource("parameters")
+    void testCase(TestCase testCase) {
+        testCase.run();
+        if (testCase.shouldSucceed) {
+            assertThat(testCase.thrown)
+                .as(testCase.name)
+                .isNull();
+        } else {
+            assertThat(testCase.thrown)
+                .as(testCase.name)
+                .isNotNull();
+        }
+        assertThat(testCase.succeeded).as(testCase.name).isEqualTo(testCase.shouldSucceed);
+        assertThat(testCase.token.capacityRemaining()).as(testCase.name).isEqualTo(testCase.expectedCapacity);
+        assertThat(testCase.token.state()).as(testCase.name).isEqualTo(testCase.expectedState);
+    }
+
+    static Stream parameters() {
+        return Stream.concat(
+            Stream.concat(buildCases(TestCaseForLegacy::new),
+                          buildCases(TestCaseForStandard::new)),
+            buildCases(TestCaseForAdaptive::new));
+    }
+
+    static Stream buildCases(Function defaultTestCaseSupplier) {
+        // Configure with well-known values to be able to assert on these without relying on any configured defaults.
+ Function testCaseSupplier = + defaultTestCaseSupplier.andThen(t -> t.configureTokenBucketExceptionCost(TEST_EXCEPTION_COST) + .configureTokenBucketMaxCapacity(TEST_BUCKET_CAPACITY)); + return Stream.of( + testCaseSupplier.apply("Succeeds when no exceptions are thrown") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .expectCapacity(TEST_BUCKET_CAPACITY) + .expectState(DefaultRetryToken.TokenState.SUCCEEDED) + .expectSuccess() + , testCaseSupplier.apply("Succeeds when 1 exception is thrown out max of 3") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .givenExceptions(IAE) + // Acquire cost and then return cost + .expectCapacity(TEST_BUCKET_CAPACITY) + .expectState(DefaultRetryToken.TokenState.SUCCEEDED) + .expectSuccess() + , testCaseSupplier.apply("Succeeds when 2 exceptions are thrown out max of 3") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .givenExceptions(IAE, IAE) + // Acquire (cost * 2) and then return cost + .expectCapacity(TEST_BUCKET_CAPACITY - TEST_EXCEPTION_COST) + .expectState(DefaultRetryToken.TokenState.SUCCEEDED) + .expectSuccess() + , testCaseSupplier.apply("Fails when 3 exceptions are thrown out max of 3") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .givenExceptions(IAE, IAE, IAE) + // Acquire (cost * 3) and then return zero + .expectCapacity(TEST_BUCKET_CAPACITY - (TEST_EXCEPTION_COST * 3)) + .expectState(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) + .expectThrows() + , testCaseSupplier.apply("Fails when 4 exceptions are thrown out max of 3") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .givenExceptions(IAE, IAE, IAE, IAE) + // Acquire (cost * 3) and then return zero + .expectCapacity(TEST_BUCKET_CAPACITY - (TEST_EXCEPTION_COST * 3)) + .expectState(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) + .expectThrows() + , testCaseSupplier.apply("Fails when non-retryable exception throw in the 1st attempt") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .givenExceptions(RTE) + // Acquire (cost * 1) and then return zero + .expectCapacity(TEST_BUCKET_CAPACITY - TEST_EXCEPTION_COST) + .expectState(DefaultRetryToken.TokenState.NON_RETRYABLE_EXCEPTION) + .expectThrows() + , testCaseSupplier.apply("Fails when non-retryable exception throw in the 2nd attempt") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .givenExceptions(IAE, RTE) + // Acquire (cost * 1) and then return zero + .expectCapacity(TEST_BUCKET_CAPACITY - (TEST_EXCEPTION_COST * 2)) + .expectState(DefaultRetryToken.TokenState.NON_RETRYABLE_EXCEPTION) + .expectThrows() + , testCaseSupplier.apply("Exhausts the token bucket.") + .configure(b -> b.maxAttempts(5)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .configureTokenBucketMaxCapacity(10) + .givenExceptions(IAE, IAE, IAE) + .expectCapacity(0) + .expectState(DefaultRetryToken.TokenState.TOKEN_ACQUISITION_FAILED) + .expectThrows() + , testCaseSupplier.apply("Succeeds when 2 exceptions are thrown out max of 3") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + // Setting exception cost to ZERO disables the circuit-breaker + 
.configureTokenBucketExceptionCost(0) + .givenExceptions(IAE, IAE) + // Acquired zero, capacity must be unchanged. + .expectCapacity(TEST_BUCKET_CAPACITY) + .expectState(DefaultRetryToken.TokenState.SUCCEEDED) + .expectSuccess() + , testCaseSupplier.apply("Succeeds when 2 exceptions are thrown out max of 3") + .configure(b -> b.maxAttempts(3)) + .configure(b -> b.retryOnException(IllegalArgumentException.class)) + .configureCircuitBreakerEnabled(false) + .givenExceptions(IAE, IAE) + // Acquired zero, capacity must be unchanged. + .expectCapacity(TEST_BUCKET_CAPACITY) + .expectState(DefaultRetryToken.TokenState.SUCCEEDED) + .expectSuccess() + ); + } + + + abstract static class TestCase { + final String name; + int attempts = 0; + String scope = "none"; + List exceptions = new ArrayList<>(); + RetryStrategy.Builder builder; + Throwable thrown; + boolean shouldSucceed = false; + boolean succeeded; + Integer expectedCapacity; + DefaultRetryToken.TokenState expectedState; + Duration expectedLastRecordedDelay; + DefaultRetryToken token; + Duration lastRecordedDelay; + + TestCase(String name, RetryStrategy.Builder builder) { + this.name = name; + this.builder = builder; + } + + + public TestCase configure(Function, + RetryStrategy.Builder> configurator) { + this.builder = configurator.apply(this.builder); + return this; + } + + public abstract TestCase configureTokenBucketMaxCapacity(int maxCapacity); + + public abstract TestCase configureTokenBucketExceptionCost(int exceptionCost); + + public abstract TestCase configureCircuitBreakerEnabled(boolean enabled); + + public TestCase givenExceptions(Exception... exceptions) { + Collections.addAll(this.exceptions, exceptions); + return this; + } + + public TestCase expectSuccess() { + this.shouldSucceed = true; + return this; + } + + public TestCase expectThrows() { + this.shouldSucceed = false; + return this; + } + + public TestCase expectCapacity(Integer expectedCapacity) { + this.expectedCapacity = expectedCapacity; + return this; + } + + public TestCase expectState(DefaultRetryToken.TokenState expectedState) { + this.expectedState = expectedState; + return this; + } + + public TestCase expectLastRecordedDelay(Duration delay) { + this.expectedLastRecordedDelay = delay; + return this; + } + + public void run() { + RetryStrategy strategy = builder.build(); + runTestCase(this, strategy); + } + + public static void runTestCase(TestCase testCase, RetryStrategy strategy) { + AcquireInitialTokenResponse res = strategy.acquireInitialToken(AcquireInitialTokenRequestImpl.create(testCase.scope)); + RetryToken token = res.token(); + testCase.succeeded = false; + BusinessLogic logic = new BusinessLogic(testCase.exceptions); + try { + while (!testCase.succeeded) { + try { + logic.call(); + testCase.succeeded = true; + RecordSuccessResponse response = strategy.recordSuccess(RecordSuccessRequest.create(token)); + token = response.token(); + testCase.token = (DefaultRetryToken) token; + } catch (Exception e) { + RefreshRetryTokenResponse refreshResponse = + strategy.refreshRetryToken(RefreshRetryTokenRequest.builder() + .token(token) + .failure(e) + .build()); + testCase.lastRecordedDelay = refreshResponse.delay(); + token = refreshResponse.token(); + } + } + } catch (TokenAcquisitionFailedException e) { + testCase.thrown = e; + testCase.succeeded = false; + testCase.token = (DefaultRetryToken) e.token(); + } + } + } + + static class TestCaseForStandard extends TestCase { + + TestCaseForStandard(String name) { + super("TestCaseForStandard:: " + name, 
DefaultRetryStrategy.standardStrategyBuilder()); + } + + @Override + public TestCase configureTokenBucketMaxCapacity(int maxCapacity) { + ((DefaultStandardRetryStrategy.Builder) builder).tokenBucketStore( + TokenBucketStore + .builder() + .tokenBucketMaxCapacity(maxCapacity) + .build()); + return this; + } + + @Override + public TestCase configureTokenBucketExceptionCost(int exceptionCost) { + ((DefaultStandardRetryStrategy.Builder) builder).tokenBucketExceptionCost(exceptionCost); + return this; + } + + @Override + public TestCase configureCircuitBreakerEnabled(boolean enabled) { + ((DefaultStandardRetryStrategy.Builder) builder).circuitBreakerEnabled(enabled); + return this; + } + } + + static class TestCaseForLegacy extends TestCase { + TestCaseForLegacy(String name) { + super("TestCaseForLegacy:: " + name, + DefaultRetryStrategy.legacyStrategyBuilder() + .treatAsThrottling(t -> false)); + } + + @Override + public TestCase configureTokenBucketMaxCapacity(int maxCapacity) { + ((DefaultLegacyRetryStrategy.Builder) builder).tokenBucketStore( + TokenBucketStore + .builder() + .tokenBucketMaxCapacity(maxCapacity) + .build()); + return this; + } + + @Override + public TestCase configureTokenBucketExceptionCost(int exceptionCost) { + ((DefaultLegacyRetryStrategy.Builder) builder).tokenBucketExceptionCost(exceptionCost); + return this; + } + + @Override + public TestCase configureCircuitBreakerEnabled(boolean enabled) { + ((DefaultLegacyRetryStrategy.Builder) builder).circuitBreakerEnabled(enabled); + return this; + } + + public TestCaseForLegacy configureTreatAsThrottling(Predicate isThrottling) { + ((DefaultLegacyRetryStrategy.Builder) builder).treatAsThrottling(isThrottling); + return this; + } + + public TestCaseForLegacy configureThrottlingBackoffStrategy(BackoffStrategy backoffStrategy) { + ((DefaultLegacyRetryStrategy.Builder) builder).throttlingBackoffStrategy(backoffStrategy); + return this; + } + + public TestCaseForLegacy configureBackoffStrategy(BackoffStrategy backoffStrategy) { + ((DefaultLegacyRetryStrategy.Builder) builder).backoffStrategy(backoffStrategy); + return this; + } + } + + static class TestCaseForAdaptive extends TestCase { + + TestCaseForAdaptive(String name) { + super("TestCaseForAdaptive:: " + name, + DefaultRetryStrategy.adaptiveStrategyBuilder() + .treatAsThrottling(t -> false)); + } + + @Override + public TestCase configureTokenBucketMaxCapacity(int maxCapacity) { + ((DefaultAdaptiveRetryStrategy.Builder) builder).tokenBucketStore( + TokenBucketStore + .builder() + .tokenBucketMaxCapacity(maxCapacity) + .build()); + return this; + } + + @Override + public TestCase configureTokenBucketExceptionCost(int exceptionCost) { + ((DefaultAdaptiveRetryStrategy.Builder) builder).tokenBucketExceptionCost(exceptionCost); + return this; + } + + @Override + public TestCase configureCircuitBreakerEnabled(boolean enabled) { + ((DefaultAdaptiveRetryStrategy.Builder) builder).circuitBreakerEnabled(enabled); + return this; + } + + public TestCase configureTreatAsThrottling(Predicate isThrottling) { + ((DefaultAdaptiveRetryStrategy.Builder) builder).treatAsThrottling(isThrottling); + return this; + } + } + + static class BusinessLogic implements Callable { + List exceptions; + int invocation = 0; + + BusinessLogic(List exceptions) { + this.exceptions = exceptions; + } + + @Override + public Integer call() throws Exception { + if (invocation < exceptions.size()) { + throw exceptions.get(invocation++); + } + invocation++; + return invocation; + } + } +} diff --git 
a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyTest.java b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyTest.java index 3c3584e61771..6a2aca04486d 100644 --- a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyTest.java +++ b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyTest.java @@ -161,8 +161,8 @@ static class TestCase { int attempts = 0; String scope = "none"; List exceptions = new ArrayList<>(); - StandardRetryStrategyImpl.Builder builder = - (StandardRetryStrategyImpl.Builder) + DefaultStandardRetryStrategy.Builder builder = + (DefaultStandardRetryStrategy.Builder) DefaultRetryStrategy.standardStrategyBuilder(); Throwable thrown; boolean shouldSucceed = false; @@ -180,15 +180,15 @@ static class TestCase { .build()); } - public TestCase fineTune(Function configurator) { + public TestCase fineTune(Function configurator) { this.builder = configurator.apply(this.builder); return this; } public TestCase configure(Function configurator) { - this.builder = (StandardRetryStrategyImpl.Builder) configurator.apply(this.builder); + this.builder = (DefaultStandardRetryStrategy.Builder) configurator.apply(this.builder); return this; } From ac50063c37186c306282415a7c94f54505ec070a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20Sugawara=20=28=E2=88=A9=EF=BD=80-=C2=B4=29?= =?UTF-8?q?=E2=8A=83=E2=94=81=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E?= Date: Wed, 24 May 2023 14:57:42 -0700 Subject: [PATCH 08/32] Remove those tests that are now part of a different class --- .../internal/AdaptiveRetryStrategyTest.java | 279 ------------------ .../StandardRetryStrategyMiscTest.java | 72 ----- .../internal/StandardRetryStrategyTest.java | 272 ----------------- 3 files changed, 623 deletions(-) delete mode 100644 core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyTest.java delete mode 100644 core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyMiscTest.java delete mode 100644 core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyTest.java diff --git a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyTest.java b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyTest.java deleted file mode 100644 index 41acc911cf24..000000000000 --- a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyTest.java +++ /dev/null @@ -1,279 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.retries.internal; - -import static org.assertj.core.api.Assertions.assertThat; - -import java.time.Duration; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.Callable; -import java.util.function.Function; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; -import software.amazon.awssdk.retries.AdaptiveRetryStrategy; -import software.amazon.awssdk.retries.DefaultRetryStrategy; -import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse; -import software.amazon.awssdk.retries.api.BackoffStrategy; -import software.amazon.awssdk.retries.api.RecordSuccessResponse; -import software.amazon.awssdk.retries.api.RefreshRetryTokenResponse; -import software.amazon.awssdk.retries.api.RetryToken; -import software.amazon.awssdk.retries.api.TokenAcquisitionFailedException; -import software.amazon.awssdk.retries.api.internal.AcquireInitialTokenRequestImpl; -import software.amazon.awssdk.retries.api.internal.RecordSuccessRequestImpl; -import software.amazon.awssdk.retries.api.internal.RefreshRetryTokenRequestImpl; -import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore; -import software.amazon.awssdk.retries.internal.ratelimiter.RateLimiterTokenBucketStore; - -// The tests here are the same set of test from the StandardRetryStrategy, both should be passing the same battery of tests. -// Unfortunately It's not possible to create a single parametrized test on RetryStrategy given the different types required to -// configure each strategy. -class AdaptiveRetryStrategyTest { - static final int TEST_BUCKET_CAPACITY = 100; - static final int TEST_EXCEPTION_COST = 5; - static final IllegalArgumentException IAE = new IllegalArgumentException(); - static final RuntimeException RTE = new RuntimeException(); - - @ParameterizedTest - @MethodSource("parameters") - void testCase(TestCase testCase) { - testCase.run(); - if (testCase.shouldSucceed) { - assertThat(testCase.thrown) - .as(testCase.name) - .isNull(); - } else { - assertThat(testCase.thrown) - .as(testCase.name) - .isNotNull(); - } - assertThat(testCase.succeeded).as(testCase.name).isEqualTo(testCase.shouldSucceed); - assertThat(testCase.token.capacityRemaining()).as(testCase.name).isEqualTo(testCase.expectedCapacity); - assertThat(testCase.token.state()).as(testCase.name).isEqualTo(testCase.expectedState); - } - - static Collection parameters() { - BackoffStrategy backoff = BackoffStrategy.exponentialDelay(Duration.ofMillis(10), Duration.ofSeconds(25)); - return Arrays.asList( - new TestCase("Succeeds when no exceptions are thrown") - .configure(b -> b.maxAttempts(3)) - .configure(b -> b.retryOnException(IllegalArgumentException.class)) - .fineTune(b -> b.backoffStrategy(backoff)) - .expectCapacity(TEST_BUCKET_CAPACITY) - .expectState(DefaultRetryToken.TokenState.SUCCEEDED) - .expectSuccess() - , new TestCase("Succeeds when 1 exception is thrown out max of 3") - .configure(b -> b.maxAttempts(3)) - .configure(b -> b.retryOnException(IllegalArgumentException.class)) - .fineTune(b -> b.backoffStrategy(backoff)) - .givenExceptions(IAE) - // Acquire cost and then return cost - .expectCapacity(TEST_BUCKET_CAPACITY) - .expectState(DefaultRetryToken.TokenState.SUCCEEDED) - .expectSuccess() - , new TestCase("Succeeds when 2 exceptions are thrown out max of 3") - .configure(b -> b.maxAttempts(3)) - .configure(b -> 
b.retryOnException(IllegalArgumentException.class)) - .fineTune(b -> b.backoffStrategy(backoff)) - .givenExceptions(IAE, IAE) - // Acquire (cost * 2) and then return cost - .expectCapacity(TEST_BUCKET_CAPACITY - TEST_EXCEPTION_COST) - .expectState(DefaultRetryToken.TokenState.SUCCEEDED) - .expectSuccess() - , new TestCase("Fails when 3 exceptions are thrown out max of 3") - .configure(b -> b.maxAttempts(3)) - .configure(b -> b.retryOnException(IllegalArgumentException.class)) - .fineTune(b -> b.backoffStrategy(backoff)) - .givenExceptions(IAE, IAE, IAE) - // Acquire (cost * 3) and then return zero - .expectCapacity(TEST_BUCKET_CAPACITY - (TEST_EXCEPTION_COST * 3)) - .expectState(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) - .expectThrows() - , new TestCase("Fails when 4 exceptions are thrown out max of 3") - .configure(b -> b.maxAttempts(3)) - .configure(b -> b.retryOnException(IllegalArgumentException.class)) - .fineTune(b -> b.backoffStrategy(backoff)) - .givenExceptions(IAE, IAE, IAE, IAE) - // Acquire (cost * 3) and then return zero - .expectCapacity(TEST_BUCKET_CAPACITY - (TEST_EXCEPTION_COST * 3)) - .expectState(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) - .expectThrows() - , new TestCase("Fails when non-retryable exception throw in the 1st attempt") - .configure(b -> b.maxAttempts(3)) - .configure(b -> b.retryOnException(IllegalArgumentException.class)) - .fineTune(b -> b.backoffStrategy(backoff)) - .givenExceptions(RTE) - // Acquire (cost * 1) and then return zero - .expectCapacity(TEST_BUCKET_CAPACITY - TEST_EXCEPTION_COST) - .expectState(DefaultRetryToken.TokenState.NON_RETRYABLE_EXCEPTION) - .expectThrows() - , new TestCase("Fails when non-retryable exception throw in the 2nd attempt") - .configure(b -> b.maxAttempts(3)) - .configure(b -> b.retryOnException(IllegalArgumentException.class)) - .fineTune(b -> b.backoffStrategy(backoff)) - .givenExceptions(IAE, RTE) - // Acquire (cost * 1) and then return zero - .expectCapacity(TEST_BUCKET_CAPACITY - (TEST_EXCEPTION_COST * 2)) - .expectState(DefaultRetryToken.TokenState.NON_RETRYABLE_EXCEPTION) - .expectThrows() - , new TestCase("Exhausts the token bucket.") - .configure(b -> b.maxAttempts(5)) - .configure(b -> b.retryOnException(IllegalArgumentException.class)) - .fineTune(b -> b.backoffStrategy(backoff)) - .fineTune(b -> b.tokenBucketStore( TokenBucketStore - .builder() - .tokenBucketMaxCapacity(10) - .build())) - .givenExceptions(IAE, IAE, IAE) - .expectCapacity(0) - .expectState(DefaultRetryToken.TokenState.TOKEN_ACQUISITION_FAILED) - .expectThrows() - , new TestCase("Succeeds when 2 exceptions are thrown out max of 3") - .configure(b -> b.maxAttempts(3)) - .configure(b -> b.retryOnException(IllegalArgumentException.class)) - .fineTune(b -> b.backoffStrategy(backoff)) - // Setting exception cost to ZERO disables the circuit-breaker - .fineTune(b -> b.tokenBucketExceptionCost(0)) - .givenExceptions(IAE, IAE) - // Acquired zero, capacity must be unchanged. 
- .expectCapacity(TEST_BUCKET_CAPACITY) - .expectState(DefaultRetryToken.TokenState.SUCCEEDED) - .expectSuccess() - - ); - } - - static class TestCase { - final String name; - int attempts = 0; - String scope = "none"; - List exceptions = new ArrayList<>(); - DefaultAdaptiveRetryStrategy.Builder builder = - (DefaultAdaptiveRetryStrategy.Builder) - DefaultRetryStrategy.adaptiveStrategyBuilder(); - Throwable thrown; - boolean shouldSucceed = false; - boolean succeeded; - Integer expectedCapacity; - DefaultRetryToken.TokenState expectedState; - DefaultRetryToken token; - - TestCase(String name) { - this.name = name; - builder = builder.tokenBucketExceptionCost(TEST_EXCEPTION_COST) - .treatAsThrottling(t -> false) - .tokenBucketStore(TokenBucketStore - .builder() - .tokenBucketMaxCapacity(TEST_BUCKET_CAPACITY) - .build()) - .rateLimiterTokenBucketStore(RateLimiterTokenBucketStore - .builder() - .build()); - } - - public TestCase configure(Function configurator) { - this.builder = (DefaultAdaptiveRetryStrategy.Builder) configurator.apply(this.builder); - return this; - } - - public TestCase fineTune(Function configurator) { - this.builder = configurator.apply(this.builder); - return this; - } - - public TestCase givenExceptions(Exception... exceptions) { - Collections.addAll(this.exceptions, exceptions); - return this; - } - - public TestCase expectSuccess() { - this.shouldSucceed = true; - return this; - } - - public TestCase expectThrows() { - this.shouldSucceed = false; - return this; - } - - public TestCase expectCapacity(Integer expectedCapacity) { - this.expectedCapacity = expectedCapacity; - return this; - } - - public TestCase expectState(DefaultRetryToken.TokenState expectedState) { - this.expectedState = expectedState; - return this; - } - - public void run() { - AdaptiveRetryStrategy strategy = builder.build(); - runTestCase(this, strategy); - } - - public static void runTestCase(TestCase testCase, AdaptiveRetryStrategy strategy) { - AcquireInitialTokenResponse res = strategy.acquireInitialToken(AcquireInitialTokenRequestImpl.create(testCase.scope)); - RetryToken token = res.token(); - testCase.succeeded = false; - BusinessLogic logic = new BusinessLogic(testCase.exceptions); - try { - while (!testCase.succeeded) { - try { - logic.call(); - testCase.succeeded = true; - RecordSuccessResponse response = strategy.recordSuccess(RecordSuccessRequestImpl.create(token)); - token = response.token(); - testCase.token = (DefaultRetryToken) token; - } catch (Exception e) { - RefreshRetryTokenResponse refreshResponse = - strategy.refreshRetryToken(RefreshRetryTokenRequestImpl.builder() - .token(token) - .failure(e) - .build()); - token = refreshResponse.token(); - } - } - } catch (TokenAcquisitionFailedException e) { - testCase.thrown = e; - testCase.succeeded = false; - testCase.token = (DefaultRetryToken) e.token(); - } - } - } - - static class BusinessLogic implements Callable { - List exceptions; - int invocation = 0; - - BusinessLogic(List exceptions) { - this.exceptions = exceptions; - } - - @Override - public Integer call() throws Exception { - if (invocation < exceptions.size()) { - throw exceptions.get(invocation++); - } - invocation++; - return invocation; - } - } -} diff --git a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyMiscTest.java b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyMiscTest.java deleted file mode 100644 index 946ff2f5fa3b..000000000000 --- 
a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyMiscTest.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.retries.internal; - -import static software.amazon.awssdk.retries.internal.StandardRetryStrategyTest.TestCase; -import static org.assertj.core.api.Assertions.assertThat; - -import java.time.Duration; -import org.junit.jupiter.api.Test; -import software.amazon.awssdk.retries.StandardRetryStrategy; -import software.amazon.awssdk.retries.api.BackoffStrategy; -import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore; - -/** - * Tests that the circuit breaker remembers its previous state for separated - * requests. - */ -class StandardRetryStrategyMiscTest { - static final int TEST_EXCEPTION_COST = 5; - static final int TEST_MAX = 50; - static final IllegalArgumentException IAE = new IllegalArgumentException(); - static final RuntimeException RTE = new RuntimeException(); - - @Test - void circuitBreakerRemembersState() { - BackoffStrategy backoff = BackoffStrategy.exponentialDelay(Duration.ofMillis(10), Duration.ofSeconds(25)); - TestCase testCase = new TestCase("circuit breaker remembers state") - .configure(b -> b.maxAttempts(3)) - .configure(b -> b.retryOnException(IllegalArgumentException.class)) - .configure(b -> b.backoffStrategy(backoff)) - .fineTune(b -> b.tokenBucketExceptionCost(TEST_EXCEPTION_COST)) - .fineTune(b -> b.tokenBucketStore(TokenBucketStore - .builder() - .tokenBucketMaxCapacity(TEST_MAX) - .build())) - .givenExceptions(IAE, IAE); - - // The test case will throw twice and then succeed, so each run will withdraw 2 * TEST_EXCEPTION_COST and deposit back - // TEST_EXCEPTION_COST. - StandardRetryStrategy strategy = testCase.builder.build(); - int total = TEST_MAX; - for (int idx = 0; idx < 9; idx++) { - String name = testCase.name + " round " + idx; - TestCase.runTestCase(testCase, strategy); - assertThat(testCase.thrown).as(name).isNull(); - assertThat(testCase.succeeded).as(name).isTrue(); - assertThat(testCase.token.capacityRemaining()).as(name).isEqualTo(total - TEST_EXCEPTION_COST); - assertThat(testCase.token.state()).as(name).isEqualTo(DefaultRetryToken.TokenState.SUCCEEDED); - total -= TEST_EXCEPTION_COST; - } - // The tokens have been exhausted, assert that the next call will fail. 
- String name = testCase.name + " no more tokens available"; - TestCase.runTestCase(testCase, strategy); - assertThat(testCase.thrown).as(name).isNotNull(); - assertThat(testCase.succeeded).as(name).isFalse(); - assertThat(testCase.token.capacityRemaining()).as(name).isZero(); - assertThat(testCase.token.state()).as(name).isEqualTo(DefaultRetryToken.TokenState.TOKEN_ACQUISITION_FAILED); - } -} diff --git a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyTest.java b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyTest.java deleted file mode 100644 index 6a2aca04486d..000000000000 --- a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/StandardRetryStrategyTest.java +++ /dev/null @@ -1,272 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.retries.internal; - -import static org.assertj.core.api.Assertions.assertThat; - -import java.time.Duration; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.Callable; -import java.util.function.Function; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; -import software.amazon.awssdk.retries.DefaultRetryStrategy; -import software.amazon.awssdk.retries.StandardRetryStrategy; -import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse; -import software.amazon.awssdk.retries.api.BackoffStrategy; -import software.amazon.awssdk.retries.api.RecordSuccessRequest; -import software.amazon.awssdk.retries.api.RecordSuccessResponse; -import software.amazon.awssdk.retries.api.RefreshRetryTokenRequest; -import software.amazon.awssdk.retries.api.RefreshRetryTokenResponse; -import software.amazon.awssdk.retries.api.RetryToken; -import software.amazon.awssdk.retries.api.TokenAcquisitionFailedException; -import software.amazon.awssdk.retries.api.internal.AcquireInitialTokenRequestImpl; -import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore; - -class StandardRetryStrategyTest { - static final int TEST_BUCKET_CAPACITY = 100; - static final int TEST_EXCEPTION_COST = 5; - static final IllegalArgumentException IAE = new IllegalArgumentException(); - static final RuntimeException RTE = new RuntimeException(); - - @ParameterizedTest - @MethodSource("parameters") - void testCase(TestCase testCase) { - testCase.run(); - if (testCase.shouldSucceed) { - assertThat(testCase.thrown) - .as(testCase.name) - .isNull(); - } else { - assertThat(testCase.thrown) - .as(testCase.name) - .isNotNull(); - } - assertThat(testCase.succeeded).as(testCase.name).isEqualTo(testCase.shouldSucceed); - assertThat(testCase.token.capacityRemaining()).as(testCase.name).isEqualTo(testCase.expectedCapacity); - assertThat(testCase.token.state()).as(testCase.name).isEqualTo(testCase.expectedState); - - } - - static Collection parameters() 
{ - BackoffStrategy backoff = BackoffStrategy.exponentialDelay(Duration.ofMillis(10), Duration.ofSeconds(25)); - return Arrays.asList( - new TestCase("Succeeds when no exceptions are thrown") - .configure(b -> b.maxAttempts(3)) - .configure(b -> b.retryOnException(IllegalArgumentException.class)) - .configure(b -> b.backoffStrategy(backoff)) - .expectCapacity(TEST_BUCKET_CAPACITY) - .expectState(DefaultRetryToken.TokenState.SUCCEEDED) - .expectSuccess() - , new TestCase("Succeeds when 1 exception is thrown out max of 3") - .configure(b -> b.maxAttempts(3)) - .configure(b -> b.retryOnException(IllegalArgumentException.class)) - .configure(b -> b.backoffStrategy(backoff)) - .givenExceptions(IAE) - // Acquire cost and then return cost - .expectCapacity(TEST_BUCKET_CAPACITY) - .expectState(DefaultRetryToken.TokenState.SUCCEEDED) - .expectSuccess() - , new TestCase("Succeeds when 2 exceptions are thrown out max of 3") - .configure(b -> b.maxAttempts(3)) - .configure(b -> b.retryOnException(IllegalArgumentException.class)) - .configure(b -> b.backoffStrategy(backoff)) - .givenExceptions(IAE, IAE) - // Acquire (cost * 2) and then return cost - .expectCapacity(TEST_BUCKET_CAPACITY - TEST_EXCEPTION_COST) - .expectState(DefaultRetryToken.TokenState.SUCCEEDED) - .expectSuccess() - , new TestCase("Fails when 3 exceptions are thrown out max of 3") - .configure(b -> b.maxAttempts(3)) - .configure(b -> b.retryOnException(IllegalArgumentException.class)) - .configure(b -> b.backoffStrategy(backoff)) - .givenExceptions(IAE, IAE, IAE) - // Acquire (cost * 3) and then return zero - .expectCapacity(TEST_BUCKET_CAPACITY - (TEST_EXCEPTION_COST * 3)) - .expectState(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) - .expectThrows() - , new TestCase("Fails when 4 exceptions are thrown out max of 3") - .configure(b -> b.maxAttempts(3)) - .configure(b -> b.retryOnException(IllegalArgumentException.class)) - .configure(b -> b.backoffStrategy(backoff)) - .givenExceptions(IAE, IAE, IAE, IAE) - // Acquire (cost * 3) and then return zero - .expectCapacity(TEST_BUCKET_CAPACITY - (TEST_EXCEPTION_COST * 3)) - .expectState(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) - .expectThrows() - , new TestCase("Fails when non-retryable exception throw in the 1st attempt") - .configure(b -> b.maxAttempts(3)) - .configure(b -> b.retryOnException(IllegalArgumentException.class)) - .configure(b -> b.backoffStrategy(backoff)) - .givenExceptions(RTE) - // Acquire (cost * 1) and then return zero - .expectCapacity(TEST_BUCKET_CAPACITY - TEST_EXCEPTION_COST) - .expectState(DefaultRetryToken.TokenState.NON_RETRYABLE_EXCEPTION) - .expectThrows() - , new TestCase("Fails when non-retryable exception throw in the 2nd attempt") - .configure(b -> b.maxAttempts(3)) - .configure(b -> b.retryOnException(IllegalArgumentException.class)) - .configure(b -> b.backoffStrategy(backoff)) - .givenExceptions(IAE, RTE) - // Acquire (cost * 1) and then return zero - .expectCapacity(TEST_BUCKET_CAPACITY - (TEST_EXCEPTION_COST * 2)) - .expectState(DefaultRetryToken.TokenState.NON_RETRYABLE_EXCEPTION) - .expectThrows() - , new TestCase("Exhausts the token bucket.") - .configure(b -> b.maxAttempts(5)) - .configure(b -> b.retryOnException(IllegalArgumentException.class)) - .configure(b -> b.backoffStrategy(backoff)) - .fineTune(b -> b.tokenBucketStore(TokenBucketStore - .builder() - .tokenBucketMaxCapacity(10) - .build())) - .givenExceptions(IAE, IAE, IAE) - .expectCapacity(0) - .expectState(DefaultRetryToken.TokenState.TOKEN_ACQUISITION_FAILED) - 
.expectThrows() - , new TestCase("Succeeds when 2 exceptions are thrown out max of 3") - .configure(b -> b.maxAttempts(3)) - .configure(b -> b.retryOnException(IllegalArgumentException.class)) - .configure(b -> b.backoffStrategy(backoff)) - // Setting exception cost to ZERO disables the circuit-breaker - .fineTune(b -> b.tokenBucketExceptionCost(0)) - .givenExceptions(IAE, IAE) - // Acquired zero, capacity must be unchanged. - .expectCapacity(TEST_BUCKET_CAPACITY) - .expectState(DefaultRetryToken.TokenState.SUCCEEDED) - .expectSuccess() - ); - } - - - static class TestCase { - final String name; - int attempts = 0; - String scope = "none"; - List exceptions = new ArrayList<>(); - DefaultStandardRetryStrategy.Builder builder = - (DefaultStandardRetryStrategy.Builder) - DefaultRetryStrategy.standardStrategyBuilder(); - Throwable thrown; - boolean shouldSucceed = false; - boolean succeeded; - Integer expectedCapacity; - DefaultRetryToken.TokenState expectedState; - DefaultRetryToken token; - - TestCase(String name) { - this.name = name; - builder = builder.tokenBucketExceptionCost(TEST_EXCEPTION_COST) - .tokenBucketStore(TokenBucketStore - .builder() - .tokenBucketMaxCapacity(TEST_BUCKET_CAPACITY) - .build()); - } - - public TestCase fineTune(Function configurator) { - this.builder = configurator.apply(this.builder); - return this; - } - - public TestCase configure(Function configurator) { - this.builder = (DefaultStandardRetryStrategy.Builder) configurator.apply(this.builder); - return this; - } - - public TestCase givenExceptions(Exception... exceptions) { - Collections.addAll(this.exceptions, exceptions); - return this; - } - - public TestCase expectSuccess() { - this.shouldSucceed = true; - return this; - } - - public TestCase expectThrows() { - this.shouldSucceed = false; - return this; - } - - public TestCase expectCapacity(Integer expectedCapacity) { - this.expectedCapacity = expectedCapacity; - return this; - } - - public TestCase expectState(DefaultRetryToken.TokenState expectedState) { - this.expectedState = expectedState; - return this; - } - - public void run() { - StandardRetryStrategy strategy = builder.build(); - runTestCase(this, strategy); - } - - public static void runTestCase(TestCase testCase, StandardRetryStrategy strategy) { - AcquireInitialTokenResponse res = strategy.acquireInitialToken(AcquireInitialTokenRequestImpl.create(testCase.scope)); - RetryToken token = res.token(); - testCase.succeeded = false; - BusinessLogic logic = new BusinessLogic(testCase.exceptions); - try { - while (!testCase.succeeded) { - try { - logic.call(); - testCase.succeeded = true; - RecordSuccessResponse response = strategy.recordSuccess(RecordSuccessRequest.create(token)); - token = response.token(); - testCase.token = (DefaultRetryToken) token; - } catch (Exception e) { - RefreshRetryTokenResponse refreshResponse = - strategy.refreshRetryToken(RefreshRetryTokenRequest.builder() - .token(token) - .failure(e) - .build()); - token = refreshResponse.token(); - } - } - } catch (TokenAcquisitionFailedException e) { - testCase.thrown = e; - testCase.succeeded = false; - testCase.token = (DefaultRetryToken) e.token(); - } - } - } - - static class BusinessLogic implements Callable { - List exceptions; - int invocation = 0; - - BusinessLogic(List exceptions) { - this.exceptions = exceptions; - } - - @Override - public Integer call() throws Exception { - if (invocation < exceptions.size()) { - throw exceptions.get(invocation++); - } - invocation++; - return invocation; - } - } -} From 
46e5194e6edb047a0ff0284f86b35d12c57b9078 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20Sugawara=20=28=E2=88=A9=EF=BD=80-=C2=B4=29?= =?UTF-8?q?=E2=8A=83=E2=94=81=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E?= Date: Wed, 24 May 2023 14:58:03 -0700 Subject: [PATCH 09/32] Update version after merge from master --- core/retries-api/pom.xml | 2 +- core/retries/pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/retries-api/pom.xml b/core/retries-api/pom.xml index b964b018da89..51f7b823687e 100644 --- a/core/retries-api/pom.xml +++ b/core/retries-api/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.64-SNAPSHOT + 2.20.73-SNAPSHOT 4.0.0 diff --git a/core/retries/pom.xml b/core/retries/pom.xml index 895bef8c022f..f1ffbc2ec620 100644 --- a/core/retries/pom.xml +++ b/core/retries/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.64-SNAPSHOT + 2.20.73-SNAPSHOT 4.0.0 From 4fbcdb6579ae79f0b00f8d9180e234a8d9582756 Mon Sep 17 00:00:00 2001 From: Manuel Sugawara Date: Fri, 26 May 2023 19:17:28 -0700 Subject: [PATCH 10/32] Refactor retry strategies (#4039) * Refactor the retry strategies This change uses a single class to implement the core logic of all the retries strategies and adds extension points to tailor the behavior when needed. * Rename to BaseRetryStrategy and make it abstract * Remove previous implementations and rename the new ones --- .../findbugs/ToBuilderIsCorrect.java | 10 +- ...eRetryStrategyResourceConstrainedTest.java | 3 +- .../retries/internal/BaseRetryStrategy.java | 393 ++++++++++++++++++ .../DefaultAdaptiveRetryStrategy.java | 318 ++------------ .../internal/DefaultLegacyRetryStrategy.java | 307 ++------------ .../DefaultStandardRetryStrategy.java | 285 +------------ .../internal/RetryStrategyCommonTest.java | 16 +- 7 files changed, 500 insertions(+), 832 deletions(-) create mode 100644 core/retries/src/main/java/software/amazon/awssdk/retries/internal/BaseRetryStrategy.java diff --git a/build-tools/src/main/java/software/amazon/awssdk/buildtools/findbugs/ToBuilderIsCorrect.java b/build-tools/src/main/java/software/amazon/awssdk/buildtools/findbugs/ToBuilderIsCorrect.java index 20774b974759..da4f6dbe063c 100644 --- a/build-tools/src/main/java/software/amazon/awssdk/buildtools/findbugs/ToBuilderIsCorrect.java +++ b/build-tools/src/main/java/software/amazon/awssdk/buildtools/findbugs/ToBuilderIsCorrect.java @@ -212,11 +212,15 @@ public void visit(Method method) { } } else if (isBuildable && method.getName().equals("toBuilder") && method.getSignature().startsWith("()")) { // This is a buildable toBuilder - constructorsInvokedFromToBuilder.computeIfAbsent(getDottedClassName(), n -> new HashMap<>()); - toBuilderModifiedFields.computeIfAbsent(getDottedClassName(), n -> new HashMap<>()); + String dottedClassName = getDottedClassName(); + constructorsInvokedFromToBuilder.computeIfAbsent(dottedClassName, n -> new HashMap<>()); + toBuilderModifiedFields.computeIfAbsent(dottedClassName, n -> new HashMap<>()); inBuildableToBuilder = true; inBuilderConstructor = false; - + if (method.isAbstract()) { + // Ignore abstract toBuilder methods, we will still validate the actual implementations. 
+ ignoredBuildables.add(dottedClassName); + } registerIgnoredFields(); } else { inBuildableToBuilder = false; diff --git a/core/retries/src/it/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyResourceConstrainedTest.java b/core/retries/src/it/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyResourceConstrainedTest.java index 8ff75edb9411..f721f5c1065a 100644 --- a/core/retries/src/it/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyResourceConstrainedTest.java +++ b/core/retries/src/it/java/software/amazon/awssdk/retries/internal/AdaptiveRetryStrategyResourceConstrainedTest.java @@ -51,6 +51,7 @@ * perceived availability for the clients to be close to 1.0. */ class AdaptiveRetryStrategyResourceConstrainedTest { + static final int DEFAULT_EXCEPTION_TOKEN_COST = 5; @Test void seemsToBeCorrectAndThreadSafe() { @@ -61,12 +62,12 @@ void seemsToBeCorrectAndThreadSafe() { int parallelism = serverWorkers + clientWorkers; ExecutorService executor = Executors.newFixedThreadPool(parallelism); Server server = new Server(serverWorkers, executor); - RateLimiterTokenBucketStore store = RateLimiterTokenBucketStore.builder().build(); AdaptiveRetryStrategy strategy = DefaultAdaptiveRetryStrategy .builder() // We don't care about how many attempts we allow to, that logic is tested somewhere else. // so we give the strategy plenty of room for retries. .maxAttempts(20) + .tokenBucketExceptionCost(DEFAULT_EXCEPTION_TOKEN_COST) .tokenBucketStore(TokenBucketStore.builder().tokenBucketMaxCapacity(10_000).build()) // Just wait for the rate limiter delays. .backoffStrategy(BackoffStrategy.retryImmediately()) diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/BaseRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/BaseRetryStrategy.java new file mode 100644 index 000000000000..4b460137d184 --- /dev/null +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/BaseRetryStrategy.java @@ -0,0 +1,393 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+
+package software.amazon.awssdk.retries.internal;
+
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.function.Predicate;
+import software.amazon.awssdk.annotations.SdkInternalApi;
+import software.amazon.awssdk.retries.api.AcquireInitialTokenRequest;
+import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse;
+import software.amazon.awssdk.retries.api.BackoffStrategy;
+import software.amazon.awssdk.retries.api.RecordSuccessRequest;
+import software.amazon.awssdk.retries.api.RecordSuccessResponse;
+import software.amazon.awssdk.retries.api.RefreshRetryTokenRequest;
+import software.amazon.awssdk.retries.api.RefreshRetryTokenResponse;
+import software.amazon.awssdk.retries.api.RetryStrategy;
+import software.amazon.awssdk.retries.api.RetryToken;
+import software.amazon.awssdk.retries.api.TokenAcquisitionFailedException;
+import software.amazon.awssdk.retries.api.internal.RefreshRetryTokenResponseImpl;
+import software.amazon.awssdk.retries.internal.circuitbreaker.AcquireResponse;
+import software.amazon.awssdk.retries.internal.circuitbreaker.ReleaseResponse;
+import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucket;
+import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore;
+import software.amazon.awssdk.utils.Logger;
+import software.amazon.awssdk.utils.Validate;
+import software.amazon.awssdk.utils.builder.CopyableBuilder;
+import software.amazon.awssdk.utils.builder.ToCopyableBuilder;
+
+/**
+ * Generic class that implements the common logic for all the retry
+ * strategies, with extension points for specific strategies to tailor
+ * the behavior to their needs.
+ */
+@SdkInternalApi
+public abstract class BaseRetryStrategy<
+    B extends CopyableBuilder & RetryStrategy.Builder,
+    T extends ToCopyableBuilder & RetryStrategy> implements RetryStrategy {
+
+    protected final Logger log;
+    protected final List> retryPredicates;
+    protected final int maxAttempts;
+    protected final boolean circuitBreakerEnabled;
+    protected final BackoffStrategy backoffStrategy;
+    protected final int exceptionCost;
+    protected final TokenBucketStore tokenBucketStore;
+
+    BaseRetryStrategy(Logger log, Builder builder) {
+        this.log = log;
+        this.retryPredicates = Collections.unmodifiableList(Validate.paramNotNull(builder.retryPredicates, "retryPredicates"));
+        this.maxAttempts = Validate.isPositive(builder.maxAttempts, "maxAttempts");
+        this.circuitBreakerEnabled = builder.circuitBreakerEnabled == null || builder.circuitBreakerEnabled;
+        this.backoffStrategy = Validate.paramNotNull(builder.backoffStrategy, "backoffStrategy");
+        this.exceptionCost = Validate.paramNotNull(builder.exceptionCost, "exceptionCost");
+        this.tokenBucketStore = Validate.paramNotNull(builder.tokenBucketStore, "tokenBucketStore");
+    }
+
+    /**
+     * This method implements the logic of {@link
+     * RetryStrategy#acquireInitialToken(AcquireInitialTokenRequest)}.
+     *
+     * @see RetryStrategy#acquireInitialToken(AcquireInitialTokenRequest)
+     */
+    @Override
+    public final AcquireInitialTokenResponse acquireInitialToken(AcquireInitialTokenRequest request) {
+        logAcquireInitialToken(request);
+        DefaultRetryToken token = DefaultRetryToken.builder().scope(request.scope()).build();
+        return AcquireInitialTokenResponse.create(token, computeInitialBackoff(request));
+    }
+
+    /**
+     * This method implements the logic of {@link
+     * RetryStrategy#refreshRetryToken(RefreshRetryTokenRequest)}.
+     *
+     * @see RetryStrategy#refreshRetryToken(RefreshRetryTokenRequest)
+     */
+    @Override
+    public final RefreshRetryTokenResponse refreshRetryToken(RefreshRetryTokenRequest request) {
+        DefaultRetryToken token = asDefaultRetryToken(request.token());
+
+        // Check if we meet the preconditions needed for retrying. These will throw if the expected condition is not met.
+        // 1) is retryable?
+        throwOnNonRetryableException(request);
+
+        // 2) max attempts reached?
+        throwOnMaxAttemptsReached(request);
+
+        // 3) can we acquire a token?
+        AcquireResponse acquireResponse = requestAcquireCapacity(request, token);
+        throwOnAcquisitionFailure(request, acquireResponse);
+
+        // All the conditions required to retry were met; update the internal state before retrying.
+        updateStateForRetry(request);
+
+        // Refresh the retry token and compute the backoff delay.
+        DefaultRetryToken refreshedToken = refreshToken(request, acquireResponse);
+        Duration backoff = computeBackoff(request, refreshedToken);
+
+        logRefreshTokenSuccess(refreshedToken, acquireResponse, backoff);
+        return RefreshRetryTokenResponseImpl.create(refreshedToken, backoff);
+    }
+
+    /**
+     * This method implements the logic of {@link
+     * RetryStrategy#recordSuccess(RecordSuccessRequest)}.
+     *
+     * @see RetryStrategy#recordSuccess(RecordSuccessRequest)
+     */
+    @Override
+    public final RecordSuccessResponse recordSuccess(RecordSuccessRequest request) {
+        DefaultRetryToken token = asDefaultRetryToken(request.token());
+
+        // Update the circuit breaker token bucket.
+        ReleaseResponse releaseResponse = releaseTokenBucketCapacity(token);
+
+        // Refresh the retry token and return.
+        DefaultRetryToken refreshedToken = refreshRetryTokenAfterSuccess(token, releaseResponse);
+
+        // Update the state for the specific retry strategy.
+        updateStateForSuccess(token);
+
+        // Log success and return.
+        logRecordSuccess(token, releaseResponse);
+        return RecordSuccessResponse.create(refreshedToken);
+    }
+
+    @Override
+    public abstract B toBuilder();
+
+
+    /**
+     * Computes the backoff before the first attempt, by default
+     * {@link Duration#ZERO}. Extending classes can override this
+     * method to compute a different delay depending on their logic.
+     */
+    protected Duration computeInitialBackoff(AcquireInitialTokenRequest request) {
+        return Duration.ZERO;
+    }
+
+    /**
+     * Computes the backoff before a retry using the configured
+     * backoff strategy. Extending classes can override this method
+     * to compute a different delay depending on their logic.
+     */
+    protected Duration computeBackoff(RefreshRetryTokenRequest request, DefaultRetryToken token) {
+        Duration backoff = backoffStrategy.computeDelay(token.attempt());
+        Duration suggested = request.suggestedDelay().orElse(Duration.ZERO);
+        return maxOf(suggested, backoff);
+    }
+
+    /**
+     * Called inside {@link #recordSuccess} to allow extending classes
+     * to update their internal state after a successful request.
+     */
+    protected void updateStateForSuccess(DefaultRetryToken token) {
+    }
+
+    /**
+     * Called inside {@link #refreshRetryToken} to allow extending
+     * classes to update their internal state before retrying a
+     * request.
+     */
+    protected void updateStateForRetry(RefreshRetryTokenRequest request) {
+    }
+
+    /**
+     * Returns the number of tokens to withdraw from the token
+     * bucket. Extending classes can override this method to tailor
+     * this amount for the specific kind of failure.
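+     *
+     * <p>For example, a subclass that does not want failures it treats as throttling to
+     * drain the token bucket could override this method along the following lines (an
+     * illustrative sketch only, assuming the subclass holds a {@code treatAsThrottling}
+     * predicate):
+     * <pre>{@code
+     * protected int exceptionCost(RefreshRetryTokenRequest request) {
+     *     // Do not charge the bucket for failures treated as throttling.
+     *     if (treatAsThrottling.test(request.failure())) {
+     *         return 0;
+     *     }
+     *     return super.exceptionCost(request);
+     * }
+     * }</pre>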
+ */ + protected int exceptionCost(RefreshRetryTokenRequest request) { + if (circuitBreakerEnabled) { + return exceptionCost; + } + return 0; + } + + private DefaultRetryToken refreshToken(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { + DefaultRetryToken token = asDefaultRetryToken(request.token()); + return token.toBuilder() + .increaseAttempt() + .state(DefaultRetryToken.TokenState.IN_PROGRESS) + .capacityAcquired(acquireResponse.capacityAcquired()) + .capacityRemaining(acquireResponse.capacityRemaining()) + .addFailure(request.failure()) + .build(); + } + + private AcquireResponse requestAcquireCapacity(RefreshRetryTokenRequest request, DefaultRetryToken token) { + TokenBucket tokenBucket = tokenBucketStore.tokenBucketForScope(token.scope()); + return tokenBucket.tryAcquire(exceptionCost(request)); + } + + private ReleaseResponse releaseTokenBucketCapacity(DefaultRetryToken token) { + TokenBucket bucket = tokenBucketStore.tokenBucketForScope(token.scope()); + int capacityReleased = token.capacityAcquired(); + return bucket.release(capacityReleased); + } + + private DefaultRetryToken refreshRetryTokenAfterSuccess(DefaultRetryToken token, ReleaseResponse releaseResponse) { + return token.toBuilder() + .capacityRemaining(releaseResponse.currentCapacity()) + .state(DefaultRetryToken.TokenState.SUCCEEDED) + .build(); + } + + private void throwOnMaxAttemptsReached(RefreshRetryTokenRequest request) { + DefaultRetryToken token = asDefaultRetryToken(request.token()); + if (maxAttemptsReached(token)) { + Throwable failure = request.failure(); + TokenBucket tokenBucket = tokenBucketStore.tokenBucketForScope(token.scope()); + DefaultRetryToken refreshedToken = + token.toBuilder() + .capacityRemaining(tokenBucket.currentCapacity()) + .state(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) + .addFailure(failure) + .build(); + String message = maxAttemptsReachedMessage(refreshedToken); + log.debug(() -> message, failure); + throw new TokenAcquisitionFailedException(message, refreshedToken, failure); + } + } + + private void throwOnNonRetryableException(RefreshRetryTokenRequest request) { + DefaultRetryToken token = asDefaultRetryToken(request.token()); + Throwable failure = request.failure(); + if (isNonRetryableException(request)) { + String message = nonRetryableExceptionMessage(token); + log.error(() -> message, failure); + TokenBucket tokenBucket = tokenBucketStore.tokenBucketForScope(token.scope()); + DefaultRetryToken refreshedToken = + token.toBuilder() + .capacityRemaining(tokenBucket.currentCapacity()) + .state(DefaultRetryToken.TokenState.NON_RETRYABLE_EXCEPTION) + .addFailure(failure) + .build(); + throw new TokenAcquisitionFailedException(message, refreshedToken, failure); + } + int attempt = token.attempt(); + log.debug(() -> String.format("Request attempt %d encountered retryable failure.", attempt), failure); + } + + private void throwOnAcquisitionFailure(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { + DefaultRetryToken token = asDefaultRetryToken(request.token()); + if (acquireResponse.acquisitionFailed()) { + Throwable failure = request.failure(); + DefaultRetryToken refreshedToken = + token.toBuilder() + .capacityRemaining(acquireResponse.capacityRemaining()) + .capacityAcquired(acquireResponse.capacityAcquired()) + .state(DefaultRetryToken.TokenState.TOKEN_ACQUISITION_FAILED) + .addFailure(failure) + .build(); + String message = acquisitionFailedMessage(acquireResponse); + log.debug(() -> message, failure); + throw new 
TokenAcquisitionFailedException(message, refreshedToken, failure); + } + } + + private String nonRetryableExceptionMessage(DefaultRetryToken token) { + return String.format("Request attempt %d encountered non-retryable failure", token.attempt()); + } + + private String maxAttemptsReachedMessage(DefaultRetryToken token) { + return String.format("Request will not be retried. Retries have been exhausted " + + "(cost: 0, capacity: %d/%d)", + token.capacityAcquired(), + token.capacityRemaining()); + } + + private String acquisitionFailedMessage(AcquireResponse response) { + return String.format("Request will not be retried to protect the caller and downstream service. " + + "The cost of retrying (%d) " + + "exceeds the available retry capacity (%d/%d).", + response.capacityRequested(), + response.capacityRemaining(), + response.maxCapacity()); + } + + private void logAcquireInitialToken(AcquireInitialTokenRequest request) { + // Request attempt 1 token acquired (backoff: 0ms, cost: 0, capacity: 500/500) + TokenBucket tokenBucket = tokenBucketStore.tokenBucketForScope(request.scope()); + log.debug(() -> String.format("Request attempt 1 token acquired " + + "(backoff: 0ms, cost: 0, capacity: %d/%d)", + tokenBucket.currentCapacity(), tokenBucket.maxCapacity())); + } + + private void logRefreshTokenSuccess(DefaultRetryToken token, AcquireResponse acquireResponse, Duration delay) { + log.debug(() -> String.format("Request attempt %d token acquired " + + "(backoff: %dms, cost: %d, capacity: %d/%d)", + token.attempt(), delay.toMillis(), + acquireResponse.capacityAcquired(), + acquireResponse.capacityRemaining(), + acquireResponse.maxCapacity())); + } + + private void logRecordSuccess(DefaultRetryToken token, ReleaseResponse release) { + log.debug(() -> String.format("Request attempt %d succeeded (cost: -%d, capacity: %d/%d)", + token.attempt(), release.capacityReleased(), + release.currentCapacity(), release.maxCapacity())); + + } + + private boolean maxAttemptsReached(DefaultRetryToken token) { + return token.attempt() >= maxAttempts; + } + + private boolean isNonRetryableException(RefreshRetryTokenRequest request) { + Throwable failure = request.failure(); + for (Predicate retryPredicate : retryPredicates) { + if (retryPredicate.test(failure)) { + return false; + } + } + return true; + } + + static Duration maxOf(Duration left, Duration right) { + if (left.compareTo(right) >= 0) { + return left; + } + return right; + } + + static DefaultRetryToken asDefaultRetryToken(RetryToken token) { + return Validate.isInstanceOf(DefaultRetryToken.class, token, + "RetryToken is of unexpected class (%s), " + + "This token was not created by this retry strategy.", + token.getClass().getName()); + } + + static class Builder { + private List> retryPredicates; + private int maxAttempts; + private Boolean circuitBreakerEnabled; + private Integer exceptionCost; + private BackoffStrategy backoffStrategy; + private TokenBucketStore tokenBucketStore; + + Builder() { + retryPredicates = new ArrayList<>(); + } + + Builder(BaseRetryStrategy strategy) { + this.retryPredicates = new ArrayList<>(strategy.retryPredicates); + this.maxAttempts = strategy.maxAttempts; + this.circuitBreakerEnabled = strategy.circuitBreakerEnabled; + this.exceptionCost = strategy.exceptionCost; + this.backoffStrategy = strategy.backoffStrategy; + this.tokenBucketStore = strategy.tokenBucketStore; + } + + void setRetryOnException(Predicate shouldRetry) { + this.retryPredicates.add(shouldRetry); + } + + void setMaxAttempts(int maxAttempts) { + 
this.maxAttempts = maxAttempts; + } + + void setTokenBucketStore(TokenBucketStore tokenBucketStore) { + this.tokenBucketStore = tokenBucketStore; + } + + void setCircuitBreakerEnabled(Boolean enabled) { + this.circuitBreakerEnabled = enabled; + } + + void setBackoffStrategy(BackoffStrategy backoffStrategy) { + this.backoffStrategy = backoffStrategy; + } + + void setTokenBucketExceptionCost(int exceptionCost) { + this.exceptionCost = exceptionCost; + } + } +} diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultAdaptiveRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultAdaptiveRetryStrategy.java index 40d8820b1297..70347d8a9c4f 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultAdaptiveRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultAdaptiveRetryStrategy.java @@ -16,121 +16,59 @@ package software.amazon.awssdk.retries.internal; import java.time.Duration; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; import java.util.function.Predicate; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.retries.AdaptiveRetryStrategy; import software.amazon.awssdk.retries.api.AcquireInitialTokenRequest; -import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse; import software.amazon.awssdk.retries.api.BackoffStrategy; -import software.amazon.awssdk.retries.api.RecordSuccessRequest; -import software.amazon.awssdk.retries.api.RecordSuccessResponse; import software.amazon.awssdk.retries.api.RefreshRetryTokenRequest; -import software.amazon.awssdk.retries.api.RefreshRetryTokenResponse; -import software.amazon.awssdk.retries.api.RetryToken; -import software.amazon.awssdk.retries.api.TokenAcquisitionFailedException; -import software.amazon.awssdk.retries.api.internal.RefreshRetryTokenResponseImpl; -import software.amazon.awssdk.retries.internal.circuitbreaker.AcquireResponse; -import software.amazon.awssdk.retries.internal.circuitbreaker.ReleaseResponse; -import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucket; import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore; -import software.amazon.awssdk.retries.internal.ratelimiter.RateLimiterAcquireResponse; import software.amazon.awssdk.retries.internal.ratelimiter.RateLimiterTokenBucket; import software.amazon.awssdk.retries.internal.ratelimiter.RateLimiterTokenBucketStore; -import software.amazon.awssdk.retries.internal.ratelimiter.RateLimiterUpdateResponse; import software.amazon.awssdk.utils.Logger; import software.amazon.awssdk.utils.Validate; -/** - * Implementation of the {@link AdaptiveRetryStrategy} interface. 
- */ @SdkInternalApi -public final class DefaultAdaptiveRetryStrategy implements AdaptiveRetryStrategy { +public final class DefaultAdaptiveRetryStrategy + extends BaseRetryStrategy implements AdaptiveRetryStrategy { + private static final Logger LOG = Logger.loggerFor(DefaultAdaptiveRetryStrategy.class); - private final List> retryPredicates; - private final int maxAttempts; - private final boolean circuitBreakerEnabled; - private final BackoffStrategy backoffStrategy; - private final int tokenBucketMaxCapacity; - private final int exceptionCost; private final Predicate treatAsThrottling; - private final TokenBucketStore tokenBucketStore; private final RateLimiterTokenBucketStore rateLimiterTokenBucketStore; - private DefaultAdaptiveRetryStrategy(Builder builder) { - this.retryPredicates = Collections.unmodifiableList(Validate.paramNotNull(builder.retryPredicates, "retryPredicates")); - this.maxAttempts = Validate.isPositive(builder.maxAttempts, "maxAttempts"); - this.circuitBreakerEnabled = builder.circuitBreakerEnabled == null || builder.circuitBreakerEnabled; - this.backoffStrategy = Validate.paramNotNull(builder.backoffStrategy, "backoffStrategy"); - this.exceptionCost = builder.exceptionCost; - this.tokenBucketMaxCapacity = builder.tokenBucketMaxCapacity; + DefaultAdaptiveRetryStrategy(Builder builder) { + super(LOG, builder); this.treatAsThrottling = Validate.paramNotNull(builder.treatAsThrottling, "treatAsThrottling"); - this.tokenBucketStore = Validate.paramNotNull(builder.tokenBucketStore, "tokenBucketStore"); this.rateLimiterTokenBucketStore = Validate.paramNotNull(builder.rateLimiterTokenBucketStore, "rateLimiterTokenBucketStore"); } @Override - public AcquireInitialTokenResponse acquireInitialToken(AcquireInitialTokenRequest request) { - logAcquireInitialToken(request); - return AcquireInitialTokenResponse.create( - DefaultRetryToken.builder().scope(request.scope()).build(), Duration.ZERO); + protected Duration computeInitialBackoff(AcquireInitialTokenRequest request) { + RateLimiterTokenBucket bucket = rateLimiterTokenBucketStore.tokenBucketForScope(request.scope()); + return bucket.tryAcquire().delay(); } @Override - public RefreshRetryTokenResponse refreshRetryToken(RefreshRetryTokenRequest request) { - DefaultRetryToken token = asStandardRetryToken(request.token()); - AcquireResponse acquireResponse = requestAcquireCapacity(token); - - // Check if we meet the preconditions needed for retrying. These will throw if the expected condition is not meet. - // 1) is retryable? - throwOnNonRetryableException(request, acquireResponse); - // 2) max attempts reached? - throwOnMaxAttemptsReached(request, acquireResponse); - // 3) can we acquire a token? - throwOnAcquisitionFailure(request, acquireResponse); + protected Duration computeBackoff(RefreshRetryTokenRequest request, DefaultRetryToken token) { + Duration backoff = super.computeBackoff(request, token); + RateLimiterTokenBucket bucket = rateLimiterTokenBucketStore.tokenBucketForScope(token.scope()); + return backoff.plus(bucket.tryAcquire().delay()); + } - // All the conditions required to retry were meet, update the send rate if the error is categorized as throttling. 
- Throwable failure = request.failure(); - RateLimiterTokenBucket rateLimiterTokenBucket = rateLimiterTokenBucketStore.tokenBucketForScope(token.scope()); - if (this.treatAsThrottling.test(failure)) { - rateLimiterTokenBucket.updateRateAfterThrottling(); + @Override + protected void updateStateForRetry(RefreshRetryTokenRequest request) { + if (treatAsThrottling.test(request.failure())) { + DefaultRetryToken token = asDefaultRetryToken(request.token()); + RateLimiterTokenBucket bucket = rateLimiterTokenBucketStore.tokenBucketForScope(token.scope()); + bucket.updateRateAfterThrottling(); } - - // Refresh the retry token and compute the backoff delay. - DefaultRetryToken refreshedToken = refreshToken(request, acquireResponse); - Duration backoff = backoffStrategy.computeDelay(refreshedToken.attempt()); - - // Acquire capacity from the adaptive token. - RateLimiterAcquireResponse rateLimiterAcquireResponse = rateLimiterTokenBucket.tryAcquire(); - - // Take the max delay between the suggested delay, the backoff delay and the delay of the adaptive strategy. - Duration adaptiveDelay = rateLimiterAcquireResponse.delay(); - Duration suggested = request.suggestedDelay().orElse(Duration.ZERO); - Duration finalDelay = maxOf(suggested, backoff).plus(adaptiveDelay); - - logRefreshTokenSuccess(refreshedToken, acquireResponse, finalDelay); - return RefreshRetryTokenResponseImpl.create(refreshedToken, finalDelay); } @Override - public RecordSuccessResponse recordSuccess(RecordSuccessRequest request) { - DefaultRetryToken token = asStandardRetryToken(request.token()); - - // Update the adaptive token bucket. - updateAdaptiveTokenBucket(token); - - // Update the circuit breaker token bucket. - ReleaseResponse releaseResponse = updateCircuitBreakerTokenBucket(token); - - // Refresh the retry token and return - DefaultRetryToken refreshedToken = refreshRetryTokenAfterSuccess(token, releaseResponse); - - // Log success and return. - logRecordSuccess(token, releaseResponse); - return RecordSuccessResponse.create(refreshedToken); + protected void updateStateForSuccess(DefaultRetryToken token) { + RateLimiterTokenBucket bucket = rateLimiterTokenBucketStore.tokenBucketForScope(token.scope()); + bucket.updateRateAfterSuccess(); } @Override @@ -138,214 +76,32 @@ public Builder toBuilder() { return new Builder(this); } - /** - * Returns a builder to fine-tune this retry strategy. - * - * @return a builder for this retry strategy. 
- */ public static Builder builder() { return new Builder(); } - private Duration maxOf(Duration left, Duration right) { - if (left.compareTo(right) >= 0) { - return left; - } - return right; - } - - private RateLimiterUpdateResponse updateAdaptiveTokenBucket(DefaultRetryToken token) { - RateLimiterTokenBucket rateLimiterTokenBucket = rateLimiterTokenBucketStore.tokenBucketForScope(token.scope()); - return rateLimiterTokenBucket.updateRateAfterSuccess(); - } - - private ReleaseResponse updateCircuitBreakerTokenBucket(DefaultRetryToken token) { - TokenBucket bucket = tokenBucketStore.tokenBucketForScope(token.scope()); - int capacityReleased = token.capacityAcquired(); - return bucket.release(capacityReleased); - } - - private DefaultRetryToken refreshRetryTokenAfterSuccess(DefaultRetryToken token, ReleaseResponse releaseResponse) { - return token.toBuilder() - .capacityAcquired(0) - .capacityRemaining(releaseResponse.currentCapacity()) - .state(DefaultRetryToken.TokenState.SUCCEEDED) - .build(); - } - - private void throwOnAcquisitionFailure(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { - DefaultRetryToken token = asStandardRetryToken(request.token()); - if (acquireResponse.acquisitionFailed()) { - Throwable failure = request.failure(); - DefaultRetryToken refreshedToken = - token.toBuilder() - .capacityRemaining(acquireResponse.capacityRemaining()) - .capacityAcquired(acquireResponse.capacityAcquired()) - .state(DefaultRetryToken.TokenState.TOKEN_ACQUISITION_FAILED) - .addFailure(failure) - .build(); - String message = acquisitionFailedMessage(acquireResponse); - LOG.error(() -> message, failure); - throw new TokenAcquisitionFailedException(message, refreshedToken, failure); - } - } - - private void throwOnMaxAttemptsReached(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { - DefaultRetryToken token = asStandardRetryToken(request.token()); - if (maxAttemptsReached(token)) { - Throwable failure = request.failure(); - DefaultRetryToken refreshedToken = - token.toBuilder() - .capacityRemaining(acquireResponse.capacityRemaining()) - .capacityAcquired(acquireResponse.capacityAcquired()) - .state(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) - .addFailure(failure) - .build(); - String message = maxAttemptsReachedMessage(refreshedToken); - LOG.error(() -> message, failure); - throw new TokenAcquisitionFailedException(message, refreshedToken, failure); - } - } - - private void throwOnNonRetryableException(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { - DefaultRetryToken token = asStandardRetryToken(request.token()); - Throwable failure = request.failure(); - if (isNonRetryableException(request)) { - String message = nonRetryableExceptionMessage(token); - LOG.error(() -> message, failure); - DefaultRetryToken refreshedToken = - token.toBuilder() - .capacityRemaining(acquireResponse.capacityRemaining()) - .capacityAcquired(acquireResponse.capacityAcquired()) - .state(DefaultRetryToken.TokenState.NON_RETRYABLE_EXCEPTION) - .addFailure(failure) - .build(); - throw new TokenAcquisitionFailedException(message, refreshedToken, failure); - } - int attempt = token.attempt(); - LOG.warn(() -> String.format("Request attempt %d encountered retryable failure.", attempt), failure); - } - - private String nonRetryableExceptionMessage(DefaultRetryToken token) { - return String.format("Request attempt %d encountered non-retryable failure", token.attempt()); - } - - private String maxAttemptsReachedMessage(DefaultRetryToken token) { - return 
String.format("Request will not be retried. Retries have been exhausted " - + "(cost: 0, capacity: %d/%d)", - token.capacityAcquired(), - token.capacityRemaining()); - } - - private String acquisitionFailedMessage(AcquireResponse acquireResponse) { - return String.format("Request will not be retried to protect the caller and downstream service. " - + "The cost of retrying (%d) " - + "exceeds the available retry capacity (%d/%d).", - acquireResponse.capacityRequested(), - acquireResponse.capacityRemaining(), - acquireResponse.maxCapacity()); - } - - private void logAcquireInitialToken(AcquireInitialTokenRequest request) { - // Request attempt 1 token acquired (backoff: 0ms, cost: 0, capacity: 500/500) - TokenBucket tokenBucket = tokenBucketStore.tokenBucketForScope(request.scope()); - LOG.debug(() -> String.format("Request attempt 1 token acquired " - + "(backoff: 0ms, cost: 0, capacity: %d/%d)", - tokenBucket.currentCapacity(), tokenBucket.maxCapacity())); - } - - private void logRefreshTokenSuccess(DefaultRetryToken token, AcquireResponse acquireResponse, Duration delay) { - LOG.debug(() -> String.format("Request attempt %d token acquired " - + "(backoff: %dms, cost: %d, capacity: %d/%d)", - token.attempt(), delay.toMillis(), - acquireResponse.capacityAcquired(), - acquireResponse.capacityRemaining(), - acquireResponse.maxCapacity())); - } - - private void logRecordSuccess(DefaultRetryToken token, ReleaseResponse release) { - LOG.debug(() -> String.format("Request attempt %d succeeded (cost: -%d, capacity: %d/%d)", - token.attempt(), release.capacityReleased(), - release.currentCapacity(), release.maxCapacity())); - - } - - private boolean maxAttemptsReached(DefaultRetryToken token) { - return token.attempt() >= maxAttempts; - } - - private boolean isNonRetryableException(RefreshRetryTokenRequest request) { - Throwable failure = request.failure(); - for (Predicate retryPredicate : retryPredicates) { - if (retryPredicate.test(failure)) { - return false; - } - } - return true; - } - - static DefaultRetryToken asStandardRetryToken(RetryToken token) { - return Validate.isInstanceOf(DefaultRetryToken.class, token, - "RetryToken is of unexpected class (%s), " - + "This token was not created by this retry strategy.", - token.getClass().getName()); - } - - private AcquireResponse requestAcquireCapacity(DefaultRetryToken token) { - TokenBucket tokenBucket = tokenBucketStore.tokenBucketForScope(token.scope()); - if (!circuitBreakerEnabled) { - return tokenBucket.tryAcquire(0); - } - return tokenBucket.tryAcquire(exceptionCost); - } - - private DefaultRetryToken refreshToken(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { - DefaultRetryToken token = asStandardRetryToken(request.token()); - return token.toBuilder() - .increaseAttempt() - .state(DefaultRetryToken.TokenState.IN_PROGRESS) - .capacityAcquired(acquireResponse.capacityAcquired()) - .capacityRemaining(acquireResponse.capacityRemaining()) - .addFailure(request.failure()) - .build(); - } - - public static class Builder implements AdaptiveRetryStrategy.Builder { - private List> retryPredicates; - private int maxAttempts; - private Boolean circuitBreakerEnabled; - private int tokenBucketMaxCapacity; - private int exceptionCost; + public static class Builder extends BaseRetryStrategy.Builder implements AdaptiveRetryStrategy.Builder { private Predicate treatAsThrottling; - private BackoffStrategy backoffStrategy; - private TokenBucketStore tokenBucketStore; private RateLimiterTokenBucketStore rateLimiterTokenBucketStore; 
Builder() { - retryPredicates = new ArrayList<>(); } Builder(DefaultAdaptiveRetryStrategy strategy) { - this.retryPredicates = new ArrayList<>(strategy.retryPredicates); - this.maxAttempts = strategy.maxAttempts; - this.circuitBreakerEnabled = strategy.circuitBreakerEnabled; - this.tokenBucketMaxCapacity = strategy.tokenBucketMaxCapacity; - this.exceptionCost = strategy.exceptionCost; + super(strategy); this.treatAsThrottling = strategy.treatAsThrottling; - this.backoffStrategy = strategy.backoffStrategy; - this.tokenBucketStore = strategy.tokenBucketStore; this.rateLimiterTokenBucketStore = strategy.rateLimiterTokenBucketStore; } @Override public Builder retryOnException(Predicate shouldRetry) { - this.retryPredicates.add(shouldRetry); + setRetryOnException(shouldRetry); return this; } @Override public Builder maxAttempts(int maxAttempts) { - this.maxAttempts = maxAttempts; + setMaxAttempts(maxAttempts); return this; } @@ -355,33 +111,33 @@ public Builder treatAsThrottling(Predicate treatAsThrottling) { return this; } - public Builder tokenBucketStore(TokenBucketStore tokenBucketStore) { - this.tokenBucketStore = tokenBucketStore; + public Builder circuitBreakerEnabled(Boolean circuitBreakerEnabled) { + setCircuitBreakerEnabled(circuitBreakerEnabled); return this; } - public Builder rateLimiterTokenBucketStore(RateLimiterTokenBucketStore rateLimiterTokenBucketStore) { - this.rateLimiterTokenBucketStore = rateLimiterTokenBucketStore; + public Builder backoffStrategy(BackoffStrategy backoffStrategy) { + setBackoffStrategy(backoffStrategy); return this; } - public Builder backoffStrategy(BackoffStrategy backoffStrategy) { - this.backoffStrategy = backoffStrategy; + public Builder tokenBucketExceptionCost(int exceptionCost) { + setTokenBucketExceptionCost(exceptionCost); return this; } - public Builder tokenBucketExceptionCost(int exceptionCost) { - this.exceptionCost = exceptionCost; + public Builder rateLimiterTokenBucketStore(RateLimiterTokenBucketStore rateLimiterTokenBucketStore) { + this.rateLimiterTokenBucketStore = rateLimiterTokenBucketStore; return this; } - public Builder circuitBreakerEnabled(Boolean circuitBreakerEnabled) { - this.circuitBreakerEnabled = circuitBreakerEnabled; + public Builder tokenBucketStore(TokenBucketStore tokenBucketStore) { + setTokenBucketStore(tokenBucketStore); return this; } @Override - public DefaultAdaptiveRetryStrategy build() { + public AdaptiveRetryStrategy build() { return new DefaultAdaptiveRetryStrategy(this); } } diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultLegacyRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultLegacyRetryStrategy.java index 45cd30646a69..67cb750b8e8f 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultLegacyRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultLegacyRetryStrategy.java @@ -16,108 +16,31 @@ package software.amazon.awssdk.retries.internal; import java.time.Duration; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; import java.util.function.Predicate; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.retries.LegacyRetryStrategy; -import software.amazon.awssdk.retries.api.AcquireInitialTokenRequest; -import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse; import software.amazon.awssdk.retries.api.BackoffStrategy; -import 
software.amazon.awssdk.retries.api.RecordSuccessRequest; -import software.amazon.awssdk.retries.api.RecordSuccessResponse; import software.amazon.awssdk.retries.api.RefreshRetryTokenRequest; -import software.amazon.awssdk.retries.api.RefreshRetryTokenResponse; -import software.amazon.awssdk.retries.api.RetryToken; -import software.amazon.awssdk.retries.api.TokenAcquisitionFailedException; -import software.amazon.awssdk.retries.api.internal.AcquireInitialTokenResponseImpl; -import software.amazon.awssdk.retries.api.internal.RefreshRetryTokenResponseImpl; -import software.amazon.awssdk.retries.internal.circuitbreaker.AcquireResponse; -import software.amazon.awssdk.retries.internal.circuitbreaker.ReleaseResponse; -import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucket; import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore; import software.amazon.awssdk.utils.Logger; -import software.amazon.awssdk.utils.Validate; -/** - * Implementation of the {@link LegacyRetryStrategy} interface. - */ @SdkInternalApi -public final class DefaultLegacyRetryStrategy implements LegacyRetryStrategy { +public final class DefaultLegacyRetryStrategy + extends BaseRetryStrategy implements LegacyRetryStrategy { private static final Logger LOG = Logger.loggerFor(DefaultLegacyRetryStrategy.class); - - private final List> predicates; - private final int maxAttempts; - private final boolean circuitBreakerEnabled; - private final BackoffStrategy backoffStrategy; private final BackoffStrategy throttlingBackoffStrategy; - private final int exceptionCost; private final int throttlingExceptionCost; private final Predicate treatAsThrottling; - private final TokenBucketStore tokenBucketStore; - - private DefaultLegacyRetryStrategy(Builder builder) { - this.predicates = Collections.unmodifiableList(Validate.paramNotNull(builder.predicates, "predicates")); - this.maxAttempts = Validate.isPositive(Validate.paramNotNull(builder.maxAttempts, "maxAttempts"), "maxAttempts"); - this.circuitBreakerEnabled = builder.circuitBreakerEnabled == null || builder.circuitBreakerEnabled; - this.backoffStrategy = Validate.paramNotNull(builder.backoffStrategy, "backoffStrategy"); - this.throttlingBackoffStrategy = Validate.paramNotNull(builder.throttlingBackoffStrategy, "throttlingBackoffStrategy"); - this.exceptionCost = Validate.paramNotNull(builder.exceptionCost, "exceptionCost"); - this.throttlingExceptionCost = Validate.paramNotNull(builder.throttlingExceptionCost, "throttlingExceptionCost"); - this.treatAsThrottling = Validate.paramNotNull(builder.treatAsThrottling, "treatAsThrottling"); - this.tokenBucketStore = Validate.paramNotNull(builder.tokenBucketStore, "tokenBucketStore"); - } - @Override - public AcquireInitialTokenResponse acquireInitialToken(AcquireInitialTokenRequest request) { - logAcquireInitialToken(request); - return AcquireInitialTokenResponseImpl.create( - DefaultRetryToken.builder().scope(request.scope()).build(), Duration.ZERO); + DefaultLegacyRetryStrategy(Builder builder) { + super(LOG, builder); + this.throttlingExceptionCost = builder.throttlingExceptionCost; + this.throttlingBackoffStrategy = builder.throttlingBackoffStrategy; + this.treatAsThrottling = builder.treatAsThrottling; } @Override - public RefreshRetryTokenResponse refreshRetryToken(RefreshRetryTokenRequest request) { - DefaultRetryToken token = asStandardRetryToken(request.token()); - AcquireResponse acquireResponse = requestAcquireCapacity(request, token); - - // Check if we meet the preconditions needed for 
retrying. These will throw if the expected condition is not meet. - // 1) is retryable? - throwOnNonRetryableException(request, acquireResponse); - // 2) max attempts reached? - throwOnMaxAttemptsReached(request, acquireResponse); - // 3) can we acquire a token? - throwOnAcquisitionFailure(request, acquireResponse); - - // Refresh the retry token and compute the backoff delay. - DefaultRetryToken refreshedToken = refreshToken(request, acquireResponse); - Duration finalDelay = computeBackoff(request, refreshedToken); - logRefreshTokenSuccess(refreshedToken, acquireResponse, finalDelay); - return RefreshRetryTokenResponseImpl.create(refreshedToken, finalDelay); - } - - @Override - public RecordSuccessResponse recordSuccess(RecordSuccessRequest request) { - DefaultRetryToken token = asStandardRetryToken(request.token()); - ReleaseResponse releaseResponse = updateCircuitBreakerTokenBucket(token); - DefaultRetryToken refreshedToken = refreshRetryTokenAfterSuccess(token, releaseResponse); - logRecordSuccess(token, releaseResponse); - return RecordSuccessResponse.create(refreshedToken); - } - - @Override - public Builder toBuilder() { - return new Builder(this); - } - - /** - * Returns a builder to update this retry strategy. - */ - public static Builder builder() { - return new Builder(); - } - - private Duration computeBackoff(RefreshRetryTokenRequest request, DefaultRetryToken token) { + protected Duration computeBackoff(RefreshRetryTokenRequest request, DefaultRetryToken token) { Duration backoff; if (treatAsThrottling.test(request.failure())) { backoff = throttlingBackoffStrategy.computeDelay(token.attempt()); @@ -129,233 +52,79 @@ private Duration computeBackoff(RefreshRetryTokenRequest request, DefaultRetryTo return maxOf(suggested, backoff); } - private Duration maxOf(Duration left, Duration right) { - if (left.compareTo(right) >= 0) { - return left; - } - return right; - } - - private ReleaseResponse updateCircuitBreakerTokenBucket(DefaultRetryToken token) { - TokenBucket bucket = tokenBucketStore.tokenBucketForScope(token.scope()); - int capacityReleased = token.capacityAcquired(); - return bucket.release(capacityReleased); - } - - private DefaultRetryToken refreshRetryTokenAfterSuccess(DefaultRetryToken token, ReleaseResponse releaseResponse) { - return token.toBuilder() - .capacityAcquired(0) - .capacityRemaining(releaseResponse.currentCapacity()) - .state(DefaultRetryToken.TokenState.SUCCEEDED) - .build(); - } - - private void throwOnAcquisitionFailure(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { - DefaultRetryToken token = asStandardRetryToken(request.token()); - if (acquireResponse.acquisitionFailed()) { - Throwable failure = request.failure(); - DefaultRetryToken refreshedToken = - token.toBuilder() - .capacityRemaining(acquireResponse.capacityRemaining()) - .capacityAcquired(acquireResponse.capacityAcquired()) - .state(DefaultRetryToken.TokenState.TOKEN_ACQUISITION_FAILED) - .addFailure(failure) - .build(); - String message = acquisitionFailedMessage(acquireResponse); - LOG.debug(() -> message, failure); - throw new TokenAcquisitionFailedException(message, refreshedToken, failure); - } - } - - private void throwOnMaxAttemptsReached(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { - DefaultRetryToken token = asStandardRetryToken(request.token()); - if (maxAttemptsReached(token)) { - Throwable failure = request.failure(); - DefaultRetryToken refreshedToken = - token.toBuilder() - .capacityRemaining(acquireResponse.capacityRemaining()) - 
.capacityAcquired(acquireResponse.capacityAcquired()) - .state(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) - .addFailure(failure) - .build(); - String message = maxAttemptsReachedMessage(refreshedToken); - LOG.debug(() -> message, failure); - throw new TokenAcquisitionFailedException(message, refreshedToken, failure); - } - } - - private void throwOnNonRetryableException(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { - DefaultRetryToken token = asStandardRetryToken(request.token()); - Throwable failure = request.failure(); - if (isNonRetryableException(request)) { - String message = nonRetryableExceptionMessage(token); - LOG.error(() -> message, failure); - DefaultRetryToken refreshedToken = - token.toBuilder() - .capacityRemaining(acquireResponse.capacityRemaining()) - .capacityAcquired(acquireResponse.capacityAcquired()) - .state(DefaultRetryToken.TokenState.NON_RETRYABLE_EXCEPTION) - .addFailure(failure) - .build(); - throw new TokenAcquisitionFailedException(message, refreshedToken, failure); - } - LOG.debug(() -> nonRetryableExceptionMessage(token), failure); - } - - private String nonRetryableExceptionMessage(DefaultRetryToken token) { - return String.format("Request attempt %d encountered non-retryable failure", token.attempt()); - } - - private String maxAttemptsReachedMessage(DefaultRetryToken token) { - return String.format("Request will not be retried. Retries have been exhausted " - + "(cost: 0, capacity: %d/%d)", - token.capacityAcquired(), - token.capacityRemaining()); - } - - private String acquisitionFailedMessage(AcquireResponse acquireResponse) { - return String.format("Request will not be retried to protect the caller and downstream service. " - + "The cost of retrying (%d) " - + "exceeds the available retry capacity (%d/%d).", - acquireResponse.capacityRequested(), - acquireResponse.capacityRemaining(), - acquireResponse.maxCapacity()); - } - - private void logAcquireInitialToken(AcquireInitialTokenRequest request) { - // Request attempt 1 token acquired (backoff: 0ms, cost: 0, capacity: 500/500) - TokenBucket tokenBucket = tokenBucketStore.tokenBucketForScope(request.scope()); - LOG.debug(() -> String.format("Request attempt 1 token acquired " - + "(backoff: 0ms, cost: 0, capacity: %d/%d)", - tokenBucket.currentCapacity(), tokenBucket.maxCapacity())); - } - - private void logRefreshTokenSuccess(DefaultRetryToken token, AcquireResponse acquireResponse, Duration delay) { - LOG.debug(() -> String.format("Request attempt %d token acquired " - + "(backoff: %dms, cost: %d, capacity: %d/%d)", - token.attempt(), delay.toMillis(), - acquireResponse.capacityAcquired(), - acquireResponse.capacityRemaining(), - acquireResponse.maxCapacity())); - } - - private void logRecordSuccess(DefaultRetryToken token, ReleaseResponse release) { - LOG.debug(() -> String.format("Request attempt %d succeeded (cost: -%d, capacity: %d/%d)", - token.attempt(), release.capacityReleased(), - release.currentCapacity(), release.maxCapacity())); - - } - - private boolean maxAttemptsReached(DefaultRetryToken token) { - return token.attempt() >= maxAttempts; - } - - private boolean isNonRetryableException(RefreshRetryTokenRequest request) { - Throwable failure = request.failure(); - for (Predicate predicate : predicates) { - if (predicate.test(failure)) { - return false; - } - } - return true; - } - - static DefaultRetryToken asStandardRetryToken(RetryToken token) { - return Validate.isInstanceOf(DefaultRetryToken.class, token, - "RetryToken is of unexpected class (%s), " - + 
"This token was not created by this retry strategy.", - token.getClass().getName()); - } - - private AcquireResponse requestAcquireCapacity(RefreshRetryTokenRequest request, DefaultRetryToken token) { - TokenBucket tokenBucket = tokenBucketStore.tokenBucketForScope(token.scope()); - int amountToAcquire = 0; + @Override + protected int exceptionCost(RefreshRetryTokenRequest request) { if (circuitBreakerEnabled) { if (treatAsThrottling.test(request.failure())) { - amountToAcquire = throttlingExceptionCost; - } else { - amountToAcquire = exceptionCost; + return throttlingExceptionCost; } + return exceptionCost; } - return tokenBucket.tryAcquire(amountToAcquire); + return 0; + } + + @Override + public Builder toBuilder() { + return new Builder(this); } - private DefaultRetryToken refreshToken(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { - DefaultRetryToken token = asStandardRetryToken(request.token()); - return token.toBuilder() - .increaseAttempt() - .state(DefaultRetryToken.TokenState.IN_PROGRESS) - .capacityAcquired(acquireResponse.capacityAcquired()) - .capacityRemaining(acquireResponse.capacityRemaining()) - .addFailure(request.failure()) - .build(); + public static Builder builder() { + return new Builder(); } - public static class Builder implements LegacyRetryStrategy.Builder { - private List> predicates; - private Integer maxAttempts; - private Boolean circuitBreakerEnabled; - private Integer exceptionCost; + public static class Builder extends BaseRetryStrategy.Builder implements LegacyRetryStrategy.Builder { + private BackoffStrategy throttlingBackoffStrategy; private Integer throttlingExceptionCost; private Predicate treatAsThrottling; - private BackoffStrategy backoffStrategy; - private BackoffStrategy throttlingBackoffStrategy; - private TokenBucketStore tokenBucketStore; Builder() { - predicates = new ArrayList<>(); } Builder(DefaultLegacyRetryStrategy strategy) { - this.predicates = new ArrayList<>(strategy.predicates); - this.maxAttempts = strategy.maxAttempts; - this.circuitBreakerEnabled = strategy.circuitBreakerEnabled; - this.exceptionCost = strategy.exceptionCost; - this.throttlingExceptionCost = strategy.throttlingExceptionCost; - this.treatAsThrottling = strategy.treatAsThrottling; - this.backoffStrategy = strategy.backoffStrategy; + super(strategy); this.throttlingBackoffStrategy = strategy.throttlingBackoffStrategy; - this.tokenBucketStore = strategy.tokenBucketStore; + this.treatAsThrottling = strategy.treatAsThrottling; + this.throttlingExceptionCost = strategy.throttlingExceptionCost; } @Override public Builder retryOnException(Predicate shouldRetry) { - this.predicates.add(shouldRetry); + setRetryOnException(shouldRetry); return this; } @Override public Builder maxAttempts(int maxAttempts) { - this.maxAttempts = maxAttempts; + setMaxAttempts(maxAttempts); return this; } @Override - public Builder circuitBreakerEnabled(Boolean circuitBreakerEnabled) { - this.circuitBreakerEnabled = circuitBreakerEnabled; + public Builder backoffStrategy(BackoffStrategy backoffStrategy) { + setBackoffStrategy(backoffStrategy); return this; } @Override - public Builder backoffStrategy(BackoffStrategy backoffStrategy) { - this.backoffStrategy = backoffStrategy; + public Builder throttlingBackoffStrategy(BackoffStrategy throttlingBackoffStrategy) { + this.throttlingBackoffStrategy = throttlingBackoffStrategy; return this; } @Override - public Builder treatAsThrottling(Predicate treatAsThrottling) { - this.treatAsThrottling = treatAsThrottling; + public Builder 
circuitBreakerEnabled(Boolean circuitBreakerEnabled) { + setCircuitBreakerEnabled(circuitBreakerEnabled); return this; } @Override - public Builder throttlingBackoffStrategy(BackoffStrategy throttlingBackoffStrategy) { - this.throttlingBackoffStrategy = throttlingBackoffStrategy; + public Builder treatAsThrottling(Predicate treatAsThrottling) { + this.treatAsThrottling = treatAsThrottling; return this; } public Builder tokenBucketExceptionCost(int exceptionCost) { - this.exceptionCost = exceptionCost; + setTokenBucketExceptionCost(exceptionCost); return this; } @@ -365,12 +134,12 @@ public Builder tokenBucketThrottlingExceptionCost(int throttlingExceptionCost) { } public Builder tokenBucketStore(TokenBucketStore tokenBucketStore) { - this.tokenBucketStore = tokenBucketStore; + setTokenBucketStore(tokenBucketStore); return this; } @Override - public DefaultLegacyRetryStrategy build() { + public LegacyRetryStrategy build() { return new DefaultLegacyRetryStrategy(this); } } diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultStandardRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultStandardRetryStrategy.java index abee804a4def..fee90141a455 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultStandardRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultStandardRetryStrategy.java @@ -15,331 +15,76 @@ package software.amazon.awssdk.retries.internal; -import java.time.Duration; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; import java.util.function.Predicate; import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.annotations.ToBuilderIgnoreField; import software.amazon.awssdk.retries.StandardRetryStrategy; -import software.amazon.awssdk.retries.api.AcquireInitialTokenRequest; -import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse; import software.amazon.awssdk.retries.api.BackoffStrategy; -import software.amazon.awssdk.retries.api.RecordSuccessRequest; -import software.amazon.awssdk.retries.api.RecordSuccessResponse; -import software.amazon.awssdk.retries.api.RefreshRetryTokenRequest; -import software.amazon.awssdk.retries.api.RefreshRetryTokenResponse; -import software.amazon.awssdk.retries.api.RetryToken; -import software.amazon.awssdk.retries.api.TokenAcquisitionFailedException; -import software.amazon.awssdk.retries.api.internal.AcquireInitialTokenResponseImpl; -import software.amazon.awssdk.retries.api.internal.RefreshRetryTokenResponseImpl; -import software.amazon.awssdk.retries.internal.circuitbreaker.AcquireResponse; -import software.amazon.awssdk.retries.internal.circuitbreaker.ReleaseResponse; -import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucket; import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore; import software.amazon.awssdk.utils.Logger; -import software.amazon.awssdk.utils.Validate; -/** - * Implementation of the {@link StandardRetryStrategy} interface. 
- */ @SdkInternalApi -public final class DefaultStandardRetryStrategy implements StandardRetryStrategy { +public final class DefaultStandardRetryStrategy + extends BaseRetryStrategy implements StandardRetryStrategy { private static final Logger LOG = Logger.loggerFor(DefaultStandardRetryStrategy.class); - private final List> predicates; - private final int maxAttempts; - private final boolean circuitBreakerEnabled; - private final BackoffStrategy backoffStrategy; - private final int exceptionCost; - private final TokenBucketStore tokenBucketStore; - - private DefaultStandardRetryStrategy(Builder builder) { - this.predicates = Collections.unmodifiableList(Validate.paramNotNull(builder.predicates, "predicates")); - this.maxAttempts = Validate.isPositive(builder.maxAttempts, "maxAttempts"); - this.circuitBreakerEnabled = builder.circuitBreakerEnabled == null || builder.circuitBreakerEnabled; - this.backoffStrategy = Validate.paramNotNull(builder.backoffStrategy, "backoffStrategy"); - this.exceptionCost = builder.exceptionCost; - this.tokenBucketStore = Validate.paramNotNull(builder.tokenBucketStore, "tokenBucketStore"); - } - - @Override - public AcquireInitialTokenResponse acquireInitialToken(AcquireInitialTokenRequest request) { - logAcquireInitialToken(request); - return AcquireInitialTokenResponseImpl.create( - DefaultRetryToken.builder().scope(request.scope()).build(), Duration.ZERO); - } - - @Override - public RefreshRetryTokenResponse refreshRetryToken(RefreshRetryTokenRequest request) { - DefaultRetryToken token = asStandardRetryToken(request.token()); - AcquireResponse acquireResponse = requestAcquireCapacity(token); - - // Check if we meet the preconditions needed for retrying. These will throw if the expected condition is not meet. - // 1) is retryable? - throwOnNonRetryableException(request, acquireResponse); - // 2) max attempts reached? - throwOnMaxAttemptsReached(request, acquireResponse); - // 3) can we acquire a token? - throwOnAcquisitionFailure(request, acquireResponse); - - // Refresh the retry token and compute the backoff delay. - DefaultRetryToken refreshedToken = refreshToken(request, acquireResponse); - Duration backoff = backoffStrategy.computeDelay(refreshedToken.attempt()); - - // Take the max delay between the suggested delay and the backoff delay. - Duration suggested = request.suggestedDelay().orElse(Duration.ZERO); - Duration finalDelay = maxOf(suggested, backoff); - - logRefreshTokenSuccess(refreshedToken, acquireResponse, finalDelay); - return RefreshRetryTokenResponseImpl.create(refreshedToken, finalDelay); - } - - @Override - public RecordSuccessResponse recordSuccess(RecordSuccessRequest request) { - DefaultRetryToken token = asStandardRetryToken(request.token()); - - // Update the circuit breaker token bucket. - ReleaseResponse releaseResponse = updateCircuitBreakerTokenBucket(token); - - // Refresh the retry token and return - DefaultRetryToken refreshedToken = refreshRetryTokenAfterSuccess(token, releaseResponse); - - // Log success and return. - logRecordSuccess(token, releaseResponse); - return RecordSuccessResponse.create(refreshedToken); + DefaultStandardRetryStrategy(Builder builder) { + super(LOG, builder); } @Override - @ToBuilderIgnoreField({"DEFAULT_EXCEPTION_TOKEN_COST", "DEFAULT_TOKEN_BUCKET_SIZE"}) public Builder toBuilder() { return new Builder(this); } - /** - * Returns a builder to update this retry strategy. 
- */ public static Builder builder() { return new Builder(); } - private Duration maxOf(Duration left, Duration right) { - if (left.compareTo(right) >= 0) { - return left; - } - return right; - } - - private ReleaseResponse updateCircuitBreakerTokenBucket(DefaultRetryToken token) { - TokenBucket bucket = tokenBucketStore.tokenBucketForScope(token.scope()); - int capacityReleased = token.capacityAcquired(); - return bucket.release(capacityReleased); - } - - private DefaultRetryToken refreshRetryTokenAfterSuccess(DefaultRetryToken token, ReleaseResponse releaseResponse) { - return token.toBuilder() - .capacityAcquired(0) - .capacityRemaining(releaseResponse.currentCapacity()) - .state(DefaultRetryToken.TokenState.SUCCEEDED) - .build(); - } - - private void throwOnAcquisitionFailure(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { - DefaultRetryToken token = asStandardRetryToken(request.token()); - if (acquireResponse.acquisitionFailed()) { - Throwable failure = request.failure(); - DefaultRetryToken refreshedToken = - token.toBuilder() - .capacityRemaining(acquireResponse.capacityRemaining()) - .capacityAcquired(acquireResponse.capacityAcquired()) - .state(DefaultRetryToken.TokenState.TOKEN_ACQUISITION_FAILED) - .addFailure(failure) - .build(); - String message = acquisitionFailedMessage(acquireResponse); - LOG.error(() -> message, failure); - throw new TokenAcquisitionFailedException(message, refreshedToken, failure); - } - } - - private void throwOnMaxAttemptsReached(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { - DefaultRetryToken token = asStandardRetryToken(request.token()); - if (maxAttemptsReached(token)) { - Throwable failure = request.failure(); - DefaultRetryToken refreshedToken = - token.toBuilder() - .capacityRemaining(acquireResponse.capacityRemaining()) - .capacityAcquired(acquireResponse.capacityAcquired()) - .state(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) - .addFailure(failure) - .build(); - String message = maxAttemptsReachedMessage(refreshedToken); - LOG.error(() -> message, failure); - throw new TokenAcquisitionFailedException(message, refreshedToken, failure); - } - } - - private void throwOnNonRetryableException(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { - DefaultRetryToken token = asStandardRetryToken(request.token()); - Throwable failure = request.failure(); - if (isNonRetryableException(request)) { - String message = nonRetryableExceptionMessage(token); - LOG.error(() -> message, failure); - DefaultRetryToken refreshedToken = - token.toBuilder() - .capacityRemaining(acquireResponse.capacityRemaining()) - .capacityAcquired(acquireResponse.capacityAcquired()) - .state(DefaultRetryToken.TokenState.NON_RETRYABLE_EXCEPTION) - .addFailure(failure) - .build(); - throw new TokenAcquisitionFailedException(message, refreshedToken, failure); - } - int attempt = token.attempt(); - LOG.warn(() -> String.format("Request attempt %d encountered retryable failure.", attempt), failure); - } - - private String nonRetryableExceptionMessage(DefaultRetryToken token) { - return String.format("Request attempt %d encountered non-retryable failure", token.attempt()); - } - - private String maxAttemptsReachedMessage(DefaultRetryToken token) { - return String.format("Request will not be retried. 
Retries have been exhausted " - + "(cost: 0, capacity: %d/%d)", - token.capacityAcquired(), - token.capacityRemaining()); - } - - private String acquisitionFailedMessage(AcquireResponse acquireResponse) { - return String.format("Request will not be retried to protect the caller and downstream service. " - + "The cost of retrying (%d) " - + "exceeds the available retry capacity (%d/%d).", - acquireResponse.capacityRequested(), - acquireResponse.capacityRemaining(), - acquireResponse.maxCapacity()); - } - - private void logAcquireInitialToken(AcquireInitialTokenRequest request) { - // Request attempt 1 token acquired (backoff: 0ms, cost: 0, capacity: 500/500) - TokenBucket tokenBucket = tokenBucketStore.tokenBucketForScope(request.scope()); - LOG.debug(() -> String.format("Request attempt 1 token acquired " - + "(backoff: 0ms, cost: 0, capacity: %d/%d)", - tokenBucket.currentCapacity(), tokenBucket.maxCapacity())); - } - - private void logRefreshTokenSuccess(DefaultRetryToken token, AcquireResponse acquireResponse, Duration delay) { - LOG.debug(() -> String.format("Request attempt %d token acquired " - + "(backoff: %dms, cost: %d, capacity: %d/%d)", - token.attempt(), delay.toMillis(), - acquireResponse.capacityAcquired(), - acquireResponse.capacityRemaining(), - acquireResponse.maxCapacity())); - } - - private void logRecordSuccess(DefaultRetryToken token, ReleaseResponse release) { - LOG.debug(() -> String.format("Request attempt %d succeeded (cost: -%d, capacity: %d/%d)", - token.attempt(), release.capacityReleased(), - release.currentCapacity(), release.maxCapacity())); - - } - - private boolean maxAttemptsReached(DefaultRetryToken token) { - return token.attempt() >= maxAttempts; - } - - private boolean isNonRetryableException(RefreshRetryTokenRequest request) { - Throwable failure = request.failure(); - for (Predicate predicate : predicates) { - if (predicate.test(failure)) { - return false; - } - } - return true; - } - - static DefaultRetryToken asStandardRetryToken(RetryToken token) { - return Validate.isInstanceOf(DefaultRetryToken.class, token, - "RetryToken is of unexpected class (%s), " - + "This token was not created by this retry strategy.", - token.getClass().getName()); - } - - private AcquireResponse requestAcquireCapacity(DefaultRetryToken token) { - TokenBucket tokenBucket = tokenBucketStore.tokenBucketForScope(token.scope()); - if (!circuitBreakerEnabled) { - return tokenBucket.tryAcquire(0); - } - return tokenBucket.tryAcquire(exceptionCost); - } - - private DefaultRetryToken refreshToken(RefreshRetryTokenRequest request, AcquireResponse acquireResponse) { - DefaultRetryToken token = asStandardRetryToken(request.token()); - return token.toBuilder() - .increaseAttempt() - .state(DefaultRetryToken.TokenState.IN_PROGRESS) - .capacityAcquired(acquireResponse.capacityAcquired()) - .capacityRemaining(acquireResponse.capacityRemaining()) - .addFailure(request.failure()) - .build(); - } - - public static class Builder implements StandardRetryStrategy.Builder { - private static final int DEFAULT_TOKEN_BUCKET_SIZE = 500; - private List> predicates; - private int maxAttempts; - private Boolean circuitBreakerEnabled; - private int exceptionCost; - private BackoffStrategy backoffStrategy; - private TokenBucketStore tokenBucketStore; + public static class Builder extends BaseRetryStrategy.Builder implements StandardRetryStrategy.Builder { Builder() { - predicates = new ArrayList<>(); } Builder(DefaultStandardRetryStrategy strategy) { - this.predicates = new 
ArrayList<>(strategy.predicates); - this.maxAttempts = strategy.maxAttempts; - this.circuitBreakerEnabled = strategy.circuitBreakerEnabled; - this.exceptionCost = strategy.exceptionCost; - this.backoffStrategy = strategy.backoffStrategy; - this.tokenBucketStore = strategy.tokenBucketStore; + super(strategy); } @Override public Builder retryOnException(Predicate shouldRetry) { - this.predicates.add(shouldRetry); + setRetryOnException(shouldRetry); return this; } @Override public Builder maxAttempts(int maxAttempts) { - this.maxAttempts = maxAttempts; + setMaxAttempts(maxAttempts); return this; } @Override - public Builder circuitBreakerEnabled(Boolean circuitBreakerEnabled) { - this.circuitBreakerEnabled = circuitBreakerEnabled; + public Builder backoffStrategy(BackoffStrategy backoffStrategy) { + setBackoffStrategy(backoffStrategy); return this; } @Override - public Builder backoffStrategy(BackoffStrategy backoffStrategy) { - this.backoffStrategy = backoffStrategy; + public Builder circuitBreakerEnabled(Boolean circuitBreakerEnabled) { + setCircuitBreakerEnabled(circuitBreakerEnabled); return this; } public Builder tokenBucketExceptionCost(int exceptionCost) { - this.exceptionCost = exceptionCost; + setTokenBucketExceptionCost(exceptionCost); return this; } public Builder tokenBucketStore(TokenBucketStore tokenBucketStore) { - this.tokenBucketStore = tokenBucketStore; + setTokenBucketStore(tokenBucketStore); return this; } @Override - public DefaultStandardRetryStrategy build() { + public StandardRetryStrategy build() { return new DefaultStandardRetryStrategy(this); } } diff --git a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/RetryStrategyCommonTest.java b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/RetryStrategyCommonTest.java index 6ec0dfcdf6cf..e9fc3f8b56fc 100644 --- a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/RetryStrategyCommonTest.java +++ b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/RetryStrategyCommonTest.java @@ -107,32 +107,32 @@ static Stream buildCases(Function defaultTestCaseSup .configure(b -> b.maxAttempts(3)) .configure(b -> b.retryOnException(IllegalArgumentException.class)) .givenExceptions(IAE, IAE, IAE) - // Acquire (cost * 3) and then return zero - .expectCapacity(TEST_BUCKET_CAPACITY - (TEST_EXCEPTION_COST * 3)) + // Acquire (cost * 2) + .expectCapacity(TEST_BUCKET_CAPACITY - (TEST_EXCEPTION_COST * 2)) .expectState(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) .expectThrows() , testCaseSupplier.apply("Fails when 4 exceptions are thrown out max of 3") .configure(b -> b.maxAttempts(3)) .configure(b -> b.retryOnException(IllegalArgumentException.class)) .givenExceptions(IAE, IAE, IAE, IAE) - // Acquire (cost * 3) and then return zero - .expectCapacity(TEST_BUCKET_CAPACITY - (TEST_EXCEPTION_COST * 3)) + // Acquire (cost * 2) + .expectCapacity(TEST_BUCKET_CAPACITY - (TEST_EXCEPTION_COST * 2)) .expectState(DefaultRetryToken.TokenState.MAX_RETRIES_REACHED) .expectThrows() , testCaseSupplier.apply("Fails when non-retryable exception throw in the 1st attempt") .configure(b -> b.maxAttempts(3)) .configure(b -> b.retryOnException(IllegalArgumentException.class)) .givenExceptions(RTE) - // Acquire (cost * 1) and then return zero - .expectCapacity(TEST_BUCKET_CAPACITY - TEST_EXCEPTION_COST) + // Acquire (cost * 1) + .expectCapacity(TEST_BUCKET_CAPACITY) .expectState(DefaultRetryToken.TokenState.NON_RETRYABLE_EXCEPTION) .expectThrows() , testCaseSupplier.apply("Fails when 
non-retryable exception throw in the 2nd attempt") .configure(b -> b.maxAttempts(3)) .configure(b -> b.retryOnException(IllegalArgumentException.class)) .givenExceptions(IAE, RTE) - // Acquire (cost * 1) and then return zero - .expectCapacity(TEST_BUCKET_CAPACITY - (TEST_EXCEPTION_COST * 2)) + // Acquire (cost * 1) + .expectCapacity(TEST_BUCKET_CAPACITY - TEST_EXCEPTION_COST) .expectState(DefaultRetryToken.TokenState.NON_RETRYABLE_EXCEPTION) .expectThrows() , testCaseSupplier.apply("Exhausts the token bucket.") From c434b2641514a5638c561a0f539b1f4acea3451d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20Sugawara=20=28=E2=88=A9=EF=BD=80-=C2=B4=29?= =?UTF-8?q?=E2=8A=83=E2=94=81=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E?= Date: Thu, 1 Jun 2023 17:57:01 -0700 Subject: [PATCH 11/32] Update sdk version --- core/retries-api/pom.xml | 2 +- core/retries/pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/retries-api/pom.xml b/core/retries-api/pom.xml index 51f7b823687e..84d544f7e001 100644 --- a/core/retries-api/pom.xml +++ b/core/retries-api/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.73-SNAPSHOT + 2.20.78-SNAPSHOT 4.0.0 diff --git a/core/retries/pom.xml b/core/retries/pom.xml index f1ffbc2ec620..94256847313c 100644 --- a/core/retries/pom.xml +++ b/core/retries/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.73-SNAPSHOT + 2.20.78-SNAPSHOT 4.0.0 From 03817c3efd03311d43a876afe4b24787798132a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20Sugawara=20=28=E2=88=A9=EF=BD=80-=C2=B4=29?= =?UTF-8?q?=E2=8A=83=E2=94=81=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E?= Date: Fri, 2 Jun 2023 16:31:34 -0700 Subject: [PATCH 12/32] Fix the retry condition to just look for the initial cause --- .../amazon/awssdk/retries/api/RetryStrategy.java | 6 ++---- .../awssdk/retries/api/RetryStrategyBuilderTest.java | 10 +--------- 2 files changed, 3 insertions(+), 13 deletions(-) diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java index 8afeddbd461e..6361ffe171cf 100644 --- a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java @@ -127,11 +127,10 @@ default B retryOnExceptionOrCause(Class throwable) { return true; } Throwable cause = t.getCause(); - while (cause != null) { + if (cause != null) { if (cause.getClass() == throwable) { return true; } - cause = cause.getCause(); } return false; }); @@ -147,11 +146,10 @@ default B retryOnExceptionOrCauseInstanceOf(Class throwable return true; } Throwable cause = t.getCause(); - while (cause != null) { + if (cause != null) { if (throwable.isAssignableFrom(cause.getClass())) { return true; } - cause = cause.getCause(); } return false; }); diff --git a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java index 025d867d9263..016d9f56f50e 100644 --- a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java +++ b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java @@ -58,11 +58,7 @@ static Collection parameters() { .expectShouldRetry() , new TestCase() .configure(b -> b.retryOnExceptionOrCause(IllegalArgumentException.class)) - .givenThrowable(new 
RuntimeException(new RuntimeException(new IllegalArgumentException()))) - .expectShouldRetry() - , new TestCase() - .configure(b -> b.retryOnExceptionOrCause(IllegalArgumentException.class)) - .givenThrowable(new RuntimeException(new RuntimeException(new NumberFormatException()))) + .givenThrowable(new RuntimeException(new NumberFormatException())) .expectShouldNotRetry() , new TestCase() .configure(b -> b.retryOnExceptionInstanceOf(IllegalArgumentException.class)) @@ -96,10 +92,6 @@ static Collection parameters() { .configure(b -> b.retryOnExceptionOrCauseInstanceOf(IllegalArgumentException.class)) .givenThrowable(new NumberFormatException()) .expectShouldRetry() - , new TestCase() - .configure(b -> b.retryOnExceptionOrCauseInstanceOf(IllegalArgumentException.class)) - .givenThrowable(new RuntimeException(new RuntimeException(new NumberFormatException()))) - .expectShouldRetry() ); } From 811c9b10891b6e50832a48f7c76808148709813f Mon Sep 17 00:00:00 2001 From: Manuel Sugawara Date: Wed, 7 Jun 2023 15:05:56 -0700 Subject: [PATCH 13/32] Add new sync and async retryable stages (#4062) * Add new sync and async retryable stages * Address PR comments --- core/aws-core/pom.xml | 10 + .../builder/AwsDefaultClientBuilder.java | 32 ++ .../awscore/retry/AwsRetryStrategy.java | 138 +++++++++ .../awssdk/retries/DefaultRetryStrategy.java | 9 + core/sdk-core/pom.xml | 10 + .../builder/SdkDefaultClientBuilder.java | 51 +++- .../config/ClientOverrideConfiguration.java | 55 ++++ .../core/client/config/SdkClientOption.java | 7 + .../config/SdkClientOptionValidation.java | 2 + .../InternalCoreExecutionAttribute.java | 4 + .../internal/http/AmazonAsyncHttpClient.java | 5 +- .../internal/http/AmazonSyncHttpClient.java | 4 +- .../pipeline/stages/ApplyUserAgentStage.java | 10 +- .../pipeline/stages/AsyncRetryableStage2.java | 155 ++++++++++ .../http/pipeline/stages/RetryableStage2.java | 90 ++++++ .../stages/utils/RetryableStageHelper2.java | 289 ++++++++++++++++++ .../internal/retry/RetryPolicyAdapter.java | 190 ++++++++++++ .../retry/SdkDefaultRetryStrategy.java | 178 +++++++++++ .../backoff/FullJitterBackoffStrategy.java | 1 + .../AsyncClientHandlerExceptionTest.java | 2 + .../handler/AsyncClientHandlerTest.java | 2 + .../core/http/AmazonHttpClientTest.java | 6 +- .../response/NullErrorResponseHandler.java | 4 +- .../AsyncHttpClientApiCallTimeoutTests.java | 3 + .../HttpClientApiCallAttemptTimeoutTest.java | 2 + .../timers/HttpClientApiCallTimeoutTest.java | 2 + .../src/test/java/utils/HttpTestUtils.java | 43 ++- .../services/retry/RetryHeaderTestSuite.java | 2 + 28 files changed, 1283 insertions(+), 23 deletions(-) create mode 100644 core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncRetryableStage2.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/RetryableStage2.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/utils/RetryableStageHelper2.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/RetryPolicyAdapter.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/SdkDefaultRetryStrategy.java diff --git a/core/aws-core/pom.xml b/core/aws-core/pom.xml index 0373de20bccb..63b196faccdd 100644 --- a/core/aws-core/pom.xml +++ b/core/aws-core/pom.xml @@ -78,6 +78,16 @@ 
utils ${awsjavasdk.version} + + software.amazon.awssdk + retries-api + ${awsjavasdk.version} + + + software.amazon.awssdk + retries + ${awsjavasdk.version} + software.amazon.eventstream eventstream diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java index c41e604afccb..8460e4975f1e 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java @@ -39,6 +39,7 @@ import software.amazon.awssdk.awscore.internal.defaultsmode.DefaultsModeConfiguration; import software.amazon.awssdk.awscore.internal.defaultsmode.DefaultsModeResolver; import software.amazon.awssdk.awscore.retry.AwsRetryPolicy; +import software.amazon.awssdk.awscore.retry.AwsRetryStrategy; import software.amazon.awssdk.core.client.builder.SdkDefaultClientBuilder; import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; @@ -53,6 +54,7 @@ import software.amazon.awssdk.regions.ServiceMetadata; import software.amazon.awssdk.regions.ServiceMetadataAdvancedOption; import software.amazon.awssdk.regions.providers.DefaultAwsRegionProviderChain; +import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.utils.AttributeMap; import software.amazon.awssdk.utils.CollectionUtils; import software.amazon.awssdk.utils.Logger; @@ -196,6 +198,7 @@ protected final SdkClientConfiguration finalizeChildConfiguration(SdkClientConfi .option(SdkClientOption.EXECUTION_INTERCEPTORS, addAwsInterceptors(configuration)) .option(AwsClientOption.SIGNING_REGION, resolveSigningRegion(configuration)) .option(SdkClientOption.RETRY_POLICY, resolveAwsRetryPolicy(configuration)) + .option(SdkClientOption.RETRY_STRATEGY, resolveAwsRetryStrategy(configuration)) .build(); } @@ -375,6 +378,35 @@ private RetryPolicy resolveAwsRetryPolicy(SdkClientConfiguration config) { .defaultRetryMode(config.option(SdkClientOption.DEFAULT_RETRY_MODE)) .resolve(); return AwsRetryPolicy.forRetryMode(retryMode); + // TODO: fixme This will be changed like this to pick the configured retry strategy + // if no retry policy is configured. + /* + RetryPolicy policy = config.option(SdkClientOption.RETRY_POLICY); + + if (policy != null) { + if (policy.additionalRetryConditionsAllowed()) { + return AwsRetryPolicy.addRetryConditions(policy); + } else { + return policy; + } + } + + // If we don't have a configured retry policy we will use the configured retry strategy instead. 
+ return null; + */ + } + + private RetryStrategy resolveAwsRetryStrategy(SdkClientConfiguration config) { + RetryStrategy strategy = config.option(SdkClientOption.RETRY_STRATEGY); + if (strategy != null) { + return AwsRetryStrategy.addRetryConditions(strategy); + } + RetryMode retryMode = RetryMode.resolver() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultRetryMode(config.option(SdkClientOption.DEFAULT_RETRY_MODE)) + .resolve(); + return AwsRetryStrategy.forRetryMode(retryMode); } @Override diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java new file mode 100644 index 000000000000..d10dee1f623b --- /dev/null +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java @@ -0,0 +1,138 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.awscore.retry; + +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.awscore.exception.AwsServiceException; +import software.amazon.awssdk.awscore.internal.AwsErrorCode; +import software.amazon.awssdk.core.internal.retry.SdkDefaultRetryStrategy; +import software.amazon.awssdk.core.retry.RetryMode; +import software.amazon.awssdk.retries.AdaptiveRetryStrategy; +import software.amazon.awssdk.retries.DefaultRetryStrategy; +import software.amazon.awssdk.retries.LegacyRetryStrategy; +import software.amazon.awssdk.retries.StandardRetryStrategy; +import software.amazon.awssdk.retries.api.RetryStrategy; + +/** + * Retry strategies used by clients when communicating with AWS services. + */ +@SdkPublicApi +public final class AwsRetryStrategy { + + private AwsRetryStrategy() { + } + + /** + * Retrieve the {@link SdkDefaultRetryStrategy#defaultRetryStrategy()} with AWS-specific conditions added. + * + * @return The default retry strategy. + */ + public static RetryStrategy defaultRetryStrategy() { + return forRetryMode(RetryMode.defaultRetryMode()); + } + + /** + * Retrieve the appropriate retry strategy for the retry mode with AWS-specific conditions added. + * + * @param mode The retry mode for which we want to create a retry strategy. + * @return A retry strategy for the given retry mode. + */ + public static RetryStrategy forRetryMode(RetryMode mode) { + switch (mode) { + case STANDARD: + return standardRetryStrategy(); + case ADAPTIVE: + return adaptiveRetryStrategy(); + case LEGACY: + return legacyRetryStrategy(); + default: + throw new IllegalArgumentException("unknown retry mode: " + mode); + } + } + + /** + * Update the provided {@link RetryStrategy} to add AWS-specific conditions. 
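/*
 * Illustrative usage sketch, not part of this patch: how addRetryConditions(...) can layer the
 * AWS retryable error codes on top of a strategy the caller built themselves. It assumes the
 * StandardRetryStrategy builder introduced earlier in this patch series; treat it as a sketch of
 * the intended call pattern rather than the SDK's own wiring.
 */
import java.io.IOException;
import software.amazon.awssdk.awscore.retry.AwsRetryStrategy;
import software.amazon.awssdk.retries.DefaultRetryStrategy;
import software.amazon.awssdk.retries.StandardRetryStrategy;
import software.amazon.awssdk.retries.api.RetryStrategy;

class AwsRetryConditionsSketch {
    static RetryStrategy awsAwareStrategy() {
        // A plain standard strategy: up to 4 attempts, retrying I/O failures.
        StandardRetryStrategy base = DefaultRetryStrategy.standardStrategyBuilder()
                                                         .maxAttempts(4)
                                                         .retryOnException(t -> t instanceof IOException)
                                                         .build();
        // addRetryConditions(...) appends the AWS retryable-error-code condition to the strategy.
        return AwsRetryStrategy.addRetryConditions(base);
    }
}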
+ * + * @param strategy The strategy to update + * @return The updated strategy + */ + public static RetryStrategy addRetryConditions(RetryStrategy strategy) { + return strategy.toBuilder() + .retryOnException(AwsRetryStrategy::retryOnAwsRetryableErrors) + .build(); + } + + /** + * Returns a retry strategy that does not retry. + * + * @return A retry strategy that does not retry. + */ + public static RetryStrategy none() { + return DefaultRetryStrategy.none(); + } + + + /** + * Returns a {@link StandardRetryStrategy} with AWS-specific conditions added. + * + * @return A {@link StandardRetryStrategy} with AWS-specific conditions added. + */ + public static StandardRetryStrategy standardRetryStrategy() { + StandardRetryStrategy.Builder builder = SdkDefaultRetryStrategy.standardRetryStrategyBuilder(); + return configure(builder).build(); + } + + /** + * Returns a {@link LegacyRetryStrategy} with AWS-specific conditions added. + * + * @return A {@link LegacyRetryStrategy} with AWS-specific conditions added. + */ + public static LegacyRetryStrategy legacyRetryStrategy() { + LegacyRetryStrategy.Builder builder = SdkDefaultRetryStrategy.legacyRetryStrategyBuilder(); + return configure(builder) + .build(); + } + + /** + * Returns an {@link AdaptiveRetryStrategy} with AWS-specific conditions added. + * + * @return An {@link AdaptiveRetryStrategy} with AWS-specific conditions added. + */ + public static AdaptiveRetryStrategy adaptiveRetryStrategy() { + AdaptiveRetryStrategy.Builder builder = SdkDefaultRetryStrategy.adaptiveRetryStrategyBuilder(); + return configure(builder) + .build(); + } + + /** + * Configures a retry strategy using its builder to add AWS-specific retry exceptions. + * + * @param builder The builder to add the AWS-specific retry exceptions + * @return The given builder + * @param The type of the builder extending {@link RetryStrategy.Builder} + */ + public static > T configure(T builder) { + return builder.retryOnException(AwsRetryStrategy::retryOnAwsRetryableErrors); + } + + private static boolean retryOnAwsRetryableErrors(Throwable ex) { + if (ex instanceof AwsServiceException) { + AwsServiceException exception = (AwsServiceException) ex; + return AwsErrorCode.RETRYABLE_ERROR_CODES.contains(exception.awsErrorDetails().errorCode()); + } + return false; + } +} diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java index 7de8cfc66722..2900a78c7765 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java @@ -29,6 +29,15 @@ public final class DefaultRetryStrategy { private DefaultRetryStrategy() { } + /** + * Creates a non-retrying strategy. + */ + public static StandardRetryStrategy none() { + return standardStrategyBuilder() + .maxAttempts(1) + .build(); + } + /** * Create a new builder for a {@link StandardRetryStrategy}. 
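/*
 * Illustrative sketch, not part of this patch: DefaultRetryStrategy.none() as the opt-out strategy.
 * Because it is built with maxAttempts(1), a failed first attempt is surfaced immediately instead of
 * being retried. Wiring it through ClientOverrideConfiguration assumes the retryStrategy(...) setter
 * added later in this patch series.
 */
import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
import software.amazon.awssdk.retries.DefaultRetryStrategy;

class NoRetriesSketch {
    static ClientOverrideConfiguration noRetries() {
        // Single-attempt strategy: effectively disables retries for clients using this configuration.
        return ClientOverrideConfiguration.builder()
                                          .retryStrategy(DefaultRetryStrategy.none())
                                          .build();
    }
}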
* diff --git a/core/sdk-core/pom.xml b/core/sdk-core/pom.xml index 9ea52576ae6b..da1c25e7f3bc 100644 --- a/core/sdk-core/pom.xml +++ b/core/sdk-core/pom.xml @@ -61,6 +61,16 @@ profiles ${awsjavasdk.version} + + software.amazon.awssdk + retries-api + ${awsjavasdk.version} + + + software.amazon.awssdk + retries + ${awsjavasdk.version} + org.slf4j slf4j-api diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java index 18fcc1e52f2e..2dfc816c10a0 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java @@ -39,6 +39,7 @@ import static software.amazon.awssdk.core.client.config.SdkClientOption.PROFILE_FILE_SUPPLIER; import static software.amazon.awssdk.core.client.config.SdkClientOption.PROFILE_NAME; import static software.amazon.awssdk.core.client.config.SdkClientOption.RETRY_POLICY; +import static software.amazon.awssdk.core.client.config.SdkClientOption.RETRY_STRATEGY; import static software.amazon.awssdk.core.client.config.SdkClientOption.SCHEDULED_EXECUTOR_SERVICE; import static software.amazon.awssdk.core.client.config.SdkClientOption.SIGNER_OVERRIDDEN; import static software.amazon.awssdk.core.client.config.SdkClientOption.SYNC_HTTP_CLIENT; @@ -77,6 +78,7 @@ import software.amazon.awssdk.core.internal.interceptor.HttpChecksumRequiredInterceptor; import software.amazon.awssdk.core.internal.interceptor.HttpChecksumValidationInterceptor; import software.amazon.awssdk.core.internal.interceptor.SyncHttpChecksumInTrailerInterceptor; +import software.amazon.awssdk.core.internal.retry.SdkDefaultRetryStrategy; import software.amazon.awssdk.core.retry.RetryMode; import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.core.util.SdkUserAgent; @@ -89,6 +91,10 @@ import software.amazon.awssdk.profiles.ProfileFile; import software.amazon.awssdk.profiles.ProfileFileSupplier; import software.amazon.awssdk.profiles.ProfileFileSystemSetting; +import software.amazon.awssdk.retries.AdaptiveRetryStrategy; +import software.amazon.awssdk.retries.LegacyRetryStrategy; +import software.amazon.awssdk.retries.StandardRetryStrategy; +import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.utils.AttributeMap; import software.amazon.awssdk.utils.Either; import software.amazon.awssdk.utils.ScheduledExecutorUtils; @@ -226,6 +232,7 @@ private SdkClientConfiguration setOverrides(SdkClientConfiguration configuration builder.option(SCHEDULED_EXECUTOR_SERVICE, clientOverrideConfiguration.scheduledExecutorService().orElse(null)); builder.option(EXECUTION_INTERCEPTORS, clientOverrideConfiguration.executionInterceptors()); builder.option(RETRY_POLICY, clientOverrideConfiguration.retryPolicy().orElse(null)); + builder.option(RETRY_STRATEGY, clientOverrideConfiguration.retryStrategy().orElse(null)); builder.option(ADDITIONAL_HTTP_HEADERS, clientOverrideConfiguration.headers()); builder.option(SIGNER, clientOverrideConfiguration.advancedOption(SIGNER).orElse(null)); builder.option(USER_AGENT_SUFFIX, clientOverrideConfiguration.advancedOption(USER_AGENT_SUFFIX).orElse(null)); @@ -314,21 +321,40 @@ private SdkClientConfiguration finalizeAsyncConfiguration(SdkClientConfiguration */ private SdkClientConfiguration finalizeConfiguration(SdkClientConfiguration config) 
{ RetryPolicy retryPolicy = resolveRetryPolicy(config); + RetryStrategy retryStrategy = resolveRetryStrategy(config); + String retryMode = resolveRetryMode(retryPolicy, retryStrategy); return config.toBuilder() .option(SCHEDULED_EXECUTOR_SERVICE, resolveScheduledExecutorService(config)) .option(EXECUTION_INTERCEPTORS, resolveExecutionInterceptors(config)) .option(RETRY_POLICY, retryPolicy) - .option(CLIENT_USER_AGENT, resolveClientUserAgent(config, retryPolicy)) + .option(RETRY_STRATEGY, retryStrategy) + .option(CLIENT_USER_AGENT, resolveClientUserAgent(config, retryMode)) .build(); } - private String resolveClientUserAgent(SdkClientConfiguration config, RetryPolicy retryPolicy) { + private String resolveRetryMode(RetryPolicy retryPolicy, RetryStrategy retryStrategy) { + if (retryPolicy != null) { + return retryPolicy.retryMode().toString(); + } + if (retryStrategy instanceof StandardRetryStrategy) { + return RetryMode.STANDARD.toString(); + } + if (retryStrategy instanceof LegacyRetryStrategy) { + return RetryMode.LEGACY.toString(); + } + if (retryStrategy instanceof AdaptiveRetryStrategy) { + return RetryMode.ADAPTIVE.toString(); + } + return "UnknownRetryMode"; + } + + private String resolveClientUserAgent(SdkClientConfiguration config, String retryMode) { return ApplyUserAgentStage.resolveClientUserAgent(config.option(USER_AGENT_PREFIX), config.option(INTERNAL_USER_AGENT), config.option(CLIENT_TYPE), config.option(SYNC_HTTP_CLIENT), config.option(ASYNC_HTTP_CLIENT), - retryPolicy); + retryMode); } private RetryPolicy resolveRetryPolicy(SdkClientConfiguration config) { @@ -343,6 +369,25 @@ private RetryPolicy resolveRetryPolicy(SdkClientConfiguration config) { .defaultRetryMode(config.option(SdkClientOption.DEFAULT_RETRY_MODE)) .resolve(); return RetryPolicy.forRetryMode(retryMode); + // TODO: fixme This will be changed like this to pick the configured retry strategy + // if no retry policy is configured. 
+ /* + return config.option(SdkClientOption.RETRY_POLICY); + */ + } + + private RetryStrategy resolveRetryStrategy(SdkClientConfiguration config) { + RetryStrategy strategy = config.option(RETRY_STRATEGY); + if (strategy != null) { + return strategy; + } + + RetryMode retryMode = RetryMode.resolver() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultRetryMode(config.option(SdkClientOption.DEFAULT_RETRY_MODE)) + .resolve(); + return SdkDefaultRetryStrategy.forRetryMode(retryMode); } /** diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java index 83cf2317038d..9f3f7dfbb4a5 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java @@ -31,12 +31,14 @@ import software.amazon.awssdk.core.interceptor.ExecutionAttribute; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.internal.retry.SdkDefaultRetryStrategy; import software.amazon.awssdk.core.retry.RetryMode; import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.core.sync.ResponseTransformer; import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.profiles.ProfileFile; import software.amazon.awssdk.profiles.ProfileFileSystemSetting; +import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.utils.AttributeMap; import software.amazon.awssdk.utils.CollectionUtils; import software.amazon.awssdk.utils.ToString; @@ -55,6 +57,7 @@ public final class ClientOverrideConfiguration implements ToCopyableBuilder { private final Map> headers; private final RetryPolicy retryPolicy; + private final RetryStrategy retryStrategy; private final List executionInterceptors; private final AttributeMap advancedOptions; private final Duration apiCallAttemptTimeout; @@ -71,6 +74,7 @@ public final class ClientOverrideConfiguration private ClientOverrideConfiguration(Builder builder) { this.headers = CollectionUtils.deepUnmodifiableMap(builder.headers(), () -> new TreeMap<>(String.CASE_INSENSITIVE_ORDER)); this.retryPolicy = builder.retryPolicy(); + this.retryStrategy = builder.retryStrategy(); this.executionInterceptors = Collections.unmodifiableList(new ArrayList<>(builder.executionInterceptors())); this.advancedOptions = builder.advancedOptions(); this.apiCallTimeout = Validate.isPositiveOrNull(builder.apiCallTimeout(), "apiCallTimeout"); @@ -89,6 +93,7 @@ public Builder toBuilder() { .advancedOptions(advancedOptions.toBuilder()) .headers(headers) .retryPolicy(retryPolicy) + .retryStrategy(retryStrategy) .apiCallTimeout(apiCallTimeout) .apiCallAttemptTimeout(apiCallAttemptTimeout) .executionInterceptors(executionInterceptors) @@ -127,6 +132,15 @@ public Optional retryPolicy() { return Optional.ofNullable(retryPolicy); } + /** + * The optional retry strategy that should be used when handling failure cases. + * + * @see Builder#retryStrategy(RetryStrategy) + */ + public Optional retryStrategy() { + return Optional.ofNullable(retryStrategy); + } + /** * Load the optional requested advanced option that was configured on the client builder. 
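/*
 * Illustrative sketch, not part of this patch: the retryStrategy(...) overloads added to this builder
 * in use. A caller can hand over a fully built strategy or use the RetryMode shorthand; both variants
 * below rely only on methods introduced in this commit.
 */
import software.amazon.awssdk.awscore.retry.AwsRetryStrategy;
import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
import software.amazon.awssdk.core.retry.RetryMode;

class OverrideRetryStrategySketch {
    static ClientOverrideConfiguration explicitStrategy() {
        // Explicit strategy: the standard strategy with the AWS retryable error codes already applied.
        return ClientOverrideConfiguration.builder()
                                          .retryStrategy(AwsRetryStrategy.standardRetryStrategy())
                                          .build();
    }

    static ClientOverrideConfiguration byRetryMode() {
        // Shorthand: resolves to SdkDefaultRetryStrategy.forRetryMode(RetryMode.ADAPTIVE) under the hood.
        return ClientOverrideConfiguration.builder()
                                          .retryStrategy(RetryMode.ADAPTIVE)
                                          .build();
    }
}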
* @@ -235,6 +249,7 @@ public String toString() { return ToString.builder("ClientOverrideConfiguration") .add("headers", headers) .add("retryPolicy", retryPolicy) + .add("retryStrategy", retryStrategy) .add("apiCallTimeout", apiCallTimeout) .add("apiCallAttemptTimeout", apiCallAttemptTimeout) .add("executionInterceptors", executionInterceptors) @@ -323,6 +338,34 @@ default Builder retryPolicy(RetryMode retryMode) { RetryPolicy retryPolicy(); + /** + * Configure the retry mode used to determine the retry strategy that is used when handling failure cases. This is + * shorthand for {@code retryStrategy(SdkDefaultRetryStrategy.forRetryMode(retryMode))}, and overrides any configured + * retry policy on this builder. + */ + default Builder retryStrategy(RetryMode retryMode) { + return retryStrategy(SdkDefaultRetryStrategy.forRetryMode(retryMode)); + } + + /** + * Configure the retry strategy that should be used when handling failure cases. + * + * @see ClientOverrideConfiguration#retryStrategy() + */ + Builder retryStrategy(RetryStrategy retryStrategy); + + /** + * Configure the retry strategy that should be used when handling failure cases. + */ + default Builder retryStrategy(Consumer> mutator) { + RetryStrategy.Builder builder = SdkDefaultRetryStrategy.forRetryMode(RetryMode.defaultRetryMode()) + .toBuilder(); + mutator.accept(builder); + return retryStrategy(builder.build()); + } + + RetryStrategy retryStrategy(); + /** * Configure a list of execution interceptors that will have access to read and modify the request and response objcets as * they are processed by the SDK. These will replace any interceptors configured previously with this method or @@ -521,6 +564,7 @@ default Builder retryPolicy(RetryMode retryMode) { private static final class DefaultClientOverrideConfigurationBuilder implements Builder { private Map> headers = new HashMap<>(); private RetryPolicy retryPolicy; + private RetryStrategy retryStrategy; private List executionInterceptors = new ArrayList<>(); private AttributeMap.Builder advancedOptions = AttributeMap.builder(); private Duration apiCallTimeout; @@ -570,6 +614,17 @@ public RetryPolicy retryPolicy() { return retryPolicy; } + @Override + public Builder retryStrategy(RetryStrategy retryStrategy) { + this.retryStrategy = retryStrategy; + return this; + } + + @Override + public RetryStrategy retryStrategy() { + return this.retryStrategy; + } + @Override public Builder executionInterceptors(List executionInterceptors) { Validate.paramNotNull(executionInterceptors, "executionInterceptors"); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java index 07361d75f23d..2f528beaecb0 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java @@ -33,6 +33,7 @@ import software.amazon.awssdk.http.async.SdkAsyncHttpClient; import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.utils.AttributeMap; /** @@ -51,6 +52,12 @@ public final class SdkClientOption extends ClientOption { */ public static final SdkClientOption RETRY_POLICY = new SdkClientOption<>(RetryPolicy.class); + /** + * @see ClientOverrideConfiguration#retryStrategy() + */ + @SuppressWarnings("rawtypes") + 
public static final SdkClientOption RETRY_STRATEGY = new SdkClientOption<>(RetryStrategy.class); + /** * @see ClientOverrideConfiguration#executionInterceptors() */ diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOptionValidation.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOptionValidation.java index 8da90365bd28..56203c459fdc 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOptionValidation.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOptionValidation.java @@ -46,7 +46,9 @@ private static void validateClientOptions(SdkClientConfiguration c) { require("overrideConfiguration.additionalHttpHeaders", c.option(SdkClientOption.ADDITIONAL_HTTP_HEADERS)); require("overrideConfiguration.executionInterceptors", c.option(SdkClientOption.EXECUTION_INTERCEPTORS)); + // TODO: fixme, this will be removed as retryPolicy will be optional require("overrideConfiguration.retryPolicy", c.option(SdkClientOption.RETRY_POLICY)); + require("overrideConfiguration.retryStrategy", c.option(SdkClientOption.RETRY_STRATEGY)); require("overrideConfiguration.advancedOption[USER_AGENT_PREFIX]", c.option(SdkAdvancedClientOption.USER_AGENT_PREFIX)); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/InternalCoreExecutionAttribute.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/InternalCoreExecutionAttribute.java index 8c0748bf20fc..12debb04a06e 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/InternalCoreExecutionAttribute.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/InternalCoreExecutionAttribute.java @@ -18,6 +18,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.interceptor.ExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; +import software.amazon.awssdk.retries.api.RetryToken; /** * Attributes that can be applied to all sdk requests. 
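/*
 * Illustrative sketch, not part of this patch: how the RETRY_TOKEN attribute declared just below is
 * intended to be threaded through an execution, mirroring what RetryableStageHelper2 does later in
 * this commit. The ExecutionAttributes instance stands in for the one carried on the request context.
 */
import software.amazon.awssdk.core.interceptor.ExecutionAttributes;
import software.amazon.awssdk.core.internal.InternalCoreExecutionAttribute;
import software.amazon.awssdk.retries.api.RetryToken;

class RetryTokenAttributeSketch {
    static void stash(ExecutionAttributes attributes, RetryToken token) {
        // Store the token acquired from the retry strategy so later attempts can refresh it.
        attributes.putAttribute(InternalCoreExecutionAttribute.RETRY_TOKEN, token);
    }

    static RetryToken current(ExecutionAttributes attributes) {
        // Read it back when refreshing the token or recording success.
        return attributes.getAttribute(InternalCoreExecutionAttribute.RETRY_TOKEN);
    }
}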
These attributes are only used internally by the core to @@ -32,6 +33,9 @@ public final class InternalCoreExecutionAttribute extends SdkExecutionAttribute public static final ExecutionAttribute EXECUTION_ATTEMPT = new ExecutionAttribute<>("SdkInternalExecutionAttempt"); + public static final ExecutionAttribute RETRY_TOKEN = + new ExecutionAttribute<>("SdkInternalRetryToken"); + private InternalCoreExecutionAttribute() { } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java index 7d0ebce693c5..1f6ef0a4a61a 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java @@ -35,7 +35,7 @@ import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncApiCallTimeoutTrackingStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncBeforeTransmissionExecutionInterceptorsStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncExecutionFailureExceptionReportingStage; -import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncRetryableStage; +import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncRetryableStage2; import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncSigningStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.MakeAsyncHttpRequestStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.MakeRequestImmutableStage; @@ -175,7 +175,8 @@ public CompletableFuture execute( .then(AsyncBeforeTransmissionExecutionInterceptorsStage::new) .then(d -> new MakeAsyncHttpRequestStage<>(responseHandler, d)) .wrappedWith(AsyncApiCallAttemptMetricCollectionStage::new) - .wrappedWith((deps, wrapped) -> new AsyncRetryableStage<>(responseHandler, deps, wrapped)) + .wrappedWith((deps, wrapped) -> new AsyncRetryableStage2<>(responseHandler, deps, + wrapped)) .then(async(() -> new UnwrapResponseContainer<>())) .then(async(() -> new AfterExecutionInterceptorsStage<>())) .wrappedWith(AsyncExecutionFailureExceptionReportingStage::new) diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java index 91e24c798f31..ed513e66ed41 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java @@ -42,7 +42,7 @@ import software.amazon.awssdk.core.internal.http.pipeline.stages.MakeRequestMutableStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.MergeCustomHeadersStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.MergeCustomQueryParamsStage; -import software.amazon.awssdk.core.internal.http.pipeline.stages.RetryableStage; +import software.amazon.awssdk.core.internal.http.pipeline.stages.RetryableStage2; import software.amazon.awssdk.core.internal.http.pipeline.stages.SigningStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.TimeoutExceptionHandlingStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.UnwrapResponseContainer; @@ -182,7 +182,7 @@ public OutputT execute(HttpResponseHandler> response 
.wrappedWith(ApiCallAttemptTimeoutTrackingStage::new) .wrappedWith(TimeoutExceptionHandlingStage::new) .wrappedWith((deps, wrapped) -> new ApiCallAttemptMetricCollectionStage<>(wrapped)) - .wrappedWith(RetryableStage::new)::build) + .wrappedWith(RetryableStage2::new)::build) .wrappedWith(StreamManagingStage::new) .wrappedWith(ApiCallTimeoutTrackingStage::new)::build) .wrappedWith((deps, wrapped) -> new ApiCallMetricCollectionStage<>(wrapped)) diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApplyUserAgentStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApplyUserAgentStage.java index ee592eaeefde..4bef1c159e4a 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApplyUserAgentStage.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApplyUserAgentStage.java @@ -26,7 +26,6 @@ import software.amazon.awssdk.core.internal.http.HttpClientDependencies; import software.amazon.awssdk.core.internal.http.RequestExecutionContext; import software.amazon.awssdk.core.internal.http.pipeline.MutableRequestToRequestPipeline; -import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.core.util.SdkUserAgent; import software.amazon.awssdk.http.SdkHttpClient; import software.amazon.awssdk.http.SdkHttpFullRequest; @@ -65,7 +64,7 @@ public static String resolveClientUserAgent(String userAgentPrefix, ClientType clientType, SdkHttpClient syncHttpClient, SdkAsyncHttpClient asyncHttpClient, - RetryPolicy retryPolicy) { + String retryMode) { String awsExecutionEnvironment = SdkSystemSetting.AWS_EXECUTION_ENV.getStringValue().orElse(null); StringBuilder userAgent = new StringBuilder(128); @@ -98,11 +97,8 @@ public static String resolveClientUserAgent(String userAgentPrefix, userAgent.append(SPACE) .append(HTTP) .append("/") - .append(SdkHttpUtils.urlEncode(clientName(clientType, syncHttpClient, asyncHttpClient))); - - String retryMode = retryPolicy.retryMode().toString(); - - userAgent.append(SPACE) + .append(SdkHttpUtils.urlEncode(clientName(clientType, syncHttpClient, asyncHttpClient))) + .append(SPACE) .append(CONFIG) .append("/") .append(RETRY_MODE) diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncRetryableStage2.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncRetryableStage2.java new file mode 100644 index 000000000000..ff598d27832e --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncRetryableStage2.java @@ -0,0 +1,155 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.http.pipeline.stages; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +import java.io.IOException; +import java.time.Duration; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ScheduledExecutorService; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.Response; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.client.config.SdkClientOption; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.internal.http.HttpClientDependencies; +import software.amazon.awssdk.core.internal.http.RequestExecutionContext; +import software.amazon.awssdk.core.internal.http.TransformingAsyncResponseHandler; +import software.amazon.awssdk.core.internal.http.pipeline.RequestPipeline; +import software.amazon.awssdk.core.internal.http.pipeline.stages.utils.RetryableStageHelper2; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.utils.CompletableFutureUtils; + +/** + * Wrapper around the pipeline for a single request to provide retry, clockskew and request throttling functionality. + */ +@SdkInternalApi +public final class AsyncRetryableStage2 implements RequestPipeline>> { + + private final TransformingAsyncResponseHandler> responseHandler; + private final RequestPipeline>> requestPipeline; + private final ScheduledExecutorService scheduledExecutor; + private final HttpClientDependencies dependencies; + + public AsyncRetryableStage2(TransformingAsyncResponseHandler> responseHandler, + HttpClientDependencies dependencies, + RequestPipeline>> requestPipeline) { + this.responseHandler = responseHandler; + this.dependencies = dependencies; + this.scheduledExecutor = dependencies.clientConfiguration().option(SdkClientOption.SCHEDULED_EXECUTOR_SERVICE); + this.requestPipeline = requestPipeline; + } + + @Override + public CompletableFuture> execute(SdkHttpFullRequest request, + RequestExecutionContext context) throws Exception { + return new RetryingExecutor(request, context).execute(); + } + + private final class RetryingExecutor { + private final AsyncRequestBody originalRequestBody; + private final RequestExecutionContext context; + private final RetryableStageHelper2 retryableStageHelper; + + private RetryingExecutor(SdkHttpFullRequest request, RequestExecutionContext context) { + this.originalRequestBody = context.requestProvider(); + this.context = context; + this.retryableStageHelper = new RetryableStageHelper2(request, context, dependencies); + } + + public CompletableFuture> execute() { + CompletableFuture> future = new CompletableFuture<>(); + attemptFirstExecute(future); + return future; + } + + public void attemptFirstExecute(CompletableFuture> future) { + Duration backoffDelay = retryableStageHelper.acquireInitialToken(); + if (backoffDelay.isZero()) { + attemptExecute(future); + } else { + retryableStageHelper.logBackingOff(backoffDelay); + long totalDelayMillis = backoffDelay.toMillis(); + scheduledExecutor.schedule(() -> attemptExecute(future), totalDelayMillis, MILLISECONDS); + } + } + + private void attemptExecute(CompletableFuture> future) { + CompletableFuture> responseFuture; + try { + retryableStageHelper.startingAttempt(); + retryableStageHelper.logSendingRequest(); + responseFuture = requestPipeline.execute(retryableStageHelper.requestToSend(), context); + + // If the result future fails, go ahead and fail 
the response future. + CompletableFutureUtils.forwardExceptionTo(future, responseFuture); + } catch (SdkException | IOException e) { + maybeRetryExecute(future, e); + return; + } catch (Throwable e) { + future.completeExceptionally(e); + return; + } + + responseFuture.whenComplete((response, exception) -> { + if (exception != null) { + if (exception instanceof Exception) { + maybeRetryExecute(future, (Exception) exception); + } else { + future.completeExceptionally(exception); + } + return; + } + + retryableStageHelper.setLastResponse(response.httpResponse()); + if (!response.isSuccess()) { + retryableStageHelper.adjustClockIfClockSkew(response); + maybeRetryExecute(future, response.exception()); + return; + } + + retryableStageHelper.recordAttemptSucceeded(); + future.complete(response); + }); + } + + public void maybeAttemptExecute(CompletableFuture> future) { + Optional delay = retryableStageHelper.tryRefreshToken(Duration.ZERO); + if (!delay.isPresent()) { + future.completeExceptionally(retryableStageHelper.retryPolicyDisallowedRetryException()); + return; + } + // We failed the last attempt, but will retry. The response handler wants to know when that happens. + responseHandler.onError(retryableStageHelper.getLastException()); + + // Reset the request provider to the original one before retries, in case it was modified downstream. + context.requestProvider(originalRequestBody); + + Duration backoffDelay = delay.get(); + retryableStageHelper.logBackingOff(backoffDelay); + long totalDelayMillis = backoffDelay.toMillis(); + scheduledExecutor.schedule(() -> attemptExecute(future), totalDelayMillis, MILLISECONDS); + } + + private void maybeRetryExecute(CompletableFuture> future, Exception exception) { + retryableStageHelper.setLastException(exception); + maybeAttemptExecute(future); + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/RetryableStage2.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/RetryableStage2.java new file mode 100644 index 000000000000..4c927cd970fb --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/RetryableStage2.java @@ -0,0 +1,90 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.http.pipeline.stages; + +import java.io.IOException; +import java.time.Duration; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.Response; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.internal.http.HttpClientDependencies; +import software.amazon.awssdk.core.internal.http.RequestExecutionContext; +import software.amazon.awssdk.core.internal.http.pipeline.RequestPipeline; +import software.amazon.awssdk.core.internal.http.pipeline.RequestToResponsePipeline; +import software.amazon.awssdk.core.internal.http.pipeline.stages.utils.RetryableStageHelper2; +import software.amazon.awssdk.http.SdkHttpFullRequest; + +/** + * Wrapper around the pipeline for a single request to provide retry, clock-skew and request throttling functionality. + */ +@SdkInternalApi +public final class RetryableStage2 implements RequestToResponsePipeline { + private final RequestPipeline> requestPipeline; + private final HttpClientDependencies dependencies; + + public RetryableStage2(HttpClientDependencies dependencies, + RequestPipeline> requestPipeline) { + this.dependencies = dependencies; + this.requestPipeline = requestPipeline; + } + + @Override + public Response execute(SdkHttpFullRequest request, RequestExecutionContext context) throws Exception { + RetryableStageHelper2 retryableStageHelper = new RetryableStageHelper2(request, context, dependencies); + Duration initialDelay = retryableStageHelper.acquireInitialToken(); + TimeUnit.MILLISECONDS.sleep(initialDelay.toMillis()); + while (true) { + try { + retryableStageHelper.startingAttempt(); + Response response = executeRequest(retryableStageHelper, context); + retryableStageHelper.recordAttemptSucceeded(); + return response; + } catch (SdkException | IOException e) { + retryableStageHelper.setLastException(e); + Duration suggestedDelay = suggestedDelay(e); + Optional backoffDelay = retryableStageHelper.tryRefreshToken(suggestedDelay); + if (backoffDelay.isPresent()) { + Duration delay = backoffDelay.get(); + retryableStageHelper.logBackingOff(delay); + TimeUnit.MILLISECONDS.sleep(delay.toMillis()); + } else { + throw retryableStageHelper.retryPolicyDisallowedRetryException(); + } + } + } + } + + private Duration suggestedDelay(Exception e) { + return Duration.ZERO; + } + + /** + * Executes the requests and returns the result. If the response is not successful throws the wrapped exception. 
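/*
 * Illustrative sketch, not part of this patch: the token protocol that RetryableStage2 above drives,
 * reduced to a bare loop. acquireInitialToken(...) opens the attempt series, refreshRetryToken(...)
 * either grants a delay for the next attempt or throws TokenAcquisitionFailedException when the
 * strategy gives up, and recordSuccess(...) releases any capacity held for the call. Only the
 * RetryStrategy interface methods used elsewhere in this commit are assumed.
 */
import java.time.Duration;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import software.amazon.awssdk.retries.api.AcquireInitialTokenRequest;
import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse;
import software.amazon.awssdk.retries.api.RecordSuccessRequest;
import software.amazon.awssdk.retries.api.RefreshRetryTokenRequest;
import software.amazon.awssdk.retries.api.RefreshRetryTokenResponse;
import software.amazon.awssdk.retries.api.RetryStrategy;
import software.amazon.awssdk.retries.api.RetryToken;

class RetryTokenProtocolSketch {
    static <T> T callWithRetries(RetryStrategy strategy, Callable<T> call) throws Exception {
        AcquireInitialTokenResponse acquired =
            strategy.acquireInitialToken(AcquireInitialTokenRequest.create("GLOBAL"));
        RetryToken token = acquired.token();
        // Adaptive strategies may ask the very first attempt to wait; standard ones return zero here.
        TimeUnit.MILLISECONDS.sleep(acquired.delay().toMillis());
        while (true) {
            try {
                T result = call.call();
                strategy.recordSuccess(RecordSuccessRequest.create(token));
                return result;
            } catch (Exception e) {
                // Throws TokenAcquisitionFailedException when the strategy disallows another attempt.
                RefreshRetryTokenResponse refreshed =
                    strategy.refreshRetryToken(RefreshRetryTokenRequest.builder()
                                                                       .token(token)
                                                                       .failure(e)
                                                                       .suggestedDelay(Duration.ZERO)
                                                                       .build());
                token = refreshed.token();
                TimeUnit.MILLISECONDS.sleep(refreshed.delay().toMillis());
            }
        }
    }
}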
+ */ + private Response executeRequest(RetryableStageHelper2 retryableStageHelper, + RequestExecutionContext context) throws Exception { + retryableStageHelper.logSendingRequest(); + Response response = requestPipeline.execute(retryableStageHelper.requestToSend(), context); + retryableStageHelper.setLastResponse(response.httpResponse()); + if (!response.isSuccess()) { + retryableStageHelper.adjustClockIfClockSkew(response); + throw response.exception(); + } + return response; + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/utils/RetryableStageHelper2.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/utils/RetryableStageHelper2.java new file mode 100644 index 000000000000..6639c6d7b3bb --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/utils/RetryableStageHelper2.java @@ -0,0 +1,289 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.http.pipeline.stages.utils; + +import static software.amazon.awssdk.core.internal.InternalCoreExecutionAttribute.EXECUTION_ATTEMPT; +import static software.amazon.awssdk.core.internal.InternalCoreExecutionAttribute.RETRY_TOKEN; +import static software.amazon.awssdk.core.internal.http.pipeline.stages.utils.RetryableStageHelper.LAST_BACKOFF_DELAY_DURATION; +import static software.amazon.awssdk.core.metrics.CoreMetric.RETRY_COUNT; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.CompletionException; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.Response; +import software.amazon.awssdk.core.SdkStandardLogger; +import software.amazon.awssdk.core.client.config.SdkClientOption; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.internal.http.HttpClientDependencies; +import software.amazon.awssdk.core.internal.http.RequestExecutionContext; +import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncRetryableStage; +import software.amazon.awssdk.core.internal.http.pipeline.stages.RetryableStage; +import software.amazon.awssdk.core.internal.retry.ClockSkewAdjuster; +import software.amazon.awssdk.core.internal.retry.RetryPolicyAdapter; +import software.amazon.awssdk.core.retry.RetryPolicy; +import software.amazon.awssdk.core.retry.RetryPolicyContext; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.retries.AdaptiveRetryStrategy; +import software.amazon.awssdk.retries.api.AcquireInitialTokenRequest; +import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse; +import software.amazon.awssdk.retries.api.RecordSuccessRequest; +import 
software.amazon.awssdk.retries.api.RefreshRetryTokenRequest; +import software.amazon.awssdk.retries.api.RefreshRetryTokenResponse; +import software.amazon.awssdk.retries.api.RetryStrategy; +import software.amazon.awssdk.retries.api.RetryToken; +import software.amazon.awssdk.retries.api.TokenAcquisitionFailedException; + +/** + * Contains the logic shared by {@link RetryableStage} and {@link AsyncRetryableStage} when querying and interacting with a + * {@link RetryStrategy}. + */ +@SdkInternalApi +public final class RetryableStageHelper2 { + public static final String SDK_RETRY_INFO_HEADER = "amz-sdk-request"; + private final SdkHttpFullRequest request; + private final RequestExecutionContext context; + private final RetryPolicy retryPolicy; + private RetryPolicyAdapter retryPolicyAdapter; + private final RetryStrategy retryStrategy; + private final HttpClientDependencies dependencies; + private final List exceptionMessageHistory = new ArrayList<>(); + private int attemptNumber = 0; + private SdkHttpResponse lastResponse; + private SdkException lastException; + + public RetryableStageHelper2(SdkHttpFullRequest request, + RequestExecutionContext context, + HttpClientDependencies dependencies) { + this.request = request; + this.context = context; + this.retryPolicy = dependencies.clientConfiguration().option(SdkClientOption.RETRY_POLICY); + this.retryStrategy = dependencies.clientConfiguration().option(SdkClientOption.RETRY_STRATEGY); + this.dependencies = dependencies; + } + + /** + * Invoke when starting a request attempt, before querying the retry policy. + */ + public void startingAttempt() { + ++attemptNumber; + context.executionAttributes().putAttribute(EXECUTION_ATTEMPT, attemptNumber); + } + + /** + * Invoke when starting the first attempt. This method will acquire the initial token and store it as an execution attribute. + * This method returns a delay that the caller have to wait before attempting the first request. If this method returns + * {@link Duration#ZERO} if the calling code does not have to wait. As of today the only strategy that might return a non-zero + * value is {@link AdaptiveRetryStrategy}. + */ + public Duration acquireInitialToken() { + String scope = "GLOBAL"; + AcquireInitialTokenRequest acquireRequest = AcquireInitialTokenRequest.create(scope); + AcquireInitialTokenResponse acquireResponse = retryStrategy().acquireInitialToken(acquireRequest); + RetryToken retryToken = acquireResponse.token(); + Duration delay = acquireResponse.delay(); + context.executionAttributes().putAttribute(RETRY_TOKEN, retryToken); + context.executionAttributes().putAttribute(LAST_BACKOFF_DELAY_DURATION, delay); + return delay; + } + + /** + * Notify the retry strategy that the request attempt succeeded. + */ + public void recordAttemptSucceeded() { + RetryToken retryToken = context.executionAttributes().getAttribute(RETRY_TOKEN); + RecordSuccessRequest recordSuccessRequest = RecordSuccessRequest.create(retryToken); + retryStrategy().recordSuccess(recordSuccessRequest); + context.executionContext().metricCollector().reportMetric(RETRY_COUNT, retriesAttemptedSoFar()); + } + + /** + * Invoked after a failed attempt and before retrying. The returned optional will be non-empty if the client can retry or + * empty if the retry-strategy disallows the retry. The calling code is expected to wait the delay represented in the duration + * if present before retrying the request. + * + * @param suggestedDelay A suggested delay, presumably coming from the server response. 
The response when present will be at + * least this amount. + * @return An optional time to wait. If the value is not present the retry strategy disallowed the retry and the calling code + * should not retry. + */ + public Optional tryRefreshToken(Duration suggestedDelay) { + RetryToken retryToken = context.executionAttributes().getAttribute(RETRY_TOKEN); + RefreshRetryTokenResponse refreshResponse; + try { + RefreshRetryTokenRequest refreshRequest = RefreshRetryTokenRequest.builder() + .failure(this.lastException) + .token(retryToken) + .suggestedDelay(suggestedDelay) + .build(); + refreshResponse = retryStrategy().refreshRetryToken(refreshRequest); + } catch (TokenAcquisitionFailedException e) { + context.executionAttributes().putAttribute(RETRY_TOKEN, e.token()); + return Optional.empty(); + } + Duration delay = refreshResponse.delay(); + context.executionAttributes().putAttribute(RETRY_TOKEN, refreshResponse.token()); + context.executionAttributes().putAttribute(LAST_BACKOFF_DELAY_DURATION, delay); + return Optional.of(delay); + } + + /** + * Return the exception that should be thrown, because the retry strategy did not allow the request to be retried. + */ + public SdkException retryPolicyDisallowedRetryException() { + context.executionContext().metricCollector().reportMetric(RETRY_COUNT, retriesAttemptedSoFar()); + for (int i = 0; i < exceptionMessageHistory.size() - 1; i++) { + SdkClientException pastException = + SdkClientException.builder() + .message("Request attempt " + (i + 1) + " failure: " + exceptionMessageHistory.get(i)) + .writableStackTrace(false) + .build(); + lastException.addSuppressed(pastException); + } + return lastException; + } + + /** + * Log a message to the user at the debug level to indicate how long we will wait before retrying the request. + */ + public void logBackingOff(Duration backoffDelay) { + SdkStandardLogger.REQUEST_LOGGER.debug(() -> "Retryable error detected. Will retry in " + + backoffDelay.toMillis() + "ms. Request attempt number " + + attemptNumber, lastException); + } + + /** + * Retrieve the request to send to the service, including any detailed retry information headers. + */ + public SdkHttpFullRequest requestToSend() { + // TODO: fixme, we don't longer have this information handy we need to change the interface to access it. + int maxAllowedRetries = 3; + return request.toBuilder() + .putHeader(SDK_RETRY_INFO_HEADER, "attempt=" + attemptNumber + "; max=" + maxAllowedRetries) + .build(); + } + + /** + * Log a message to the user at the debug level to indicate that we are sending the request to the service. + */ + public void logSendingRequest() { + SdkStandardLogger.REQUEST_LOGGER.debug(() -> (isInitialAttempt() ? "Sending" : "Retrying") + " Request: " + request); + } + + /** + * Adjust the client-side clock skew if the provided response indicates that there is a large skew between the client and + * service. This will allow a retried request to be signed with what is likely to be a more accurate time. + */ + public void adjustClockIfClockSkew(Response response) { + ClockSkewAdjuster clockSkewAdjuster = dependencies.clockSkewAdjuster(); + if (!response.isSuccess() && clockSkewAdjuster.shouldAdjust(response.exception())) { + dependencies.updateTimeOffset(clockSkewAdjuster.getAdjustmentInSeconds(response.httpResponse())); + } + } + + /** + * Retrieve the last call failure exception encountered by this execution, updated whenever {@link #setLastException} is + * invoked. 
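/*
 * Illustrative sketch, not part of this patch: one way a caller of tryRefreshToken(...) could compute
 * the suggestedDelay hint, for example from a throttling response's Retry-After header. The header
 * name and the parsing are assumptions made for illustration only; the stages in this commit
 * currently pass Duration.ZERO as the hint.
 */
import java.time.Duration;
import software.amazon.awssdk.http.SdkHttpResponse;

class SuggestedDelaySketch {
    static Duration suggestedDelay(SdkHttpResponse response) {
        // Use the server-provided hint when present, otherwise leave the decision to the backoff strategy.
        return response.firstMatchingHeader("Retry-After")
                       .map(seconds -> Duration.ofSeconds(Long.parseLong(seconds)))
                       .orElse(Duration.ZERO);
    }
}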
+ */ + public SdkException getLastException() { + return lastException; + } + + /** + * Update the {@link #getLastException()} value for this helper. This will be used to determine whether the request should be + * retried. + */ + public void setLastException(Throwable lastException) { + if (lastException instanceof CompletionException) { + setLastException(lastException.getCause()); + } else if (lastException instanceof SdkException) { + this.lastException = (SdkException) lastException; + exceptionMessageHistory.add(this.lastException.getMessage()); + } else { + this.lastException = SdkClientException.create("Unable to execute HTTP request: " + lastException.getMessage(), + lastException); + exceptionMessageHistory.add(this.lastException.getMessage()); + } + } + + /** + * Set the last HTTP response returned by the service. This will be used to determine whether the request should be retried. + */ + public void setLastResponse(SdkHttpResponse lastResponse) { + this.lastResponse = lastResponse; + } + + /** + * Returns true if this is the first attempt. + */ + private boolean isInitialAttempt() { + return attemptNumber == 1; + } + + /** + * Retrieve the current attempt number, updated whenever {@link #startingAttempt()} is invoked. + */ + public int getAttemptNumber() { + return attemptNumber; + } + + /** + * Retrieve the number of retries sent so far in the request execution. + */ + private int retriesAttemptedSoFar() { + return Math.max(0, attemptNumber - 1); + } + + /** + * Returns the {@link RetryStrategy} to be used by this class. If there's a client configured retry-policy then an adapter to + * wrap it is returned. This allows this code to be backwards compatible with previously configured retry-policies by the + * calling code. + */ + private RetryStrategy retryStrategy() { + if (retryPolicy != null) { + if (retryPolicyAdapter == null) { + retryPolicyAdapter = RetryPolicyAdapter.builder() + .retryPolicy(this.retryPolicy) + .retryPolicyContext(retryPolicyContext()) + .build(); + } else { + retryPolicyAdapter = retryPolicyAdapter.toBuilder() + .retryPolicyContext(retryPolicyContext()) + .build(); + } + return retryPolicyAdapter; + } + return retryStrategy; + } + + /** + * Creates a RetryPolicyContext to be used when the using the retry policy to strategy adapter. + */ + private RetryPolicyContext retryPolicyContext() { + return RetryPolicyContext.builder() + .request(request) + .originalRequest(context.originalRequest()) + .exception(lastException) + .retriesAttempted(retriesAttemptedSoFar()) + .executionAttributes(context.executionAttributes()) + .httpStatusCode(lastResponse == null ? null : lastResponse.statusCode()) + .build(); + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/RetryPolicyAdapter.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/RetryPolicyAdapter.java new file mode 100644 index 000000000000..bbdbfed3d24c --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/RetryPolicyAdapter.java @@ -0,0 +1,190 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
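That completes RetryableStageHelper2. RetryableStage2, introduced later in this change set, is the class that actually drives these methods; a condensed, illustrative version of that loop (the pipeline call, the OutputT type and the helper variable are stand-ins, not the literal stage code) looks roughly like this:

    // Simplified sketch of the attempt loop built around the helper above.
    helper.acquireInitialToken();                      // may return a non-zero delay under ADAPTIVE
    while (true) {
        helper.startingAttempt();
        try {
            Response<OutputT> response = executePipeline(helper.requestToSend());   // stand-in for the wrapped pipeline
            helper.recordAttemptSucceeded();
            return response;
        } catch (SdkException | IOException e) {
            helper.setLastException(e);
            Optional<Duration> backoff = helper.tryRefreshToken(Duration.ZERO);     // no server-suggested delay here
            if (!backoff.isPresent()) {
                throw helper.retryPolicyDisallowedRetryException();
            }
            helper.logBackingOff(backoff.get());
            // wait for backoff.get() before the next iteration
        }
    }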
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.retry; + +import java.time.Duration; +import java.util.OptionalDouble; +import java.util.function.Predicate; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.retry.RetryMode; +import software.amazon.awssdk.core.retry.RetryPolicy; +import software.amazon.awssdk.core.retry.RetryPolicyContext; +import software.amazon.awssdk.core.retry.RetryUtils; +import software.amazon.awssdk.retries.api.AcquireInitialTokenRequest; +import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse; +import software.amazon.awssdk.retries.api.RecordSuccessRequest; +import software.amazon.awssdk.retries.api.RecordSuccessResponse; +import software.amazon.awssdk.retries.api.RefreshRetryTokenRequest; +import software.amazon.awssdk.retries.api.RefreshRetryTokenResponse; +import software.amazon.awssdk.retries.api.RetryStrategy; +import software.amazon.awssdk.retries.api.RetryToken; +import software.amazon.awssdk.retries.api.TokenAcquisitionFailedException; +import software.amazon.awssdk.utils.Validate; + +/** + * Implements the {@link RetryStrategy} interface by wrapping a {@link RetryPolicy} instance. + */ +@SdkInternalApi +public final class RetryPolicyAdapter implements RetryStrategy { + + private final RetryPolicy retryPolicy; + private final RetryPolicyContext retryPolicyContext; + private final RateLimitingTokenBucket rateLimitingTokenBucket; + + private RetryPolicyAdapter(Builder builder) { + this.retryPolicy = Validate.paramNotNull(builder.retryPolicy, "retryPolicy"); + this.retryPolicyContext = Validate.paramNotNull(builder.retryPolicyContext, "retryPolicyContext"); + this.rateLimitingTokenBucket = builder.rateLimitingTokenBucket; + } + + @Override + public AcquireInitialTokenResponse acquireInitialToken(AcquireInitialTokenRequest request) { + RetryPolicyAdapterToken token = new RetryPolicyAdapterToken(request.scope()); + return AcquireInitialTokenResponse.create(token, rateLimitingTokenAcquire()); + } + + @Override + public RefreshRetryTokenResponse refreshRetryToken(RefreshRetryTokenRequest request) { + RetryPolicyAdapterToken token = getToken(request.token()); + boolean willRetry = retryPolicy.aggregateRetryCondition().shouldRetry(retryPolicyContext); + if (!willRetry) { + retryPolicy.aggregateRetryCondition().requestWillNotBeRetried(retryPolicyContext); + throw new TokenAcquisitionFailedException("Retry policy disallowed retry"); + } + Duration backoffDelay = backoffDelay(); + return RefreshRetryTokenResponse.create(token, backoffDelay); + } + + @Override + public RecordSuccessResponse recordSuccess(RecordSuccessRequest request) { + RetryPolicyAdapterToken token = getToken(request.token()); + retryPolicy.aggregateRetryCondition().requestSucceeded(retryPolicyContext); + return RecordSuccessResponse.create(token); + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + RetryPolicyAdapterToken getToken(RetryToken token) { + return Validate.isInstanceOf(RetryPolicyAdapterToken.class, token, "Object of class %s was not created by this retry " + + "strategy", token.getClass().getName()); + } + + boolean isFastFailRateLimiting() { 
+ return Boolean.TRUE.equals(retryPolicy.isFastFailRateLimiting()); + } + + Duration rateLimitingTokenAcquire() { + if (!isRateLimitingEnabled()) { + return Duration.ZERO; + } + OptionalDouble tokenAcquireTimeSeconds = rateLimitingTokenBucket.acquireNonBlocking(1.0, isFastFailRateLimiting()); + if (!tokenAcquireTimeSeconds.isPresent()) { + String message = "Unable to acquire a send token immediately without waiting. This indicates that ADAPTIVE " + + "retry mode is enabled, fast fail rate limiting is enabled, and that rate limiting is " + + "engaged because of prior throttled requests. The request will not be executed."; + throw new TokenAcquisitionFailedException(message, SdkClientException.create(message)); + } + long tokenAcquireTimeMillis = (long) (tokenAcquireTimeSeconds.getAsDouble() * 1_000); + return Duration.ofMillis(tokenAcquireTimeMillis); + } + + boolean isRateLimitingEnabled() { + return retryPolicy.retryMode() == RetryMode.ADAPTIVE; + } + + boolean isLastExceptionThrottlingException() { + SdkException lastException = retryPolicyContext.exception(); + if (lastException == null) { + return false; + } + + return RetryUtils.isThrottlingException(lastException); + } + + Duration backoffDelay() { + Duration backoffDelay; + if (RetryUtils.isThrottlingException(retryPolicyContext.exception())) { + backoffDelay = retryPolicy.throttlingBackoffStrategy().computeDelayBeforeNextRetry(retryPolicyContext); + } else { + backoffDelay = retryPolicy.backoffStrategy().computeDelayBeforeNextRetry(retryPolicyContext); + } + Duration rateLimitingDelay = rateLimitingTokenAcquire(); + return backoffDelay.plus(rateLimitingDelay); + } + + public static Builder builder() { + return new Builder(); + } + + public static class Builder implements RetryStrategy.Builder { + private RetryPolicy retryPolicy; + private RetryPolicyContext retryPolicyContext; + private RateLimitingTokenBucket rateLimitingTokenBucket; + + private Builder() { + rateLimitingTokenBucket = new RateLimitingTokenBucket(); + } + + private Builder(RetryPolicyAdapter adapter) { + this.retryPolicy = adapter.retryPolicy; + this.retryPolicyContext = adapter.retryPolicyContext; + this.rateLimitingTokenBucket = adapter.rateLimitingTokenBucket; + } + + @Override + public Builder retryOnException(Predicate shouldRetry) { + throw new UnsupportedOperationException("RetryPolicyAdapter does not support calling retryOnException"); + } + + @Override + public Builder maxAttempts(int maxAttempts) { + throw new UnsupportedOperationException("RetryPolicyAdapter does not support calling retryOnException"); + } + + public Builder retryPolicy(RetryPolicy retryPolicy) { + this.retryPolicy = retryPolicy; + return this; + } + + public Builder rateLimitingTokenBucket(RateLimitingTokenBucket rateLimitingTokenBucket) { + this.rateLimitingTokenBucket = rateLimitingTokenBucket; + return this; + } + + public Builder retryPolicyContext(RetryPolicyContext retryPolicyContext) { + this.retryPolicyContext = retryPolicyContext; + return this; + } + + @Override + public RetryPolicyAdapter build() { + return new RetryPolicyAdapter(this); + } + } + + static class RetryPolicyAdapterToken implements RetryToken { + private final String scope; + + RetryPolicyAdapterToken(String scope) { + this.scope = Validate.paramNotNull(scope, "scope"); + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/SdkDefaultRetryStrategy.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/SdkDefaultRetryStrategy.java new file mode 100644 
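That closes out RetryPolicyAdapter: it is the bridge that lets a legacy RetryPolicy keep working behind the new token-based contract. A hedged sketch of how it might be exercised directly (the RetryPolicyContext and lastException values are assumed to come from the surrounding attempt; in the pipeline, RetryableStageHelper2 rebuilds the context before every refresh):

    // Illustrative only: driving the adapter through one failed attempt.
    RetryPolicyAdapter adapter = RetryPolicyAdapter.builder()
                                                   .retryPolicy(RetryPolicy.defaultRetryPolicy())
                                                   .retryPolicyContext(retryPolicyContext)   // assumed, see lead-in
                                                   .build();
    AcquireInitialTokenResponse initial = adapter.acquireInitialToken(AcquireInitialTokenRequest.create("GLOBAL"));
    try {
        RefreshRetryTokenResponse refreshed =
            adapter.refreshRetryToken(RefreshRetryTokenRequest.builder()
                                                              .token(initial.token())
                                                              .failure(lastException)        // the SdkException from the failed attempt
                                                              .suggestedDelay(Duration.ZERO)
                                                              .build());
        // refreshed.delay() combines the policy's backoff with any adaptive rate-limiting delay.
    } catch (TokenAcquisitionFailedException e) {
        // The wrapped policy disallowed the retry.
    }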
index 000000000000..712db64ed98f --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/SdkDefaultRetryStrategy.java @@ -0,0 +1,178 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.retry; + +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.exception.SdkServiceException; +import software.amazon.awssdk.core.retry.RetryMode; +import software.amazon.awssdk.core.retry.RetryUtils; +import software.amazon.awssdk.retries.AdaptiveRetryStrategy; +import software.amazon.awssdk.retries.DefaultRetryStrategy; +import software.amazon.awssdk.retries.LegacyRetryStrategy; +import software.amazon.awssdk.retries.StandardRetryStrategy; +import software.amazon.awssdk.retries.api.RetryStrategy; + +/** + * Retry strategies used by any SDK client. + */ +@SdkPublicApi +public final class SdkDefaultRetryStrategy { + + private SdkDefaultRetryStrategy() { + } + + /** + * Retrieve the default retry strategy for the configured retry mode. + * + * @return the default retry strategy for the configured retry mode. + */ + public static RetryStrategy defaultRetryStrategy() { + return forRetryMode(RetryMode.defaultRetryMode()); + } + + /** + * Retrieve the appropriate retry strategy for the retry mode with AWS-specific conditions added. + * + * @param mode The retry mode for which we want the retry strategy + * @return the appropriate retry strategy for the retry mode with AWS-specific conditions added. + */ + public static RetryStrategy forRetryMode(RetryMode mode) { + switch (mode) { + case STANDARD: + return standardRetryStrategy(); + case ADAPTIVE: + return adaptiveRetryStrategy(); + case LEGACY: + return legacyRetryStrategy(); + default: + throw new IllegalStateException("unknown retry mode: " + mode); + } + } + + /** + * Returns a {@link StandardRetryStrategy} with generic SDK retry conditions. + * + * @return a {@link StandardRetryStrategy} with generic SDK retry conditions. + */ + public static StandardRetryStrategy standardRetryStrategy() { + return standardRetryStrategyBuilder().build(); + } + + /** + * Returns a {@link LegacyRetryStrategy} with generic SDK retry conditions. + * + * @return a {@link LegacyRetryStrategy} with generic SDK retry conditions. + */ + public static LegacyRetryStrategy legacyRetryStrategy() { + return legacyRetryStrategyBuilder().build(); + } + + /** + * Returns an {@link AdaptiveRetryStrategy} with generic SDK retry conditions. + * + * @return an {@link AdaptiveRetryStrategy} with generic SDK retry conditions. + */ + public static AdaptiveRetryStrategy adaptiveRetryStrategy() { + return adaptiveRetryStrategyBuilder().build(); + } + + /** + * Returns a {@link StandardRetryStrategy.Builder} with preconfigured generic SDK retry conditions. + * + * @return a {@link StandardRetryStrategy.Builder} with preconfigured generic SDK retry conditions. 
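The factory methods above give one entry point per retry mode. A short usage sketch, assuming a caller that wants the standard strategy plus one extra, caller-specific retryable condition (the ConnectException predicate is only an example):

    // Illustrative: a standard strategy with the SDK-generic conditions plus one extra predicate.
    StandardRetryStrategy standard =
        SdkDefaultRetryStrategy.standardRetryStrategyBuilder()
                               .retryOnException(t -> t instanceof java.net.ConnectException)
                               .build();

    // Or simply pick the strategy matching a resolved retry mode:
    RetryStrategy adaptive = SdkDefaultRetryStrategy.forRetryMode(RetryMode.ADAPTIVE);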
+ */ + public static StandardRetryStrategy.Builder standardRetryStrategyBuilder() { + StandardRetryStrategy.Builder builder = DefaultRetryStrategy.standardStrategyBuilder(); + return configure(builder); + } + + /** + * Returns a {@link LegacyRetryStrategy.Builder} with preconfigured generic SDK retry conditions. + * + * @return a {@link LegacyRetryStrategy.Builder} with preconfigured generic SDK retry conditions. + */ + public static LegacyRetryStrategy.Builder legacyRetryStrategyBuilder() { + LegacyRetryStrategy.Builder builder = DefaultRetryStrategy.legacyStrategyBuilder(); + return configure(builder) + .treatAsThrottling(SdkDefaultRetryStrategy::treatAsThrottling); + } + + /** + * Returns an {@link AdaptiveRetryStrategy.Builder} with preconfigured generic SDK retry conditions. + * + * @return an {@link AdaptiveRetryStrategy.Builder} with preconfigured generic SDK retry conditions. + */ + public static AdaptiveRetryStrategy.Builder adaptiveRetryStrategyBuilder() { + AdaptiveRetryStrategy.Builder builder = DefaultRetryStrategy.adaptiveStrategyBuilder(); + return configure(builder) + .treatAsThrottling(SdkDefaultRetryStrategy::treatAsThrottling); + } + + /** + * Configures a retry strategy using its builder to add SDK-generic retry exceptions. + * + * @param builder The builder to add the SDK-generic retry exceptions + * @return The given builder + * @param The type of the builder extending {@link RetryStrategy.Builder} + */ + + public static > T configure(T builder) { + builder.retryOnException(SdkDefaultRetryStrategy::retryOnRetryableStatusCodes) + .retryOnException(SdkDefaultRetryStrategy::retryOnStatusCodes) + .retryOnException(SdkDefaultRetryStrategy::retryOnClockSkewException) + .retryOnException(SdkDefaultRetryStrategy::retryOnThrottlingCondition); + SdkDefaultRetrySetting.RETRYABLE_EXCEPTIONS.forEach(builder::retryOnExceptionOrCauseInstanceOf); + return builder; + } + + private static boolean treatAsThrottling(Throwable t) { + if (t instanceof SdkException) { + return RetryUtils.isThrottlingException((SdkException) t); + } + return false; + } + + private static boolean retryOnStatusCodes(Throwable ex) { + if (ex instanceof SdkServiceException) { + SdkServiceException failure = (SdkServiceException) ex; + return SdkDefaultRetrySetting.RETRYABLE_STATUS_CODES.contains(failure.statusCode()); + } + return false; + } + + private static boolean retryOnClockSkewException(Throwable ex) { + if (ex instanceof SdkException) { + return RetryUtils.isClockSkewException((SdkException) ex); + } + return false; + } + + private static boolean retryOnThrottlingCondition(Throwable ex) { + if (ex instanceof SdkException) { + return RetryUtils.isThrottlingException((SdkException) ex); + } + return false; + } + + private static boolean retryOnRetryableStatusCodes(Throwable ex) { + if (ex instanceof SdkServiceException) { + SdkServiceException exception = (SdkServiceException) ex; + return SdkDefaultRetrySetting.RETRYABLE_STATUS_CODES.contains(exception.statusCode()); + } + return false; + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/backoff/FullJitterBackoffStrategy.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/backoff/FullJitterBackoffStrategy.java index bc8943cedbc0..5624d04860cc 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/backoff/FullJitterBackoffStrategy.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/backoff/FullJitterBackoffStrategy.java @@ -61,6 +61,7 @@ private 
FullJitterBackoffStrategy(BuilderImpl builder) { @Override public Duration computeDelayBeforeNextRetry(RetryPolicyContext context) { int ceil = calculateExponentialDelay(context.retriesAttempted(), baseDelay, maxBackoffTime); + // Fixme, for the new interfaces should we still have a min of 1ms?? 😬 // Minimum of 1 ms (consistent with BackoffStrategy.none()'s behavior) return Duration.ofMillis(random.nextInt(ceil) + 1L); } diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/AsyncClientHandlerExceptionTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/AsyncClientHandlerExceptionTest.java index 4f4334282fb6..f05f5f72759d 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/AsyncClientHandlerExceptionTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/AsyncClientHandlerExceptionTest.java @@ -51,6 +51,7 @@ import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; +import software.amazon.awssdk.retries.DefaultRetryStrategy; import utils.HttpTestUtils; import utils.ValidSdkObjects; @@ -87,6 +88,7 @@ public void methodSetup() throws Exception { SdkClientConfiguration config = HttpTestUtils.testClientConfiguration().toBuilder() .option(SdkClientOption.ASYNC_HTTP_CLIENT, asyncHttpClient) .option(SdkClientOption.RETRY_POLICY, RetryPolicy.none()) + .option(SdkClientOption.RETRY_STRATEGY, DefaultRetryStrategy.none()) .option(SdkAdvancedAsyncClientOption.FUTURE_COMPLETION_EXECUTOR, Runnable::run) .build(); diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/handler/AsyncClientHandlerTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/handler/AsyncClientHandlerTest.java index 5cfaa875741b..055d6791a8ea 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/handler/AsyncClientHandlerTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/handler/AsyncClientHandlerTest.java @@ -49,6 +49,7 @@ import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; +import software.amazon.awssdk.retries.DefaultRetryStrategy; import utils.HttpTestUtils; import utils.ValidSdkObjects; @@ -145,6 +146,7 @@ public SdkClientConfiguration clientConfiguration() { return HttpTestUtils.testClientConfiguration().toBuilder() .option(SdkClientOption.ASYNC_HTTP_CLIENT, httpClient) .option(SdkClientOption.RETRY_POLICY, RetryPolicy.none()) + .option(SdkClientOption.RETRY_STRATEGY, DefaultRetryStrategy.none()) .build(); } } diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/http/AmazonHttpClientTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/http/AmazonHttpClientTest.java index fc87d37f585b..859a2ac4eaca 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/http/AmazonHttpClientTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/http/AmazonHttpClientTest.java @@ -134,7 +134,7 @@ public void testUserAgentPrefixAndSuffixAreAdded() { String clientUserAgent = ApplyUserAgentStage.resolveClientUserAgent(prefix, "", ClientType.SYNC, sdkHttpClient, null, - RetryPolicy.forRetryMode(RetryMode.STANDARD)); + RetryMode.STANDARD.toString()); SdkClientConfiguration config = HttpTestUtils.testClientConfiguration().toBuilder() 
.option(SdkAdvancedClientOption.USER_AGENT_SUFFIX, suffix) @@ -165,7 +165,7 @@ public void testUserAgentContainsHttpClientInfo() { String clientUserAgent = ApplyUserAgentStage.resolveClientUserAgent(null, null, ClientType.SYNC, sdkHttpClient, null, - RetryPolicy.forRetryMode(RetryMode.STANDARD)); + RetryMode.STANDARD.toString()); SdkClientConfiguration config = HttpTestUtils.testClientConfiguration().toBuilder() .option(SdkClientOption.SYNC_HTTP_CLIENT, sdkHttpClient) .option(SdkClientOption.CLIENT_TYPE, ClientType.SYNC) @@ -195,7 +195,7 @@ public void testUserAgentContainsRetryModeInfo() { String clientUserAgent = ApplyUserAgentStage.resolveClientUserAgent(null, null, ClientType.SYNC, sdkHttpClient, null, - RetryPolicy.forRetryMode(RetryMode.STANDARD)); + RetryMode.STANDARD.toString()); SdkClientConfiguration config = HttpTestUtils.testClientConfiguration().toBuilder() .option(SdkClientOption.CLIENT_USER_AGENT, clientUserAgent) diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/response/NullErrorResponseHandler.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/response/NullErrorResponseHandler.java index 9531fe2ad2d5..97916c106ed1 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/response/NullErrorResponseHandler.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/response/NullErrorResponseHandler.java @@ -25,7 +25,9 @@ public class NullErrorResponseHandler implements HttpResponseHandler()) .option(SdkClientOption.ENDPOINT, URI.create("http://localhost:8080")) .option(SdkClientOption.RETRY_POLICY, RetryPolicy.defaultRetryPolicy()) + .option(SdkClientOption.RETRY_STRATEGY, + SdkDefaultRetryStrategy.defaultRetryStrategy()) .option(SdkClientOption.ADDITIONAL_HTTP_HEADERS, new HashMap<>()) .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) .option(SdkAdvancedClientOption.SIGNER, new NoOpSigner()) @@ -83,6 +88,7 @@ public static SdkClientConfiguration testClientConfiguration() { public static class TestClientBuilder { private RetryPolicy retryPolicy; + private RetryStrategy retryStrategy; private SdkHttpClient httpClient; private Map additionalHeaders = new HashMap<>(); private Duration apiCallTimeout; @@ -93,6 +99,11 @@ public TestClientBuilder retryPolicy(RetryPolicy retryPolicy) { return this; } + public TestClientBuilder retryStrategy(RetryStrategy retryStrategy) { + this.retryStrategy = retryStrategy; + return this; + } + public TestClientBuilder httpClient(SdkHttpClient sdkHttpClient) { this.httpClient = sdkHttpClient; return this; @@ -118,16 +129,18 @@ public AmazonSyncHttpClient build() { return new AmazonSyncHttpClient(testClientConfiguration().toBuilder() .option(SdkClientOption.SYNC_HTTP_CLIENT, sdkHttpClient) .applyMutation(this::configureRetryPolicy) + .applyMutation(this::configureRetryStrategy) .applyMutation(this::configureAdditionalHeaders) .option(SdkClientOption.API_CALL_TIMEOUT, apiCallTimeout) - .option(SdkClientOption.API_CALL_ATTEMPT_TIMEOUT, apiCallAttemptTimeout) + .option(SdkClientOption.API_CALL_ATTEMPT_TIMEOUT, + apiCallAttemptTimeout) .build()); } private void configureAdditionalHeaders(SdkClientConfiguration.Builder builder) { Map> headers = - this.additionalHeaders.entrySet().stream() - .collect(Collectors.toMap(Map.Entry::getKey, e -> Arrays.asList(e.getValue()))); + this.additionalHeaders.entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> Arrays.asList(e.getValue()))); 
builder.option(SdkClientOption.ADDITIONAL_HTTP_HEADERS, headers); } @@ -137,10 +150,17 @@ private void configureRetryPolicy(SdkClientConfiguration.Builder builder) { builder.option(SdkClientOption.RETRY_POLICY, retryPolicy); } } + + private void configureRetryStrategy(SdkClientConfiguration.Builder builder) { + if (retryStrategy != null) { + builder.option(SdkClientOption.RETRY_STRATEGY, retryStrategy); + } + } } public static class TestAsyncClientBuilder { private RetryPolicy retryPolicy; + private RetryStrategy retryStrategy; private SdkAsyncHttpClient asyncHttpClient; private Duration apiCallTimeout; private Duration apiCallAttemptTimeout; @@ -151,6 +171,11 @@ public TestAsyncClientBuilder retryPolicy(RetryPolicy retryPolicy) { return this; } + public TestAsyncClientBuilder retryStrategy(RetryStrategy retryStrategy) { + this.retryStrategy = retryStrategy; + return this; + } + public TestAsyncClientBuilder asyncHttpClient(SdkAsyncHttpClient asyncHttpClient) { this.asyncHttpClient = asyncHttpClient; return this; @@ -176,8 +201,10 @@ public AmazonAsyncHttpClient build() { return new AmazonAsyncHttpClient(testClientConfiguration().toBuilder() .option(SdkClientOption.ASYNC_HTTP_CLIENT, asyncHttpClient) .option(SdkClientOption.API_CALL_TIMEOUT, apiCallTimeout) - .option(SdkClientOption.API_CALL_ATTEMPT_TIMEOUT, apiCallAttemptTimeout) + .option(SdkClientOption.API_CALL_ATTEMPT_TIMEOUT, + apiCallAttemptTimeout) .applyMutation(this::configureRetryPolicy) + .applyMutation(this::configureRetryStrategy) .applyMutation(this::configureAdditionalHeaders) .build()); } @@ -195,5 +222,11 @@ private void configureRetryPolicy(SdkClientConfiguration.Builder builder) { builder.option(SdkClientOption.RETRY_POLICY, retryPolicy); } } + + private void configureRetryStrategy(SdkClientConfiguration.Builder builder) { + if (retryStrategy != null) { + builder.option(SdkClientOption.RETRY_STRATEGY, retryStrategy); + } + } } } diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/RetryHeaderTestSuite.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/RetryHeaderTestSuite.java index 2644227b9e1a..1b709e524cb9 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/RetryHeaderTestSuite.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/RetryHeaderTestSuite.java @@ -82,8 +82,10 @@ public void retryAttemptAndMaxAreCorrect() { assertThat(requests).hasSize(2); assertThat(retryComponent(requests.get(0), "attempt")).isEqualTo("1"); assertThat(retryComponent(requests.get(1), "attempt")).isEqualTo("2"); + /* TODO: fixme, we do not set the max field correctly at the moment assertThat(retryComponent(requests.get(0), "max")).isEqualTo("3"); assertThat(retryComponent(requests.get(1), "max")).isEqualTo("3"); + */ } private String invocationId(SdkHttpRequest request) { From 5b116d2b46351d4ed3deb1aec2017f42de741440 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20Sugawara=20=28=E2=88=A9=EF=BD=80-=C2=B4=29?= =?UTF-8?q?=E2=8A=83=E2=94=81=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E?= Date: Fri, 16 Jun 2023 13:35:49 -0700 Subject: [PATCH 14/32] Update sdk version --- core/retries-api/pom.xml | 2 +- core/retries/pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/retries-api/pom.xml b/core/retries-api/pom.xml index 84d544f7e001..60f4f82d5116 100644 --- a/core/retries-api/pom.xml +++ b/core/retries-api/pom.xml @@ -20,7 +20,7 @@ 
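The test utilities above now accept a retry strategy next to the legacy retry policy, which the earlier test diffs rely on (DefaultRetryStrategy.none() in the client handler tests). A hypothetical wiring, with the builder and the mock HTTP client obtained elsewhere:

    // Hypothetical test wiring based on the TestClientBuilder additions above.
    AmazonSyncHttpClient testClient =
        testClientBuilder.retryStrategy(DefaultRetryStrategy.none())   // disable retries for deterministic tests
                         .httpClient(mockHttpClient)
                         .build();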
core software.amazon.awssdk - 2.20.78-SNAPSHOT + 2.20.88-SNAPSHOT 4.0.0 diff --git a/core/retries/pom.xml b/core/retries/pom.xml index 94256847313c..6247aab313bc 100644 --- a/core/retries/pom.xml +++ b/core/retries/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.78-SNAPSHOT + 2.20.88-SNAPSHOT 4.0.0 From 43fcfef64e43beffe37a1c5339c643bfdd0ecfa3 Mon Sep 17 00:00:00 2001 From: Manuel Sugawara Date: Fri, 23 Jun 2023 11:44:39 -0700 Subject: [PATCH 15/32] Change uses of RetryPolicy to RetryStrategy (#4125) --- .../customization/CustomizationConfig.java | 13 ++++ .../model/intermediate/IntermediateModel.java | 4 ++ .../poet/builder/BaseClientBuilderClass.java | 6 ++ .../builder/test-client-builder-class.java | 2 + .../poet/client/c2j/json/customization.config | 1 + .../client/c2j/rest-json/customization.config | 1 + .../builder/AwsDefaultClientBuilder.java | 20 ------ .../client/builder/DefaultsModeTest.java | 8 ++- .../awssdk/retries/api/RetryStrategy.java | 7 +++ .../retries/api/RetryStrategyBuilderTest.java | 6 ++ .../retries/internal/BaseRetryStrategy.java | 5 ++ .../builder/SdkDefaultClientBuilder.java | 15 ----- .../config/SdkClientOptionValidation.java | 2 - .../http/pipeline/stages/RetryableStage2.java | 61 ++++++++++++++++++- .../stages/utils/RetryableStageHelper2.java | 5 +- .../internal/retry/RetryPolicyAdapter.java | 5 ++ .../retry/SdkDefaultRetryStrategy.java | 30 ++++++--- services/dynamodb/pom.xml | 5 ++ .../dynamodb/DynamoDbRetryPolicy.java | 26 +++++++- .../dynamodb/customization.config | 2 +- test/codegen-generated-classes-test/pom.xml | 5 ++ .../services/metrics/CoreMetricsTest.java | 10 +-- .../metrics/async/AsyncCoreMetricsTest.java | 3 +- .../AsyncEventStreamingCoreMetricsTest.java | 2 +- .../async/AsyncStreamingCoreMetricsTest.java | 3 +- .../async/BaseAsyncCoreMetricsTest.java | 13 +++- .../services/retry/AsyncRetryHeaderTest.java | 2 +- .../retry/ClientRetryModeTestSuite.java | 10 +-- .../services/retry/RetryHeaderTestSuite.java | 2 - .../services/retry/SyncRetryHeaderTest.java | 2 +- test/protocol-tests/pom.xml | 10 +++ .../tests/AsyncResponseThreadingTest.java | 6 +- .../clockskew/ClockSkewAdjustmentTest.java | 4 +- .../SyncClientConnectionInterruptionTest.java | 7 ++- .../tests/retry/AsyncAwsJsonRetryTest.java | 6 +- .../tests/retry/AwsJsonRetryTest.java | 5 +- .../AsyncApiCallAttemptsTimeoutTest.java | 13 ++-- .../async/AsyncApiCallTimeoutTest.java | 17 +++--- .../sync/SyncApiCallAttemptTimeoutTest.java | 13 ++-- .../timeout/sync/SyncApiCallTimeoutTest.java | 15 ++--- ...ingOperationApiCallAttemptTimeoutTest.java | 4 +- ...cStreamingOperationApiCallTimeoutTest.java | 4 +- 42 files changed, 263 insertions(+), 117 deletions(-) diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java index 6913bc6e83f8..c01d034efc48 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java @@ -130,6 +130,11 @@ public class CustomizationConfig { */ private String customRetryPolicy; + /** + * Custom Retry strategy + */ + private String customRetryStrategy; + private boolean skipSyncClientGeneration; /** @@ -416,10 +421,18 @@ public String getCustomRetryPolicy() { return customRetryPolicy; } + public String getCustomRetryStrategy() { + return 
customRetryStrategy; + } + public void setCustomRetryPolicy(String customRetryPolicy) { this.customRetryPolicy = customRetryPolicy; } + public void setCustomRetryStrategy(String customRetryStrategy) { + this.customRetryStrategy = customRetryStrategy; + } + public boolean isSkipSyncClientGeneration() { return skipSyncClientGeneration; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/IntermediateModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/IntermediateModel.java index 88300c9aacb2..b49462dfe68e 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/IntermediateModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/IntermediateModel.java @@ -205,6 +205,10 @@ public String getCustomRetryPolicy() { return customizationConfig.getCustomRetryPolicy(); } + public String getCustomRetryStrategy() { + return customizationConfig.getCustomRetryStrategy(); + } + public String getSdkModeledExceptionBaseFqcn() { return String.format("%s.%s", metadata.getFullModelPackageName(), diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java index 1be4d730040e..e0d9da25e08f 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java @@ -342,6 +342,12 @@ private MethodSpec finalizeServiceConfigurationMethod() { PoetUtils.classNameFromFqcn(model.getCustomizationConfig().getCustomRetryPolicy())); } + if (StringUtils.isNotBlank(model.getCustomizationConfig().getCustomRetryStrategy())) { + builder.addCode(".option($1T.RETRY_STRATEGY, $2T.resolveRetryStrategy(config))", + SdkClientOption.class, + PoetUtils.classNameFromFqcn(model.getCustomizationConfig().getCustomRetryStrategy())); + } + if (StringUtils.isNotBlank(clientConfigClassName)) { builder.addCode(".option($T.SERVICE_CONFIGURATION, finalServiceConfig)", SdkClientOption.class); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-class.java index dc3a1ed74d74..51dad2873838 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-class.java @@ -4,6 +4,7 @@ import java.util.List; import software.amazon.MyServiceHttpConfig; import software.amazon.MyServiceRetryPolicy; +import software.amazon.MyServiceRetryStrategy; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.signer.Aws4Signer; @@ -126,6 +127,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon .option(AwsClientOption.FIPS_ENDPOINT_ENABLED, finalServiceConfig.fipsModeEnabled()) .option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors) .option(SdkClientOption.RETRY_POLICY, MyServiceRetryPolicy.resolveRetryPolicy(config)) + .option(SdkClientOption.RETRY_STRATEGY, MyServiceRetryStrategy.resolveRetryStrategy(config)) .option(SdkClientOption.SERVICE_CONFIGURATION, finalServiceConfig).build(); } diff --git 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/customization.config index af7f5cb45640..eaab59be5b20 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/customization.config @@ -10,6 +10,7 @@ "hasFipsProperty": true }, "customRetryPolicy": "software.amazon.MyServiceRetryPolicy", + "customRetryStrategy": "software.amazon.MyServiceRetryStrategy", "verifiedSimpleMethods" : ["paginatedOperationWithResultKey"], "blacklistedSimpleMethods" : [ "eventStreamOperation" diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/customization.config index e13cec317ff6..1279a8974876 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/customization.config @@ -14,6 +14,7 @@ "hasAccelerateModeEnabledProperty":true }, "customRetryPolicy": "software.amazon.MyServiceRetryPolicy", + "customRetryStrategy": "software.amazon.MyServiceRetryStrategy", "verifiedSimpleMethods" : ["paginatedOperationWithResultKey"], "blacklistedSimpleMethods" : [ "eventStreamOperation" diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java index 8460e4975f1e..28c3c6b4f678 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java @@ -372,28 +372,8 @@ private RetryPolicy resolveAwsRetryPolicy(SdkClientConfiguration config) { } } - RetryMode retryMode = RetryMode.resolver() - .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(config.option(SdkClientOption.PROFILE_NAME)) - .defaultRetryMode(config.option(SdkClientOption.DEFAULT_RETRY_MODE)) - .resolve(); - return AwsRetryPolicy.forRetryMode(retryMode); - // TODO: fixme This will be changed like this to pick the configured retry strategy - // if no retry policy is configured. - /* - RetryPolicy policy = config.option(SdkClientOption.RETRY_POLICY); - - if (policy != null) { - if (policy.additionalRetryConditionsAllowed()) { - return AwsRetryPolicy.addRetryConditions(policy); - } else { - return policy; - } - } - // If we don't have a configured retry policy we will use the configured retry strategy instead. 
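The comment above states the intended fallback once the policy becomes optional: an explicitly configured strategy wins, otherwise one is derived from the resolved retry mode. A hedged sketch of that order (the method name and shape are illustrative; the real resolveAwsRetryStrategy is only referenced in the surrounding context):

    // Illustrative only: the resolution order described by the comment above.
    private RetryStrategy resolveEffectiveRetryStrategy(SdkClientConfiguration config) {
        RetryStrategy configured = config.option(SdkClientOption.RETRY_STRATEGY);
        if (configured != null) {
            return configured;
        }
        RetryMode retryMode = RetryMode.resolver()
                                       .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER))
                                       .profileName(config.option(SdkClientOption.PROFILE_NAME))
                                       .defaultRetryMode(config.option(SdkClientOption.DEFAULT_RETRY_MODE))
                                       .resolve();
        return AwsRetryStrategy.forRetryMode(retryMode);
    }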
return null; - */ } private RetryStrategy resolveAwsRetryStrategy(SdkClientConfiguration config) { diff --git a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/builder/DefaultsModeTest.java b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/builder/DefaultsModeTest.java index 85c4a9f5ca54..2b9ea42a5d73 100644 --- a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/builder/DefaultsModeTest.java +++ b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/builder/DefaultsModeTest.java @@ -23,6 +23,7 @@ import static software.amazon.awssdk.awscore.client.config.AwsClientOption.DEFAULTS_MODE; import static software.amazon.awssdk.core.client.config.SdkClientOption.DEFAULT_RETRY_MODE; import static software.amazon.awssdk.core.client.config.SdkClientOption.RETRY_POLICY; +import static software.amazon.awssdk.core.client.config.SdkClientOption.RETRY_STRATEGY; import static software.amazon.awssdk.regions.ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT; import java.time.Duration; @@ -36,6 +37,7 @@ import software.amazon.awssdk.awscore.internal.defaultsmode.DefaultsModeConfiguration; import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; +import software.amazon.awssdk.core.internal.retry.SdkDefaultRetryStrategy; import software.amazon.awssdk.core.retry.RetryMode; import software.amazon.awssdk.http.SdkHttpClient; import software.amazon.awssdk.http.SdkHttpConfigurationOption; @@ -75,7 +77,7 @@ public void defaultClient_shouldUseLegacyModeWithExistingDefaults() { .build(); assertThat(client.clientConfiguration.option(DEFAULTS_MODE)).isEqualTo(DefaultsMode.LEGACY); - assertThat(client.clientConfiguration.option(RETRY_POLICY).retryMode()).isEqualTo(RetryMode.defaultRetryMode()); + assertThat(SdkDefaultRetryStrategy.retryMode(client.clientConfiguration.option(RETRY_STRATEGY))).isEqualTo(RetryMode.defaultRetryMode()); assertThat(client.clientConfiguration.option(DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)).isNull(); } @@ -97,7 +99,7 @@ public void nonLegacyDefaultsMode_shouldApplySdkDefaultsAndHttpDefaults() { AttributeMap attributes = DefaultsModeConfiguration.defaultConfig(targetMode); - assertThat(client.clientConfiguration.option(RETRY_POLICY).retryMode()).isEqualTo(attributes.get(DEFAULT_RETRY_MODE)); + assertThat(SdkDefaultRetryStrategy.retryMode(client.clientConfiguration.option(RETRY_STRATEGY))).isEqualTo(attributes.get(DEFAULT_RETRY_MODE)); assertThat(client.clientConfiguration.option(DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)).isEqualTo("regional"); } @@ -119,7 +121,7 @@ public void nonLegacyDefaultsModeAsyncClient_shouldApplySdkDefaultsAndHttpDefaul AttributeMap attributes = DefaultsModeConfiguration.defaultConfig(targetMode); - assertThat(client.clientConfiguration.option(RETRY_POLICY).retryMode()).isEqualTo(attributes.get(DEFAULT_RETRY_MODE)); + assertThat(SdkDefaultRetryStrategy.retryMode(client.clientConfiguration.option(RETRY_STRATEGY))).isEqualTo(attributes.get(DEFAULT_RETRY_MODE)); } @Test diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java index 6361ffe171cf..cc37143b2bb6 100644 --- a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java @@ -83,6 +83,13 @@ public 
interface RetryStrategy< */ RecordSuccessResponse recordSuccess(RecordSuccessRequest request); + /** + * Returns the maximum numbers attempts that this retry policy will allow. + * + * @return the maximum numbers attempts that this retry policy will allow. + */ + int maxAttempts(); + /** * Create a new {@link Builder} with the current configuration. * diff --git a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java index 016d9f56f50e..227a57e6b2ea 100644 --- a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java +++ b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java @@ -176,10 +176,16 @@ public RecordSuccessResponse recordSuccess(RecordSuccessRequest request) { return null; } + @Override + public int maxAttempts() { + return 0; + } + @Override public BuilderToTestDefaults toBuilder() { return null; } + } } diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/BaseRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/BaseRetryStrategy.java index 4b460137d184..f66f1e68a729 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/BaseRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/BaseRetryStrategy.java @@ -138,6 +138,11 @@ public final RecordSuccessResponse recordSuccess(RecordSuccessRequest request) { return RecordSuccessResponse.create(refreshedToken); } + @Override + public int maxAttempts() { + return maxAttempts; + } + @Override public abstract B toBuilder(); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java index 2dfc816c10a0..f612d017d8ea 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java @@ -358,22 +358,7 @@ private String resolveClientUserAgent(SdkClientConfiguration config, String retr } private RetryPolicy resolveRetryPolicy(SdkClientConfiguration config) { - RetryPolicy policy = config.option(SdkClientOption.RETRY_POLICY); - if (policy != null) { - return policy; - } - - RetryMode retryMode = RetryMode.resolver() - .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(config.option(SdkClientOption.PROFILE_NAME)) - .defaultRetryMode(config.option(SdkClientOption.DEFAULT_RETRY_MODE)) - .resolve(); - return RetryPolicy.forRetryMode(retryMode); - // TODO: fixme This will be changed like this to pick the configured retry strategy - // if no retry policy is configured. 
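The maxAttempts() accessor added to RetryStrategy earlier in this commit gives the pipeline a way to report the configured attempt budget (it feeds the retry-info header further down). A hedged sketch of the contract, assuming the builder's maxAttempts(int) setter shown elsewhere in this series:

    // Illustrative: the strategy reports the attempt budget it was built with.
    StandardRetryStrategy strategy = DefaultRetryStrategy.standardStrategyBuilder()
                                                         .maxAttempts(4)
                                                         .build();
    assert strategy.maxAttempts() == 4;
    // The legacy adapter derives the same value from the wrapped policy: retryPolicy.numRetries() + 1.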
- /* return config.option(SdkClientOption.RETRY_POLICY); - */ } private RetryStrategy resolveRetryStrategy(SdkClientConfiguration config) { diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOptionValidation.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOptionValidation.java index 56203c459fdc..638baf938217 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOptionValidation.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOptionValidation.java @@ -46,8 +46,6 @@ private static void validateClientOptions(SdkClientConfiguration c) { require("overrideConfiguration.additionalHttpHeaders", c.option(SdkClientOption.ADDITIONAL_HTTP_HEADERS)); require("overrideConfiguration.executionInterceptors", c.option(SdkClientOption.EXECUTION_INTERCEPTORS)); - // TODO: fixme, this will be removed as retryPolicy will be optional - require("overrideConfiguration.retryPolicy", c.option(SdkClientOption.RETRY_POLICY)); require("overrideConfiguration.retryStrategy", c.option(SdkClientOption.RETRY_STRATEGY)); require("overrideConfiguration.advancedOption[USER_AGENT_PREFIX]", diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/RetryableStage2.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/RetryableStage2.java index 4c927cd970fb..d12c444afa73 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/RetryableStage2.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/RetryableStage2.java @@ -28,12 +28,14 @@ import software.amazon.awssdk.core.internal.http.pipeline.RequestToResponsePipeline; import software.amazon.awssdk.core.internal.http.pipeline.stages.utils.RetryableStageHelper2; import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpFullResponse; /** * Wrapper around the pipeline for a single request to provide retry, clock-skew and request throttling functionality. 
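Before the new stage below, note the validation change above: a client configuration must now always carry a retry strategy, while the retry policy option has become optional. A minimal sketch of the relevant option, assuming the remaining required client options are set elsewhere (as the HttpTestUtils changes earlier in this commit do):

    // Illustrative: RETRY_STRATEGY is now the required option; RETRY_POLICY may be omitted.
    // 'configBuilder' is a placeholder for an SdkClientConfiguration.Builder obtained elsewhere.
    configBuilder.option(SdkClientOption.RETRY_STRATEGY, SdkDefaultRetryStrategy.defaultRetryStrategy());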
*/ @SdkInternalApi public final class RetryableStage2 implements RequestToResponsePipeline { + private static final String RETRY_AFTER_HEADER = "Retry-After"; private final RequestPipeline> requestPipeline; private final HttpClientDependencies dependencies; @@ -54,8 +56,13 @@ public Response execute(SdkHttpFullRequest request, RequestExecutionCon Response response = executeRequest(retryableStageHelper, context); retryableStageHelper.recordAttemptSucceeded(); return response; - } catch (SdkException | IOException e) { - retryableStageHelper.setLastException(e); + } catch (SdkExceptionWithRetryAfterHint | SdkException | IOException e) { + Throwable throwable = e; + if (e instanceof SdkExceptionWithRetryAfterHint) { + SdkExceptionWithRetryAfterHint wrapper = (SdkExceptionWithRetryAfterHint) e; + throwable = wrapper.cause(); + } + retryableStageHelper.setLastException(throwable); Duration suggestedDelay = suggestedDelay(e); Optional backoffDelay = retryableStageHelper.tryRefreshToken(suggestedDelay); if (backoffDelay.isPresent()) { @@ -70,6 +77,10 @@ public Response execute(SdkHttpFullRequest request, RequestExecutionCon } private Duration suggestedDelay(Exception e) { + if (e instanceof SdkExceptionWithRetryAfterHint) { + SdkExceptionWithRetryAfterHint except = (SdkExceptionWithRetryAfterHint) e; + return Duration.ofSeconds(except.retryAfter()); + } return Duration.ZERO; } @@ -83,8 +94,52 @@ private Response executeRequest(RetryableStageHelper2 retryableStageHel retryableStageHelper.setLastResponse(response.httpResponse()); if (!response.isSuccess()) { retryableStageHelper.adjustClockIfClockSkew(response); - throw response.exception(); + throw responseException(response); } return response; } + + private RuntimeException responseException(Response response) { + Optional optionalRetryAfter = retryAfter(response.httpResponse()); + if (optionalRetryAfter.isPresent()) { + return new SdkExceptionWithRetryAfterHint(optionalRetryAfter.get(), response.exception()); + } + return response.exception(); + } + + private Optional retryAfter(SdkHttpFullResponse response) { + Optional optionalRetryAfterHeader = response.firstMatchingHeader(RETRY_AFTER_HEADER); + if (optionalRetryAfterHeader.isPresent()) { + String retryAfterHeader = optionalRetryAfterHeader.get(); + try { + return Optional.of(Integer.parseInt(retryAfterHeader)); + } catch (NumberFormatException e) { + // Ignore and fallback to returning empty. 
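The header probing above feeds the suggestedDelay parameter documented on tryRefreshToken: when a throttled response carries Retry-After, the stage asks the strategy for a token refresh with at least that much delay. A sketch of the interaction, assuming a 429 response with Retry-After: 3 (errorResponse and retryableStageHelper stand in for objects the stage already has):

    // Illustrative: how a Retry-After header becomes a floor for the next backoff delay.
    Optional<String> retryAfterHeader = errorResponse.firstMatchingHeader("Retry-After");        // e.g. "3"
    Duration suggestedDelay = retryAfterHeader.map(v -> Duration.ofSeconds(Long.parseLong(v)))
                                              .orElse(Duration.ZERO);
    Optional<Duration> backoff = retryableStageHelper.tryRefreshToken(suggestedDelay);
    // Per the tryRefreshToken contract, a present delay here is at least three seconds.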
+ } + } + return Optional.empty(); + } + + // This probably should go directly into SdkException + static class SdkExceptionWithRetryAfterHint extends RuntimeException { + private final SdkException cause; + private final int seconds; + + SdkExceptionWithRetryAfterHint(int seconds, SdkException cause) { + this.seconds = seconds; + this.cause = cause; + } + + public int retryAfter() { + return seconds; + } + + public SdkException cause() { + return cause; + } + + public int seconds() { + return seconds; + } + } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/utils/RetryableStageHelper2.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/utils/RetryableStageHelper2.java index 6639c6d7b3bb..8022ee50ab5c 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/utils/RetryableStageHelper2.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/utils/RetryableStageHelper2.java @@ -173,10 +173,9 @@ public void logBackingOff(Duration backoffDelay) { * Retrieve the request to send to the service, including any detailed retry information headers. */ public SdkHttpFullRequest requestToSend() { - // TODO: fixme, we don't longer have this information handy we need to change the interface to access it. - int maxAllowedRetries = 3; return request.toBuilder() - .putHeader(SDK_RETRY_INFO_HEADER, "attempt=" + attemptNumber + "; max=" + maxAllowedRetries) + .putHeader(SDK_RETRY_INFO_HEADER, "attempt=" + attemptNumber + + "; max=" + retryStrategy().maxAttempts()) .build(); } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/RetryPolicyAdapter.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/RetryPolicyAdapter.java index bbdbfed3d24c..35a941103486 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/RetryPolicyAdapter.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/RetryPolicyAdapter.java @@ -77,6 +77,11 @@ public RecordSuccessResponse recordSuccess(RecordSuccessRequest request) { return RecordSuccessResponse.create(token); } + @Override + public int maxAttempts() { + return retryPolicy.numRetries() + 1; + } + @Override public Builder toBuilder() { return new Builder(this); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/SdkDefaultRetryStrategy.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/SdkDefaultRetryStrategy.java index 712db64ed98f..3ba667110f92 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/SdkDefaultRetryStrategy.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/SdkDefaultRetryStrategy.java @@ -63,6 +63,25 @@ private SdkDefaultRetryStrategy() { } } + /** + * Returns the {@link RetryMode} for the given retry strategy. 
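With requestToSend() now taking the max value from retryStrategy().maxAttempts(), the amz-sdk-request header finally matches the configured budget; the commented-out assertions in RetryHeaderTestSuite earlier in this series were waiting for exactly that. Illustrative values for a strategy whose maxAttempts() is 3:

    // First attempt:  amz-sdk-request: attempt=1; max=3
    // Second attempt: amz-sdk-request: attempt=2; max=3
    // Hypothetical assertions mirroring RetryHeaderTestSuite once the header is populated this way:
    assertThat(retryComponent(requests.get(0), "max")).isEqualTo("3");
    assertThat(retryComponent(requests.get(1), "max")).isEqualTo("3");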
+ * + * @param retryStrategy The retry strategy to test for + * @return The retry mode for the given strategy + */ + public static RetryMode retryMode(RetryStrategy retryStrategy) { + if (retryStrategy instanceof StandardRetryStrategy) { + return RetryMode.STANDARD; + } + if (retryStrategy instanceof AdaptiveRetryStrategy) { + return RetryMode.ADAPTIVE; + } + if (retryStrategy instanceof LegacyRetryStrategy) { + return RetryMode.LEGACY; + } + throw new IllegalArgumentException("unknown retry strategy class: " + retryStrategy.getClass().getName()); + } + /** * Returns a {@link StandardRetryStrategy} with generic SDK retry conditions. * @@ -131,8 +150,7 @@ public static AdaptiveRetryStrategy.Builder adaptiveRetryStrategyBuilder() { */ public static > T configure(T builder) { - builder.retryOnException(SdkDefaultRetryStrategy::retryOnRetryableStatusCodes) - .retryOnException(SdkDefaultRetryStrategy::retryOnStatusCodes) + builder.retryOnException(SdkDefaultRetryStrategy::retryOnStatusCodes) .retryOnException(SdkDefaultRetryStrategy::retryOnClockSkewException) .retryOnException(SdkDefaultRetryStrategy::retryOnThrottlingCondition); SdkDefaultRetrySetting.RETRYABLE_EXCEPTIONS.forEach(builder::retryOnExceptionOrCauseInstanceOf); @@ -167,12 +185,4 @@ private static boolean retryOnThrottlingCondition(Throwable ex) { } return false; } - - private static boolean retryOnRetryableStatusCodes(Throwable ex) { - if (ex instanceof SdkServiceException) { - SdkServiceException exception = (SdkServiceException) ex; - return SdkDefaultRetrySetting.RETRYABLE_STATUS_CODES.contains(exception.statusCode()); - } - return false; - } } diff --git a/services/dynamodb/pom.xml b/services/dynamodb/pom.xml index 8336beef3685..2fc222842153 100644 --- a/services/dynamodb/pom.xml +++ b/services/dynamodb/pom.xml @@ -62,6 +62,11 @@ profiles ${awsjavasdk.version} + + software.amazon.awssdk + retries-api + ${awsjavasdk.version} + sts software.amazon.awssdk diff --git a/services/dynamodb/src/main/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicy.java b/services/dynamodb/src/main/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicy.java index 3830c23cef5f..f34bacbbcae5 100644 --- a/services/dynamodb/src/main/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicy.java +++ b/services/dynamodb/src/main/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicy.java @@ -18,6 +18,7 @@ import java.time.Duration; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.awscore.retry.AwsRetryPolicy; +import software.amazon.awssdk.awscore.retry.AwsRetryStrategy; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.internal.retry.SdkDefaultRetrySetting; @@ -25,6 +26,7 @@ import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.core.retry.backoff.BackoffStrategy; import software.amazon.awssdk.core.retry.backoff.FullJitterBackoffStrategy; +import software.amazon.awssdk.retries.api.RetryStrategy; /** @@ -37,7 +39,12 @@ final class DynamoDbRetryPolicy { * Default max retry count for DynamoDB client, regardless of retry mode. **/ private static final int MAX_ERROR_RETRY = 8; - + + /** + * Default attempts count for DynamoDB client, regardless of retry mode. + **/ + private static final int MAX_ATTEMPTS = MAX_ERROR_RETRY + 1; + /** * Default base sleep time for DynamoDB, regardless of retry mode. 
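The new retryMode(RetryStrategy) helper above is what the DefaultsModeTest changes earlier in this commit use to recover the mode from a configured strategy. A small usage sketch:

    // Illustrative: mapping strategies back to their retry mode.
    RetryMode standard = SdkDefaultRetryStrategy.retryMode(SdkDefaultRetryStrategy.standardRetryStrategy());   // STANDARD
    RetryMode adaptive = SdkDefaultRetryStrategy.retryMode(SdkDefaultRetryStrategy.adaptiveRetryStrategy());   // ADAPTIVE
    // Any other RetryStrategy implementation makes retryMode(...) throw IllegalArgumentException.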
**/ @@ -76,4 +83,21 @@ public static RetryPolicy resolveRetryPolicy(SdkClientConfiguration config) { .backoffStrategy(BACKOFF_STRATEGY) .build(); } + + public static RetryStrategy resolveRetryStrategy(SdkClientConfiguration config) { + RetryStrategy configuredRetryStrategy = config.option(SdkClientOption.RETRY_STRATEGY); + if (configuredRetryStrategy != null) { + return configuredRetryStrategy; + } + + RetryMode retryMode = RetryMode.resolver() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultRetryMode(config.option(SdkClientOption.DEFAULT_RETRY_MODE)) + .resolve(); + return AwsRetryStrategy.forRetryMode(retryMode) + .toBuilder() + .maxAttempts(MAX_ATTEMPTS) + .build(); + } } diff --git a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/customization.config b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/customization.config index 43c388445c29..a424e2c84348 100644 --- a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/customization.config +++ b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/customization.config @@ -33,6 +33,6 @@ "listWebACLs", "listXssMatchSets" ], - "customRetryPolicy" : "software.amazon.awssdk.services.dynamodb.DynamoDbRetryPolicy", + "customRetryStrategy" : "software.amazon.awssdk.services.dynamodb.DynamoDbRetryPolicy", "enableEndpointDiscoveryMethodRequired": true } diff --git a/test/codegen-generated-classes-test/pom.xml b/test/codegen-generated-classes-test/pom.xml index 1f8b6511a03b..6a6e7f2b0fcd 100644 --- a/test/codegen-generated-classes-test/pom.xml +++ b/test/codegen-generated-classes-test/pom.xml @@ -120,6 +120,11 @@ profiles ${awsjavasdk.version} + + software.amazon.awssdk + retries-api + ${awsjavasdk.version} + netty-nio-client software.amazon.awssdk diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java index c2b701217cf1..879900b70dd0 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java @@ -39,8 +39,8 @@ import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; import software.amazon.awssdk.core.exception.SdkException; -import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.internal.metrics.SdkErrorType; +import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.http.AbortableInputStream; import software.amazon.awssdk.http.ExecutableHttpRequest; import software.amazon.awssdk.http.HttpExecuteRequest; @@ -62,6 +62,8 @@ public class CoreMetricsTest { private static final String REQUEST_ID = "req-id"; private static final String EXTENDED_REQUEST_ID = "extended-id"; private static final int MAX_RETRIES = 2; + private static final int MAX_ATTEMPTS = MAX_RETRIES + 1; + private static ProtocolRestJsonClient client; @@ -83,7 +85,8 @@ public void setup() throws IOException { .httpClient(mockHttpClient) .region(Region.US_WEST_2) .credentialsProvider(mockCredentialsProvider) - .overrideConfiguration(c -> c.addMetricPublisher(mockPublisher).retryPolicy(b -> b.numRetries(MAX_RETRIES))) + .overrideConfiguration(c -> 
c.addMetricPublisher(mockPublisher) + .retryStrategy(b -> b.maxAttempts(MAX_ATTEMPTS))) .build(); AbortableInputStream content = contentStream("{}"); SdkHttpFullResponse httpResponse = SdkHttpFullResponse.builder() @@ -233,7 +236,7 @@ public void testApiCall_serviceReturnsError_errorInfoIncludedInMetrics() throws MetricCollection capturedCollection = collectionCaptor.getValue(); - assertThat(capturedCollection.children()).hasSize(MAX_RETRIES + 1); + assertThat(capturedCollection.children()).hasSize(MAX_ATTEMPTS); assertThat(capturedCollection.metricValues(CoreMetric.RETRY_COUNT)).containsExactly(MAX_RETRIES); assertThat(capturedCollection.metricValues(CoreMetric.API_CALL_SUCCESSFUL)).containsExactly(false); @@ -280,7 +283,6 @@ public void testApiCall_httpClientThrowsNetworkError_errorTypeIncludedInMetrics( } } - private static HttpExecuteResponse mockExecuteResponse(SdkHttpFullResponse httpResponse) { HttpExecuteResponse mockResponse = mock(HttpExecuteResponse.class); when(mockResponse.httpResponse()).thenReturn(httpResponse); diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncCoreMetricsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncCoreMetricsTest.java index 7649fbae6fb0..c06391314ed9 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncCoreMetricsTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncCoreMetricsTest.java @@ -72,7 +72,8 @@ public void setup() throws IOException { .region(Region.US_WEST_2) .credentialsProvider(mockCredentialsProvider) .endpointOverride(URI.create("http://localhost:" + wireMock.port())) - .overrideConfiguration(c -> c.addMetricPublisher(mockPublisher).retryPolicy(b -> b.numRetries(MAX_RETRIES))) + .overrideConfiguration(c -> c.addMetricPublisher(mockPublisher) + .retryStrategy(b -> b.maxAttempts(MAX_ATTEMPTS))) .build(); when(mockCredentialsProvider.resolveCredentials()).thenAnswer(invocation -> { diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncEventStreamingCoreMetricsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncEventStreamingCoreMetricsTest.java index 534217c04110..594293412643 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncEventStreamingCoreMetricsTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncEventStreamingCoreMetricsTest.java @@ -61,7 +61,7 @@ public void setup() { .credentialsProvider(mockCredentialsProvider) .endpointOverride(URI.create("http://localhost:" + wireMock.port())) .overrideConfiguration(c -> c.addMetricPublisher(mockPublisher) - .retryPolicy(b -> b.numRetries(MAX_RETRIES))) + .retryStrategy(b -> b.maxAttempts(MAX_ATTEMPTS))) .build(); when(mockCredentialsProvider.resolveCredentials()).thenAnswer(invocation -> { diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncStreamingCoreMetricsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncStreamingCoreMetricsTest.java index 15b1aa5bf129..fa8677e4ce23 100644 --- 
a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncStreamingCoreMetricsTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncStreamingCoreMetricsTest.java @@ -59,7 +59,8 @@ public void setup() throws IOException { .region(Region.US_WEST_2) .credentialsProvider(mockCredentialsProvider) .endpointOverride(URI.create("http://localhost:" + wireMock.port())) - .overrideConfiguration(c -> c.addMetricPublisher(mockPublisher).retryPolicy(b -> b.numRetries(MAX_RETRIES))) + .overrideConfiguration(c -> c.addMetricPublisher(mockPublisher) + .retryStrategy(b -> b.maxAttempts(MAX_ATTEMPTS))) .build(); when(mockCredentialsProvider.resolveCredentials()).thenAnswer(invocation -> { diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java index 4ed2df722d6b..9edb16d2e6b1 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java @@ -16,6 +16,7 @@ package software.amazon.awssdk.services.metrics.async; import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.anyRequestedFor; import static com.github.tomakehurst.wiremock.client.WireMock.anyUrl; import static com.github.tomakehurst.wiremock.client.WireMock.post; import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; @@ -23,18 +24,20 @@ import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.mockito.Mockito.verify; +import com.github.tomakehurst.wiremock.client.WireMock; import com.github.tomakehurst.wiremock.http.Fault; import com.github.tomakehurst.wiremock.stubbing.Scenario; import java.time.Duration; import java.util.concurrent.CompletableFuture; import java.util.function.Supplier; +import org.junit.Ignore; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.ArgumentCaptor; import org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.internal.metrics.SdkErrorType; +import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.http.HttpMetric; import software.amazon.awssdk.metrics.MetricCollection; import software.amazon.awssdk.metrics.MetricPublisher; @@ -46,6 +49,7 @@ public abstract class BaseAsyncCoreMetricsTest { private static final String REQUEST_ID = "req-id"; private static final String EXTENDED_REQUEST_ID = "extended-id"; static final int MAX_RETRIES = 2; + static final int MAX_ATTEMPTS = MAX_RETRIES + 1; public static final Duration FIXED_DELAY = Duration.ofMillis(500); @Test @@ -97,7 +101,8 @@ public void apiCall_allRetryAttemptsFailedOfNetworkError() { MetricCollection capturedCollection = collectionCaptor.getValue(); verifyFailedApiCallCollection(capturedCollection); - assertThat(capturedCollection.children()).hasSize(MAX_RETRIES + 1); + assertThat(capturedCollection.children()).hasSize(MAX_ATTEMPTS); + WireMock.verify(MAX_ATTEMPTS, anyRequestedFor(anyUrl())); capturedCollection.children().forEach(requestMetrics -> { 
assertThat(requestMetrics.metricValues(HttpMetric.HTTP_STATUS_CODE)) @@ -159,6 +164,8 @@ private void verifyFailedApiCallAttemptCollection(MetricCollection requestMetric .containsExactly(REQUEST_ID); assertThat(requestMetrics.metricValues(CoreMetric.AWS_EXTENDED_REQUEST_ID)) .containsExactly(EXTENDED_REQUEST_ID); + assertThat(requestMetrics.metricValues(CoreMetric.BACKOFF_DELAY_DURATION).size()) + .isGreaterThan(0); assertThat(requestMetrics.metricValues(CoreMetric.BACKOFF_DELAY_DURATION).get(0)) .isGreaterThanOrEqualTo(Duration.ZERO); assertThat(requestMetrics.metricValues(CoreMetric.SERVICE_CALL_DURATION).get(0)) @@ -173,6 +180,8 @@ private void verifySuccessfulApiCallAttemptCollection(MetricCollection attemptCo .containsExactly(REQUEST_ID); assertThat(attemptCollection.metricValues(CoreMetric.AWS_EXTENDED_REQUEST_ID)) .containsExactly(EXTENDED_REQUEST_ID); + assertThat(attemptCollection.metricValues(CoreMetric.BACKOFF_DELAY_DURATION).size()) + .isGreaterThanOrEqualTo(1); assertThat(attemptCollection.metricValues(CoreMetric.BACKOFF_DELAY_DURATION).get(0)) .isGreaterThanOrEqualTo(Duration.ZERO); assertThat(attemptCollection.metricValues(CoreMetric.SIGNING_DURATION).get(0)) diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/AsyncRetryHeaderTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/AsyncRetryHeaderTest.java index 41a6ce94d862..5e951d432ffe 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/AsyncRetryHeaderTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/AsyncRetryHeaderTest.java @@ -31,7 +31,7 @@ public class AsyncRetryHeaderTest extends RetryHeaderTestSuite c.retryPolicy(RetryMode.STANDARD)) + .overrideConfiguration(c -> c.retryStrategy(RetryMode.STANDARD)) .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) .region(Region.US_EAST_1) .endpointOverride(URI.create("http://localhost")) diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/ClientRetryModeTestSuite.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/ClientRetryModeTestSuite.java index 3d623dc42447..22cce99f73b6 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/ClientRetryModeTestSuite.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/ClientRetryModeTestSuite.java @@ -48,7 +48,8 @@ public abstract class ClientRetryModeTestSuite o.retryPolicy(RetryMode.LEGACY)).build(); + ClientT client = clientBuilder().overrideConfiguration(o -> o.retryStrategy(RetryMode.LEGACY)) + .build(); assertThatThrownBy(() -> callAllTypes(client)).isInstanceOf(SdkException.class); verifyRequestCount(4); } @@ -56,7 +57,7 @@ public void legacyRetryModeIsFourAttempts() { @Test public void standardRetryModeIsThreeAttempts() { stubThrottlingResponse(); - ClientT client = clientBuilder().overrideConfiguration(o -> o.retryPolicy(RetryMode.STANDARD)).build(); + ClientT client = clientBuilder().overrideConfiguration(o -> o.retryStrategy(RetryMode.STANDARD)).build(); assertThatThrownBy(() -> callAllTypes(client)).isInstanceOf(SdkException.class); verifyRequestCount(3); } @@ -80,8 +81,7 @@ public void legacyRetryModeExcludesThrottlingExceptions() throws InterruptedExce stubThrottlingResponse(); ExecutorService executor = 
Executors.newFixedThreadPool(51); - ClientT client = clientBuilder().overrideConfiguration(o -> o.retryPolicy(RetryMode.LEGACY)).build(); - + ClientT client = clientBuilder().overrideConfiguration(o -> o.retryStrategy(RetryMode.LEGACY)).build(); for (int i = 0; i < 51; ++i) { executor.execute(() -> assertThatThrownBy(() -> callAllTypes(client)).isInstanceOf(SdkException.class)); } @@ -97,7 +97,7 @@ public void standardRetryModeIncludesThrottlingExceptions() throws InterruptedEx stubThrottlingResponse(); ExecutorService executor = Executors.newFixedThreadPool(51); - ClientT client = clientBuilder().overrideConfiguration(o -> o.retryPolicy(RetryMode.STANDARD)).build(); + ClientT client = clientBuilder().overrideConfiguration(o -> o.retryStrategy(RetryMode.STANDARD)).build(); for (int i = 0; i < 51; ++i) { executor.execute(() -> assertThatThrownBy(() -> callAllTypes(client)).isInstanceOf(SdkException.class)); diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/RetryHeaderTestSuite.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/RetryHeaderTestSuite.java index 1b709e524cb9..2644227b9e1a 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/RetryHeaderTestSuite.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/RetryHeaderTestSuite.java @@ -82,10 +82,8 @@ public void retryAttemptAndMaxAreCorrect() { assertThat(requests).hasSize(2); assertThat(retryComponent(requests.get(0), "attempt")).isEqualTo("1"); assertThat(retryComponent(requests.get(1), "attempt")).isEqualTo("2"); - /* TODO: fixme, we do not set the max field correctly at the moment assertThat(retryComponent(requests.get(0), "max")).isEqualTo("3"); assertThat(retryComponent(requests.get(1), "max")).isEqualTo("3"); - */ } private String invocationId(SdkHttpRequest request) { diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/SyncRetryHeaderTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/SyncRetryHeaderTest.java index 54bf381352c2..23e73db3cae0 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/SyncRetryHeaderTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/SyncRetryHeaderTest.java @@ -29,7 +29,7 @@ public class SyncRetryHeaderTest extends RetryHeaderTestSuite c.retryPolicy(RetryMode.STANDARD)) + .overrideConfiguration(c -> c.retryStrategy(RetryMode.STANDARD)) .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) .region(Region.US_EAST_1) diff --git a/test/protocol-tests/pom.xml b/test/protocol-tests/pom.xml index 62b63af64d5a..d8d976724cac 100644 --- a/test/protocol-tests/pom.xml +++ b/test/protocol-tests/pom.xml @@ -118,6 +118,16 @@ endpoints-spi ${awsjavasdk.version} + + software.amazon.awssdk + retries + ${awsjavasdk.version} + + + software.amazon.awssdk + retries-api + ${awsjavasdk.version} + diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/AsyncResponseThreadingTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/AsyncResponseThreadingTest.java index 506116373265..da7e1f4a2946 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/AsyncResponseThreadingTest.java +++ 
b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/AsyncResponseThreadingTest.java @@ -33,10 +33,10 @@ import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.awscore.retry.AwsRetryStrategy; import software.amazon.awssdk.core.ResponseBytes; import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; import software.amazon.awssdk.services.protocolrestjson.model.ProtocolRestJsonException; @@ -85,7 +85,7 @@ public void connectionError_completionWithNioThreadWorksCorrectly() { .endpointOverride(URI.create("http://localhost:" + wireMock.port())) .credentialsProvider(() -> AwsBasicCredentials.create("akid", "skid")) .asyncConfiguration(c -> c.advancedOption(FUTURE_COMPLETION_EXECUTOR, mockExecutor)) - .overrideConfiguration(o -> o.retryPolicy(RetryPolicy.none())) + .overrideConfiguration(o -> o.retryStrategy(AwsRetryStrategy.none())) .build(); assertThatThrownBy(() -> @@ -107,7 +107,7 @@ public void serverError_completionWithNioThreadWorksCorrectly() { .region(Region.US_WEST_1) .endpointOverride(URI.create("http://localhost:" + wireMock.port())) .credentialsProvider(() -> AwsBasicCredentials.create("akid", "skid")) - .overrideConfiguration(o -> o.retryPolicy(RetryPolicy.none())) + .overrideConfiguration(o -> o.retryStrategy(AwsRetryStrategy.none())) .asyncConfiguration(c -> c.advancedOption(FUTURE_COMPLETION_EXECUTOR, mockExecutor)) .build(); diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/clockskew/ClockSkewAdjustmentTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/clockskew/ClockSkewAdjustmentTest.java index 21be8a282843..1d970b7e6f46 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/clockskew/ClockSkewAdjustmentTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/clockskew/ClockSkewAdjustmentTest.java @@ -205,7 +205,7 @@ private ProtocolJsonRpcClient createClient(int retryCount) { .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) .region(Region.US_EAST_1) .endpointOverride(URI.create("http://localhost:" + wireMock.port())) - .overrideConfiguration(c -> c.retryPolicy(r -> r.numRetries(retryCount))) + .overrideConfiguration(c -> c.retryStrategy(r -> r.maxAttempts(retryCount + 1))) .build(); } @@ -214,7 +214,7 @@ private ProtocolJsonRpcAsyncClient createAsyncClient(int retryCount) { .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) .region(Region.US_EAST_1) .endpointOverride(URI.create("http://localhost:" + wireMock.port())) - .overrideConfiguration(c -> c.retryPolicy(r -> r.numRetries(retryCount))) + .overrideConfiguration(c -> c.retryStrategy(r -> r.maxAttempts(retryCount + 1))) .build(); } } diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/connection/SyncClientConnectionInterruptionTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/connection/SyncClientConnectionInterruptionTest.java index 1a17986dc30f..f2fe3e0a5a3f 100644 --- 
a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/connection/SyncClientConnectionInterruptionTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/connection/SyncClientConnectionInterruptionTest.java @@ -32,14 +32,13 @@ import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicLong; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.awscore.retry.AwsRetryStrategy; import software.amazon.awssdk.core.exception.AbortedException; import software.amazon.awssdk.core.exception.ApiCallAttemptTimeoutException; -import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.http.SdkHttpClient; import software.amazon.awssdk.http.apache.ApacheHttpClient; import software.amazon.awssdk.metrics.MetricCollection; @@ -151,7 +150,9 @@ void interruptionDueToApiTimeOut_followed_byInterruptCausesOnlyTimeOutException( stubPostRequest("/2016-03-11/allTypes", aResponse().withFixedDelay(responseDelay), SAMPLE_BODY); ExceptionInThreadRun exception = new ExceptionInThreadRun(); ProtocolRestJsonClient client = - getClient(httpClient, Duration.ofMillis(10)).overrideConfiguration(o -> o.retryPolicy(RetryPolicy.none())).build(); + getClient(httpClient, Duration.ofMillis(10)) + .overrideConfiguration(o -> o.retryStrategy(AwsRetryStrategy.none())) + .build(); unInterruptedSleep(100); // We need to creat a separate thread to interrupt it externally. Thread leaseWaitingThread = new Thread(() -> { diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/retry/AsyncAwsJsonRetryTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/retry/AsyncAwsJsonRetryTest.java index ff48e60b0575..8d0853bef54b 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/retry/AsyncAwsJsonRetryTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/retry/AsyncAwsJsonRetryTest.java @@ -30,7 +30,7 @@ import org.junit.Test; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; -import software.amazon.awssdk.core.retry.RetryPolicy; +import software.amazon.awssdk.awscore.retry.AwsRetryStrategy; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.protocoljsonrpc.ProtocolJsonRpcAsyncClient; import software.amazon.awssdk.services.protocoljsonrpc.model.AllTypesRequest; @@ -127,7 +127,7 @@ public void shouldRetryOnAwsThrottlingErrorCode() { } @Test - public void retryPolicyNone_shouldNotRetry() { + public void retryStrategyNone_shouldNotRetry() { stubFor(post(urlEqualTo(PATH)) .inScenario("retry at 500") .whenScenarioStateIs(Scenario.STARTED) @@ -149,7 +149,7 @@ public void retryPolicyNone_shouldNotRetry() { "skid"))) .region(Region.US_EAST_1) .endpointOverride(URI.create("http://localhost:" + wireMock.port())) - .overrideConfiguration(c -> c.retryPolicy(RetryPolicy.none())) + .overrideConfiguration(c -> c.retryStrategy(AwsRetryStrategy.none())) .build(); assertThatThrownBy(() -> clientWithNoRetry.allTypes(AllTypesRequest.builder().build()).join()) diff --git 
a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/retry/AwsJsonRetryTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/retry/AwsJsonRetryTest.java index 6ea489b2c3b8..92c15ad668af 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/retry/AwsJsonRetryTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/retry/AwsJsonRetryTest.java @@ -30,6 +30,7 @@ import org.junit.Test; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.awscore.retry.AwsRetryStrategy; import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.protocoljsonrpc.ProtocolJsonRpcClient; @@ -126,7 +127,7 @@ public void shouldRetryOnAwsThrottlingErrorCode() { } @Test - public void retryPolicyNone_shouldNotRetry() { + public void retryStrategyNone_shouldNotRetry() { stubFor(post(urlEqualTo(PATH)) .inScenario("retry at 500") .whenScenarioStateIs(Scenario.STARTED) @@ -148,7 +149,7 @@ public void retryPolicyNone_shouldNotRetry() { "skid"))) .region(Region.US_EAST_1) .endpointOverride(URI.create("http://localhost:" + wireMock.port())) - .overrideConfiguration(c -> c.retryPolicy(RetryPolicy.none())) + .overrideConfiguration(c -> c.retryStrategy(AwsRetryStrategy.none())) .build(); assertThatThrownBy(() -> clientWithNoRetry.allTypes(AllTypesRequest.builder().build())).isInstanceOf(ProtocolJsonRpcException.class); diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/async/AsyncApiCallAttemptsTimeoutTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/async/AsyncApiCallAttemptsTimeoutTest.java index c7e09e4440db..c407455626a5 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/async/AsyncApiCallAttemptsTimeoutTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/async/AsyncApiCallAttemptsTimeoutTest.java @@ -28,13 +28,14 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.awscore.retry.AwsRetryStrategy; import software.amazon.awssdk.core.ResponseBytes; import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.async.SdkPublisher; import software.amazon.awssdk.core.exception.ApiCallAttemptTimeoutException; -import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.protocol.tests.timeout.BaseApiCallAttemptTimeoutTest; import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.retries.api.BackoffStrategy; import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; import software.amazon.awssdk.services.protocolrestjson.model.ProtocolRestJsonException; import software.amazon.awssdk.services.protocolrestjson.model.StreamingOutputOperationRequest; @@ -59,7 +60,7 @@ public void setup() { .httpClient(mockClient) .credentialsProvider(() -> AwsBasicCredentials.create("akid", "skid")) .overrideConfiguration(b -> b.apiCallAttemptTimeout(API_CALL_ATTEMPT_TIMEOUT) - .retryPolicy(RetryPolicy.none())) + .retryStrategy(AwsRetryStrategy.none())) .build(); clientWithRetry = ProtocolRestJsonAsyncClient.builder() @@ -67,9 +68,11 @@ public void 
setup() { .httpClient(mockClient) .credentialsProvider(() -> AwsBasicCredentials.create("akid", "skid")) .overrideConfiguration(b -> b.apiCallAttemptTimeout(API_CALL_ATTEMPT_TIMEOUT) - .retryPolicy(RetryPolicy.builder() - .numRetries(1) - .build())) + .retryStrategy(AwsRetryStrategy.standardRetryStrategy() + .toBuilder() + .backoffStrategy(BackoffStrategy.retryImmediately()) + .maxAttempts(2) + .build())) .build(); } diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/async/AsyncApiCallTimeoutTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/async/AsyncApiCallTimeoutTest.java index 82a7c447ce30..eefa47f2ed27 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/async/AsyncApiCallTimeoutTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/async/AsyncApiCallTimeoutTest.java @@ -27,12 +27,12 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.awscore.retry.AwsRetryStrategy; import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.exception.ApiCallTimeoutException; -import software.amazon.awssdk.core.retry.RetryPolicy; -import software.amazon.awssdk.core.retry.backoff.BackoffStrategy; import software.amazon.awssdk.protocol.tests.timeout.BaseApiCallTimeoutTest; import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.retries.api.BackoffStrategy; import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; import software.amazon.awssdk.services.protocolrestjson.model.AllTypesResponse; import software.amazon.awssdk.services.protocolrestjson.model.ProtocolRestJsonException; @@ -57,17 +57,18 @@ public void setup() { .httpClient(mockClient) .credentialsProvider(() -> AwsBasicCredentials.create("akid", "skid")) .overrideConfiguration(b -> b.apiCallTimeout(TIMEOUT) - .retryPolicy(RetryPolicy.none())) + .retryStrategy(AwsRetryStrategy.none())) .build(); clientWithRetry = ProtocolRestJsonAsyncClient.builder() .region(Region.US_WEST_1) .credentialsProvider(() -> AwsBasicCredentials.create("akid", "skid")) .overrideConfiguration(b -> b.apiCallTimeout(TIMEOUT) - .retryPolicy(RetryPolicy.builder() - .backoffStrategy(BackoffStrategy.none()) - .numRetries(1) - .build())) + .retryStrategy(AwsRetryStrategy.standardRetryStrategy() + .toBuilder() + .backoffStrategy(BackoffStrategy.retryImmediately()) + .maxAttempts(2) + .build())) .httpClient(mockClient) .build(); } @@ -136,7 +137,7 @@ public ProtocolRestJsonAsyncClient createClientWithMockClient(MockAsyncHttpClien .httpClient(mockClient) .credentialsProvider(() -> AwsBasicCredentials.create("akid", "skid")) .overrideConfiguration(b -> b.apiCallTimeout(TIMEOUT) - .retryPolicy(RetryPolicy.none())) + .retryStrategy(AwsRetryStrategy.none())) .build(); } diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncApiCallAttemptTimeoutTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncApiCallAttemptTimeoutTest.java index 1ddc5fbf2bc7..84725f07c539 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncApiCallAttemptTimeoutTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncApiCallAttemptTimeoutTest.java @@ 
-24,11 +24,12 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.awscore.retry.AwsRetryStrategy; import software.amazon.awssdk.core.exception.ApiCallAttemptTimeoutException; -import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.core.sync.ResponseTransformer; import software.amazon.awssdk.protocol.tests.timeout.BaseApiCallAttemptTimeoutTest; import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.retries.api.BackoffStrategy; import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; import software.amazon.awssdk.services.protocolrestjson.model.ProtocolRestJsonException; import software.amazon.awssdk.testutils.service.http.MockHttpClient; @@ -53,7 +54,7 @@ public void setup() { .credentialsProvider(() -> AwsBasicCredentials.create("akid", "skid")) .overrideConfiguration( b -> b.apiCallAttemptTimeout(API_CALL_ATTEMPT_TIMEOUT) - .retryPolicy(RetryPolicy.none())) + .retryStrategy(AwsRetryStrategy.none())) .build(); clientWithRetry = ProtocolRestJsonClient.builder() @@ -62,9 +63,11 @@ public void setup() { .credentialsProvider(() -> AwsBasicCredentials.create("akid", "skid")) .overrideConfiguration( b -> b.apiCallAttemptTimeout(API_CALL_ATTEMPT_TIMEOUT) - .retryPolicy(RetryPolicy.builder() - .numRetries(1) - .build())) + .retryStrategy(AwsRetryStrategy.standardRetryStrategy() + .toBuilder() + .backoffStrategy(BackoffStrategy.retryImmediately()) + .maxAttempts(2) + .build())) .build(); } diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncApiCallTimeoutTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncApiCallTimeoutTest.java index cb75652605b7..8dc66cea9b37 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncApiCallTimeoutTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncApiCallTimeoutTest.java @@ -26,12 +26,12 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.awscore.retry.AwsRetryStrategy; import software.amazon.awssdk.core.exception.ApiCallTimeoutException; -import software.amazon.awssdk.core.retry.RetryPolicy; -import software.amazon.awssdk.core.retry.backoff.BackoffStrategy; import software.amazon.awssdk.core.sync.ResponseTransformer; import software.amazon.awssdk.protocol.tests.timeout.BaseApiCallTimeoutTest; import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.retries.api.BackoffStrategy; import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; import software.amazon.awssdk.services.protocolrestjson.model.AllTypesResponse; import software.amazon.awssdk.services.protocolrestjson.model.ProtocolRestJsonException; @@ -58,7 +58,7 @@ public void setup() { .httpClient(mockClient) .credentialsProvider(() -> AwsBasicCredentials.create("akid", "skid")) .overrideConfiguration(b -> b.apiCallTimeout(TIMEOUT) - .retryPolicy(RetryPolicy.none())) + .retryStrategy(AwsRetryStrategy.none())) .build(); clientWithRetry = ProtocolRestJsonClient.builder() @@ -66,10 +66,11 @@ public void setup() { .httpClient(mockClient) .credentialsProvider(() -> AwsBasicCredentials.create("akid", "skid")) .overrideConfiguration(b -> 
b.apiCallTimeout(TIMEOUT) - .retryPolicy(RetryPolicy.builder() - .backoffStrategy(BackoffStrategy.none()) - .numRetries(1) - .build())) + .retryStrategy(AwsRetryStrategy.standardRetryStrategy() + .toBuilder() + .backoffStrategy(BackoffStrategy.retryImmediately()) + .maxAttempts(2) + .build())) .build(); } diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncStreamingOperationApiCallAttemptTimeoutTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncStreamingOperationApiCallAttemptTimeoutTest.java index e309b3100405..1c3e3e63679d 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncStreamingOperationApiCallAttemptTimeoutTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncStreamingOperationApiCallAttemptTimeoutTest.java @@ -16,6 +16,7 @@ package software.amazon.awssdk.protocol.tests.timeout.sync; import java.time.Duration; +import software.amazon.awssdk.awscore.retry.AwsRetryStrategy; import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; import software.amazon.awssdk.core.exception.ApiCallAttemptTimeoutException; import software.amazon.awssdk.core.retry.RetryPolicy; @@ -32,6 +33,7 @@ Class expectedException() { @Override ClientOverrideConfiguration clientOverrideConfiguration() { - return ClientOverrideConfiguration.builder().apiCallAttemptTimeout(Duration.ofMillis(TIMEOUT)).retryPolicy(RetryPolicy.none()).build(); + return ClientOverrideConfiguration.builder().apiCallAttemptTimeout(Duration.ofMillis(TIMEOUT)) + .retryStrategy(AwsRetryStrategy.none()).build(); } } diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncStreamingOperationApiCallTimeoutTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncStreamingOperationApiCallTimeoutTest.java index 47cf5d32a445..aec142d71bcb 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncStreamingOperationApiCallTimeoutTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncStreamingOperationApiCallTimeoutTest.java @@ -16,9 +16,9 @@ package software.amazon.awssdk.protocol.tests.timeout.sync; import java.time.Duration; +import software.amazon.awssdk.awscore.retry.AwsRetryStrategy; import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; import software.amazon.awssdk.core.exception.ApiCallTimeoutException; -import software.amazon.awssdk.core.retry.RetryPolicy; /** * A set of tests to test ApiCallTimeout for synchronous streaming operations because they are tricky. 
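// Illustrative sketch of the recurring migration these test hunks apply: the deprecated override
// .retryPolicy(b -> b.numRetries(n)) becomes .retryStrategy(b -> b.maxAttempts(n + 1)) because max
// attempts counts the initial request, RetryPolicy.none() becomes AwsRetryStrategy.none(), and the
// legacy BackoffStrategy.none() becomes the new BackoffStrategy.retryImmediately(). The generated
// ProtocolRestJsonClient and the dummy region/credentials below are the same ones these tests use.
import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
import software.amazon.awssdk.awscore.retry.AwsRetryStrategy;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.retries.api.BackoffStrategy;
import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient;

public class RetryStrategyMigrationSketch {
    public static void main(String[] args) {
        StaticCredentialsProvider credentials =
            StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"));

        // One retry (two attempts) with no backoff between attempts: the new equivalent of numRetries(1).
        ProtocolRestJsonClient clientWithRetry =
            ProtocolRestJsonClient.builder()
                                  .region(Region.US_EAST_1)
                                  .credentialsProvider(credentials)
                                  .overrideConfiguration(o -> o.retryStrategy(
                                      AwsRetryStrategy.standardRetryStrategy()
                                                      .toBuilder()
                                                      .backoffStrategy(BackoffStrategy.retryImmediately())
                                                      .maxAttempts(2)
                                                      .build()))
                                  .build();

        // Retries disabled: the new equivalent of RetryPolicy.none().
        ProtocolRestJsonClient clientWithoutRetry =
            ProtocolRestJsonClient.builder()
                                  .region(Region.US_EAST_1)
                                  .credentialsProvider(credentials)
                                  .overrideConfiguration(o -> o.retryStrategy(AwsRetryStrategy.none()))
                                  .build();
    }
}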
@@ -34,7 +34,7 @@ Class expectedException() { ClientOverrideConfiguration clientOverrideConfiguration() { return ClientOverrideConfiguration.builder() .apiCallTimeout(Duration.ofMillis(TIMEOUT)) - .retryPolicy(RetryPolicy.none()) + .retryStrategy(AwsRetryStrategy.none()) .build(); } } From 64648ebfec403174c75e27effaead1fabc5ec916 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20Sugawara=20=28=E2=88=A9=EF=BD=80-=C2=B4=29?= =?UTF-8?q?=E2=8A=83=E2=94=81=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E?= Date: Tue, 27 Jun 2023 19:08:38 -0700 Subject: [PATCH 16/32] Update sdk version --- core/retries-api/pom.xml | 2 +- core/retries/pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/retries-api/pom.xml b/core/retries-api/pom.xml index 60f4f82d5116..116fd8162723 100644 --- a/core/retries-api/pom.xml +++ b/core/retries-api/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.88-SNAPSHOT + 2.20.95-SNAPSHOT 4.0.0 diff --git a/core/retries/pom.xml b/core/retries/pom.xml index 6247aab313bc..64dfb146e3b6 100644 --- a/core/retries/pom.xml +++ b/core/retries/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.88-SNAPSHOT + 2.20.95-SNAPSHOT 4.0.0 From 75842c16209d00595d6b84094d0dfb050f75105e Mon Sep 17 00:00:00 2001 From: Manuel Sugawara Date: Fri, 30 Jun 2023 10:44:59 -0700 Subject: [PATCH 17/32] Deprecate legacy classes and use new when possible (#4154) * Deprecate legacy classes and use new when possible * Fix checkstyle and add some more validation * Add missing @Deprecated annotation * Add missing dependency to the retries-api module --- .../awssdk/awscore/retry/AwsRetryPolicy.java | 3 ++ .../awscore/client/utils/HttpTestUtils.java | 17 +------ .../awssdk/retries/api/RetryStrategy.java | 7 +++ .../backoff/BackoffStrategiesConstants.java | 2 +- .../retries/api/RetryStrategyBuilderTest.java | 5 ++ .../awssdk/retries/DefaultRetryStrategy.java | 2 +- .../awssdk/retries/LegacyRetryStrategy.java | 7 --- .../awssdk/retries/StandardRetryStrategy.java | 7 --- .../retries/internal/BaseRetryStrategy.java | 1 - .../DefaultAdaptiveRetryStrategy.java | 9 ++-- .../internal/DefaultLegacyRetryStrategy.java | 7 +-- ...azonHttpClientSslHandshakeTimeoutTest.java | 4 +- ...tionPoolMaxConnectionsIntegrationTest.java | 4 +- .../internal/retry/RetryPolicyAdapter.java | 6 +++ .../amazon/awssdk/core/retry/RetryPolicy.java | 3 ++ .../awssdk/core/retry/RetryPolicyContext.java | 4 +- .../core/retry/backoff/BackoffStrategy.java | 4 ++ .../backoff/EqualJitterBackoffStrategy.java | 4 ++ .../backoff/FixedDelayBackoffStrategy.java | 4 ++ .../backoff/FullJitterBackoffStrategy.java | 4 ++ .../core/retry/conditions/RetryCondition.java | 5 ++ .../client/handler/SyncClientHandlerTest.java | 4 +- .../AsyncHttpClientApiCallTimeoutTests.java | 3 -- .../HttpClientApiCallAttemptTimeoutTest.java | 2 - .../timers/HttpClientApiCallTimeoutTest.java | 1 - .../core/retry/FixedTimeBackoffStrategy.java | 4 ++ .../src/test/java/utils/HttpTestUtils.java | 29 ----------- .../retry/SimpleArrayBackoffStrategy.java | 38 -------------- .../dynamodb/DynamoDbRetryPolicy.java | 8 +++ .../dynamodb/DynamoDbRetryPolicyTest.java | 49 +++++++------------ services/iam/pom.xml | 5 ++ .../services/iam/ServiceIntegrationTest.java | 3 +- services/s3/pom.xml | 5 ++ .../internal/crt/DefaultS3CrtAsyncClient.java | 4 +- .../s3/functionaltests/RetriesOn200Test.java | 7 +-- .../tests/retry/AwsJsonRetryTest.java | 1 - ...ingOperationApiCallAttemptTimeoutTest.java | 1 - test/stability-tests/pom.xml | 12 +++++ 
.../CloudWatchCrtAsyncStabilityTest.java | 6 +-- .../CloudWatchNettyAsyncStabilityTest.java | 4 +- .../tests/s3/S3NettyAsyncStabilityTest.java | 4 +- ...S3WithCrtAsyncHttpClientStabilityTest.java | 4 +- .../tests/sqs/SqsCrtAsyncStabilityTest.java | 6 +-- 43 files changed, 134 insertions(+), 175 deletions(-) delete mode 100644 core/sdk-core/src/test/java/utils/retry/SimpleArrayBackoffStrategy.java diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryPolicy.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryPolicy.java index 4899a2601b02..32d3123b6e8a 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryPolicy.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryPolicy.java @@ -25,8 +25,11 @@ /** * Retry Policy used by clients when communicating with AWS services. + * + * @deprecated Use instead {@link AwsRetryStrategy} */ @SdkPublicApi +@Deprecated public final class AwsRetryPolicy { private AwsRetryPolicy() { diff --git a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/utils/HttpTestUtils.java b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/utils/HttpTestUtils.java index e92e771f390f..117d0ad01a29 100644 --- a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/utils/HttpTestUtils.java +++ b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/client/utils/HttpTestUtils.java @@ -21,13 +21,13 @@ import java.util.concurrent.Executors; import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; import software.amazon.awssdk.awscore.client.config.AwsClientOption; +import software.amazon.awssdk.awscore.retry.AwsRetryStrategy; import software.amazon.awssdk.core.client.config.SdkAdvancedAsyncClientOption; import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.internal.http.AmazonSyncHttpClient; import software.amazon.awssdk.core.internal.http.loader.DefaultSdkHttpClientBuilder; -import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.core.signer.NoOpSigner; import software.amazon.awssdk.http.SdkHttpClient; import software.amazon.awssdk.http.SdkHttpConfigurationOption; @@ -51,7 +51,7 @@ public static SdkClientConfiguration testClientConfiguration() { return SdkClientConfiguration.builder() .option(SdkClientOption.EXECUTION_INTERCEPTORS, new ArrayList<>()) .option(SdkClientOption.ENDPOINT, URI.create("http://localhost:8080")) - .option(SdkClientOption.RETRY_POLICY, RetryPolicy.defaultRetryPolicy()) + .option(SdkClientOption.RETRY_STRATEGY, AwsRetryStrategy.defaultRetryStrategy()) .option(SdkClientOption.ADDITIONAL_HTTP_HEADERS, new HashMap<>()) .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) .option(AwsClientOption.CREDENTIALS_PROVIDER, DefaultCredentialsProvider.create()) @@ -64,14 +64,8 @@ public static SdkClientConfiguration testClientConfiguration() { } public static class TestClientBuilder { - private RetryPolicy retryPolicy; private SdkHttpClient httpClient; - public TestClientBuilder retryPolicy(RetryPolicy retryPolicy) { - this.retryPolicy = retryPolicy; - return this; - } - public TestClientBuilder httpClient(SdkHttpClient sdkHttpClient) { this.httpClient = sdkHttpClient; return this; @@ -81,14 +75,7 @@ public AmazonSyncHttpClient build() { SdkHttpClient 
sdkHttpClient = this.httpClient != null ? this.httpClient : testSdkHttpClient(); return new AmazonSyncHttpClient(testClientConfiguration().toBuilder() .option(SdkClientOption.SYNC_HTTP_CLIENT, sdkHttpClient) - .applyMutation(this::configureRetryPolicy) .build()); } - - private void configureRetryPolicy(SdkClientConfiguration.Builder builder) { - if (retryPolicy != null) { - builder.option(SdkClientOption.RETRY_POLICY, retryPolicy); - } - } } } diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java index cc37143b2bb6..016ddff2d1b1 100644 --- a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java @@ -204,6 +204,13 @@ default B retryOnRootCauseInstanceOf(Class throwable) { */ B maxAttempts(int maxAttempts); + /** + * Configure the backoff strategy used by this executor. + * + *
    By default, this uses jittered exponential backoff. + */ + B backoffStrategy(BackoffStrategy backoffStrategy); + /** * Build a new {@link RetryStrategy} with the current configuration on this builder. */ diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/BackoffStrategiesConstants.java b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/BackoffStrategiesConstants.java index 72a9a6b043fb..75db8b5ddb06 100644 --- a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/BackoffStrategiesConstants.java +++ b/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/BackoffStrategiesConstants.java @@ -42,7 +42,7 @@ private BackoffStrategiesConstants() { * get capped to 30. */ static int calculateExponentialDelay(int retriesAttempted, Duration baseDelay, Duration maxBackoffTime) { - int cappedRetries = Math.min(retriesAttempted, BackoffStrategiesConstants.RETRIES_ATTEMPTED_CEILING); + int cappedRetries = Math.min(retriesAttempted, RETRIES_ATTEMPTED_CEILING); return (int) Math.min(baseDelay.multipliedBy(1L << (cappedRetries - 2)).toMillis(), maxBackoffTime.toMillis()); } } diff --git a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java index 227a57e6b2ea..99c9ec7ae516 100644 --- a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java +++ b/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java @@ -153,6 +153,11 @@ public BuilderToTestDefaults maxAttempts(int maxAttempts) { return this; } + @Override + public BuilderToTestDefaults backoffStrategy(BackoffStrategy backoffStrategy) { + return this; + } + @Override public DummyRetryStrategy build() { return null; diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java index 2900a78c7765..7df5c5cf710a 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java @@ -95,7 +95,7 @@ public static AdaptiveRetryStrategy.Builder adaptiveStrategyBuilder() { static final class Standard { static final int MAX_ATTEMPTS = 3; - static final Duration BASE_DELAY = Duration.ofSeconds(1); + static final Duration BASE_DELAY = Duration.ofMillis(100); static final Duration MAX_BACKOFF = Duration.ofSeconds(20); static final int TOKEN_BUCKET_SIZE = 500; static final int DEFAULT_EXCEPTION_TOKEN_COST = 5; diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/LegacyRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/LegacyRetryStrategy.java index 4ca93f8482c5..91b13d4f1d2a 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/LegacyRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/LegacyRetryStrategy.java @@ -74,13 +74,6 @@ static Builder builder() { Builder toBuilder(); interface Builder extends RetryStrategy.Builder { - /** - * Configure the backoff strategy used by this strategy. - * - *
    By default, this uses jittered exponential backoff. - */ - Builder backoffStrategy(BackoffStrategy backoffStrategy); - /** * Configure the backoff strategy used for throttling exceptions by this strategy. * diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/StandardRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/StandardRetryStrategy.java index ed7069162e2a..ae2b38feea68 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/StandardRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/StandardRetryStrategy.java @@ -70,13 +70,6 @@ static Builder builder() { Builder toBuilder(); interface Builder extends RetryStrategy.Builder { - /** - * Configure the backoff strategy used by this executor. - * - *
    By default, this uses jittered exponential backoff. - */ - Builder backoffStrategy(BackoffStrategy backoffStrategy); - /** * Whether circuit breaking is enabled for this executor. * diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/BaseRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/BaseRetryStrategy.java index f66f1e68a729..def50765d37b 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/BaseRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/BaseRetryStrategy.java @@ -146,7 +146,6 @@ public int maxAttempts() { @Override public abstract B toBuilder(); - /** * Computes the backoff before the first attempt, by default * {@link Duration#ZERO}. Extending classes can override diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultAdaptiveRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultAdaptiveRetryStrategy.java index 70347d8a9c4f..137b264496ba 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultAdaptiveRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultAdaptiveRetryStrategy.java @@ -111,13 +111,14 @@ public Builder treatAsThrottling(Predicate treatAsThrottling) { return this; } - public Builder circuitBreakerEnabled(Boolean circuitBreakerEnabled) { - setCircuitBreakerEnabled(circuitBreakerEnabled); + @Override + public Builder backoffStrategy(BackoffStrategy backoffStrategy) { + setBackoffStrategy(backoffStrategy); return this; } - public Builder backoffStrategy(BackoffStrategy backoffStrategy) { - setBackoffStrategy(backoffStrategy); + public Builder circuitBreakerEnabled(Boolean circuitBreakerEnabled) { + setCircuitBreakerEnabled(circuitBreakerEnabled); return this; } diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultLegacyRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultLegacyRetryStrategy.java index 67cb750b8e8f..a4aea00b2f78 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultLegacyRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultLegacyRetryStrategy.java @@ -23,6 +23,7 @@ import software.amazon.awssdk.retries.api.RefreshRetryTokenRequest; import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore; import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; @SdkInternalApi public final class DefaultLegacyRetryStrategy @@ -34,9 +35,9 @@ public final class DefaultLegacyRetryStrategy DefaultLegacyRetryStrategy(Builder builder) { super(LOG, builder); - this.throttlingExceptionCost = builder.throttlingExceptionCost; - this.throttlingBackoffStrategy = builder.throttlingBackoffStrategy; - this.treatAsThrottling = builder.treatAsThrottling; + this.throttlingExceptionCost = Validate.paramNotNull(builder.throttlingExceptionCost, "throttlingExceptionCost"); + this.throttlingBackoffStrategy = Validate.paramNotNull(builder.throttlingBackoffStrategy, "throttlingBackoffStrategy"); + this.treatAsThrottling = Validate.paramNotNull(builder.treatAsThrottling, "treatAsThrottling"); } @Override diff --git a/core/sdk-core/src/it/java/software/amazon/awssdk/core/http/AmazonHttpClientSslHandshakeTimeoutTest.java 
b/core/sdk-core/src/it/java/software/amazon/awssdk/core/http/AmazonHttpClientSslHandshakeTimeoutTest.java index 77ec0d60a971..783b39161f0e 100644 --- a/core/sdk-core/src/it/java/software/amazon/awssdk/core/http/AmazonHttpClientSslHandshakeTimeoutTest.java +++ b/core/sdk-core/src/it/java/software/amazon/awssdk/core/http/AmazonHttpClientSslHandshakeTimeoutTest.java @@ -28,10 +28,10 @@ import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.internal.http.AmazonSyncHttpClient; import software.amazon.awssdk.core.internal.http.response.NullErrorResponseHandler; -import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpMethod; import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.retries.DefaultRetryStrategy; import utils.HttpTestUtils; /** @@ -48,7 +48,7 @@ public class AmazonHttpClientSslHandshakeTimeoutTest extends UnresponsiveMockSer @Test(timeout = 60 * 1000) public void testSslHandshakeTimeout() { AmazonSyncHttpClient httpClient = HttpTestUtils.testClientBuilder() - .retryPolicy(RetryPolicy.none()) + .retryStrategy(DefaultRetryStrategy.none()) .httpClient(ApacheHttpClient.builder() .socketTimeout(CLIENT_SOCKET_TO) .build()) diff --git a/core/sdk-core/src/it/java/software/amazon/awssdk/core/http/ConnectionPoolMaxConnectionsIntegrationTest.java b/core/sdk-core/src/it/java/software/amazon/awssdk/core/http/ConnectionPoolMaxConnectionsIntegrationTest.java index ce2a0c73e410..c017392c09c6 100644 --- a/core/sdk-core/src/it/java/software/amazon/awssdk/core/http/ConnectionPoolMaxConnectionsIntegrationTest.java +++ b/core/sdk-core/src/it/java/software/amazon/awssdk/core/http/ConnectionPoolMaxConnectionsIntegrationTest.java @@ -30,10 +30,10 @@ import software.amazon.awssdk.core.http.server.MockServer; import software.amazon.awssdk.core.internal.http.AmazonSyncHttpClient; import software.amazon.awssdk.core.internal.http.response.EmptySdkResponseHandler; -import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpMethod; import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.retries.DefaultRetryStrategy; import utils.HttpTestUtils; public class ConnectionPoolMaxConnectionsIntegrationTest { @@ -57,7 +57,7 @@ public static void tearDown() { public void leasing_a_new_connection_fails_with_connection_pool_timeout() { AmazonSyncHttpClient httpClient = HttpTestUtils.testClientBuilder() - .retryPolicy(RetryPolicy.none()) + .retryStrategy(DefaultRetryStrategy.none()) .httpClient(ApacheHttpClient.builder() .connectionTimeout(Duration.ofMillis(100)) .maxConnections(1) diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/RetryPolicyAdapter.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/RetryPolicyAdapter.java index 35a941103486..0cc388a343b6 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/RetryPolicyAdapter.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/RetryPolicyAdapter.java @@ -27,6 +27,7 @@ import software.amazon.awssdk.core.retry.RetryUtils; import software.amazon.awssdk.retries.api.AcquireInitialTokenRequest; import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse; +import software.amazon.awssdk.retries.api.BackoffStrategy; import 
software.amazon.awssdk.retries.api.RecordSuccessRequest; import software.amazon.awssdk.retries.api.RecordSuccessResponse; import software.amazon.awssdk.retries.api.RefreshRetryTokenRequest; @@ -164,6 +165,11 @@ public Builder maxAttempts(int maxAttempts) { throw new UnsupportedOperationException("RetryPolicyAdapter does not support calling retryOnException"); } + @Override + public Builder backoffStrategy(BackoffStrategy backoffStrategy) { + throw new UnsupportedOperationException("RetryPolicyAdapter does not support calling backoffStrategy"); + } + public Builder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryPolicy.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryPolicy.java index 2830655af043..12678c98b6c4 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryPolicy.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryPolicy.java @@ -42,9 +42,12 @@ * * @see RetryCondition for a list of SDK provided retry condition strategies * @see BackoffStrategy for a list of SDK provided backoff strategies + * + * @deprecated Use instead {@link software.amazon.awssdk.retries.api.RetryStrategy}. */ @Immutable @SdkPublicApi +@Deprecated public final class RetryPolicy implements ToCopyableBuilder { private final boolean additionalRetryConditionsAllowed; private final RetryMode retryMode; diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryPolicyContext.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryPolicyContext.java index 138ba2ab2ac7..c56caa433108 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryPolicyContext.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryPolicyContext.java @@ -21,12 +21,13 @@ import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.utils.builder.CopyableBuilder; import software.amazon.awssdk.utils.builder.ToCopyableBuilder; /** * Contains useful information about a failed request that can be used to make retry and backoff decisions. See {@link - * RetryPolicy}. + * RetryPolicy} and {@link RetryStrategy}. 
*/ @Immutable @SdkPublicApi @@ -163,5 +164,4 @@ public RetryPolicyContext build() { } } - } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/backoff/BackoffStrategy.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/backoff/BackoffStrategy.java index 6ca831f7dd1a..67d1f2855ea0 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/backoff/BackoffStrategy.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/backoff/BackoffStrategy.java @@ -21,8 +21,12 @@ import software.amazon.awssdk.core.retry.RetryMode; import software.amazon.awssdk.core.retry.RetryPolicyContext; +/** + * @deprecated Use instead {@link software.amazon.awssdk.retries.api.BackoffStrategy} + */ @SdkPublicApi @FunctionalInterface +@Deprecated public interface BackoffStrategy { /** diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/backoff/EqualJitterBackoffStrategy.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/backoff/EqualJitterBackoffStrategy.java index e5ce7c0024b3..e2936a7697cf 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/backoff/EqualJitterBackoffStrategy.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/backoff/EqualJitterBackoffStrategy.java @@ -39,8 +39,12 @@ * * This is in contrast to {@link FullJitterBackoffStrategy} where the final computed delay before the next retry will be * between 0 and the computed exponential delay. + * + * @deprecated Use instead {@link software.amazon.awssdk.retries.api.BackoffStrategy} and + * {@link software.amazon.awssdk.retries.api.BackoffStrategy#fixedDelayWithoutJitter(Duration)} */ @SdkPublicApi +@Deprecated public final class EqualJitterBackoffStrategy implements BackoffStrategy, ToCopyableBuilder { diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/backoff/FixedDelayBackoffStrategy.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/backoff/FixedDelayBackoffStrategy.java index 24003c5f4bac..5f7d226e4c6b 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/backoff/FixedDelayBackoffStrategy.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/backoff/FixedDelayBackoffStrategy.java @@ -24,8 +24,12 @@ /** * Simple backoff strategy that always uses a fixed delay for the delay before the next retry attempt. + * + * @deprecated Use instead {@link software.amazon.awssdk.retries.api.BackoffStrategy} and + * {@link software.amazon.awssdk.retries.api.BackoffStrategy#fixedDelay(Duration)}. */ @SdkPublicApi +@Deprecated public final class FixedDelayBackoffStrategy implements BackoffStrategy { private final Duration fixedBackoff; diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/backoff/FullJitterBackoffStrategy.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/backoff/FullJitterBackoffStrategy.java index 5624d04860cc..36eccbb7a56a 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/backoff/FullJitterBackoffStrategy.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/backoff/FullJitterBackoffStrategy.java @@ -36,8 +36,12 @@ * * This is in contrast to {@link EqualJitterBackoffStrategy} that computes a new random delay where the final * computed delay before the next retry will be at least half of the computed exponential delay. 
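// Rough sketch of the replacements pointed to by the @deprecated notes added in these backoff-strategy
// hunks (the FullJitterBackoffStrategy note appears just below), using only factory methods of the new
// software.amazon.awssdk.retries.api.BackoffStrategy that this patch references or calls. The 100 ms
// base delay and 20 s cap mirror the standard-strategy defaults shown earlier in this patch.
import java.time.Duration;
import software.amazon.awssdk.retries.api.BackoffStrategy;

public class BackoffStrategyMigrationSketch {
    public static void main(String[] args) {
        // Legacy FixedDelayBackoffStrategy: a constant delay between attempts.
        BackoffStrategy fixed = BackoffStrategy.fixedDelay(Duration.ofMillis(100));

        // Legacy BackoffStrategy.none(): retry immediately, with no delay at all.
        BackoffStrategy immediate = BackoffStrategy.retryImmediately();

        // Legacy FullJitterBackoffStrategy, per the note below: exponential delay without jitter.
        BackoffStrategy exponential =
            BackoffStrategy.exponentialDelayWithoutJitter(Duration.ofMillis(100), Duration.ofSeconds(20));
    }
}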
+ * + * @deprecated Use instead {@link software.amazon.awssdk.retries.api.BackoffStrategy} and + * {@link software.amazon.awssdk.retries.api.BackoffStrategy#exponentialDelayWithoutJitter(Duration, Duration)}. */ @SdkPublicApi +@Deprecated public final class FullJitterBackoffStrategy implements BackoffStrategy, ToCopyableBuilder { diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/conditions/RetryCondition.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/conditions/RetryCondition.java index 3ec8dce22edc..69c83a0b41ee 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/conditions/RetryCondition.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/conditions/RetryCondition.java @@ -15,12 +15,17 @@ package software.amazon.awssdk.core.retry.conditions; +import java.util.function.Predicate; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.core.internal.retry.SdkDefaultRetrySetting; import software.amazon.awssdk.core.retry.RetryPolicyContext; +/** + * @deprecated Use instead {@link software.amazon.awssdk.retries.api.RetryStrategy.Builder#retryOnException(Predicate)}. + */ @SdkPublicApi @FunctionalInterface +@Deprecated public interface RetryCondition { /** * Determine whether a request should or should not be retried. diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/handler/SyncClientHandlerTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/handler/SyncClientHandlerTest.java index 0fe158a08082..ff9ddc1d8c8e 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/handler/SyncClientHandlerTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/handler/SyncClientHandlerTest.java @@ -42,7 +42,6 @@ import software.amazon.awssdk.core.exception.SdkServiceException; import software.amazon.awssdk.core.http.HttpResponseHandler; import software.amazon.awssdk.core.protocol.VoidSdkResponse; -import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.core.runtime.transform.Marshaller; import software.amazon.awssdk.core.sync.ResponseTransformer; import software.amazon.awssdk.http.AbortableInputStream; @@ -51,6 +50,7 @@ import software.amazon.awssdk.http.SdkHttpClient; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.retries.DefaultRetryStrategy; import utils.HttpTestUtils; import utils.ValidSdkObjects; @@ -205,7 +205,7 @@ private ClientExecutionParams clientExecutionParams() { public SdkClientConfiguration clientConfiguration() { return HttpTestUtils.testClientConfiguration().toBuilder() .option(SdkClientOption.SYNC_HTTP_CLIENT, httpClient) - .option(SdkClientOption.RETRY_POLICY, RetryPolicy.none()) + .option(SdkClientOption.RETRY_STRATEGY, DefaultRetryStrategy.none()) .build(); } } diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/AsyncHttpClientApiCallTimeoutTests.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/AsyncHttpClientApiCallTimeoutTests.java index 7c2bbe018b7b..9af0dd441b0c 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/AsyncHttpClientApiCallTimeoutTests.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/AsyncHttpClientApiCallTimeoutTests.java @@ -47,12 +47,10 @@ import 
software.amazon.awssdk.core.interceptor.InterceptorContext; import software.amazon.awssdk.core.internal.http.AmazonAsyncHttpClient; import software.amazon.awssdk.core.internal.http.request.SlowExecutionInterceptor; -import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.core.signer.NoOpSigner; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.metrics.MetricCollector; import software.amazon.awssdk.retries.DefaultRetryStrategy; -import software.amazon.awssdk.retries.api.RetryStrategy; import utils.ValidSdkObjects; public class AsyncHttpClientApiCallTimeoutTests { @@ -65,7 +63,6 @@ public class AsyncHttpClientApiCallTimeoutTests { @Before public void setup() { httpClient = testAsyncClientBuilder() - .retryPolicy(RetryPolicy.none()) .retryStrategy(DefaultRetryStrategy.none()) .apiCallTimeout(API_CALL_TIMEOUT) .build(); diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallAttemptTimeoutTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallAttemptTimeoutTest.java index 43c07d9082fe..a980536313ca 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallAttemptTimeoutTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallAttemptTimeoutTest.java @@ -42,7 +42,6 @@ import software.amazon.awssdk.core.interceptor.InterceptorContext; import software.amazon.awssdk.core.internal.http.AmazonSyncHttpClient; import software.amazon.awssdk.core.internal.http.request.SlowExecutionInterceptor; -import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.core.signer.NoOpSigner; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.metrics.MetricCollector; @@ -60,7 +59,6 @@ public class HttpClientApiCallAttemptTimeoutTest { @Before public void setup() { httpClient = testClientBuilder() - .retryPolicy(RetryPolicy.none()) .retryStrategy(DefaultRetryStrategy.none()) .apiCallAttemptTimeout(API_CALL_TIMEOUT) .build(); diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallTimeoutTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallTimeoutTest.java index cef3359cc2c9..3f03af6dcf1a 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallTimeoutTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallTimeoutTest.java @@ -60,7 +60,6 @@ public class HttpClientApiCallTimeoutTest { @Before public void setup() { httpClient = testClientBuilder() - .retryPolicy(RetryPolicy.none()) .retryStrategy(DefaultRetryStrategy.none()) .apiCallTimeout(API_CALL_TIMEOUT) .build(); diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/retry/FixedTimeBackoffStrategy.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/retry/FixedTimeBackoffStrategy.java index eccaad72c0d5..337aa3f55466 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/retry/FixedTimeBackoffStrategy.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/retry/FixedTimeBackoffStrategy.java @@ -20,7 +20,11 @@ /** * Test implementation of {@link BackoffStrategy} to wait a fixed time between retries + * + * @deprecated Use instead {@link software.amazon.awssdk.retries.api.BackoffStrategy} and + * 
{@link software.amazon.awssdk.retries.api.BackoffStrategy#fixedDelay(Duration)}. */ +@Deprecated public class FixedTimeBackoffStrategy implements BackoffStrategy { private final Duration fixedTimeDelay; diff --git a/core/sdk-core/src/test/java/utils/HttpTestUtils.java b/core/sdk-core/src/test/java/utils/HttpTestUtils.java index 242109d4b07d..b9279c283d2d 100644 --- a/core/sdk-core/src/test/java/utils/HttpTestUtils.java +++ b/core/sdk-core/src/test/java/utils/HttpTestUtils.java @@ -33,12 +33,10 @@ import software.amazon.awssdk.core.internal.http.loader.DefaultSdkAsyncHttpClientBuilder; import software.amazon.awssdk.core.internal.http.loader.DefaultSdkHttpClientBuilder; import software.amazon.awssdk.core.internal.retry.SdkDefaultRetryStrategy; -import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.core.signer.NoOpSigner; import software.amazon.awssdk.http.SdkHttpClient; import software.amazon.awssdk.http.SdkHttpConfigurationOption; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; -import software.amazon.awssdk.retries.DefaultRetryStrategy; import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.utils.AttributeMap; @@ -73,7 +71,6 @@ public static SdkClientConfiguration testClientConfiguration() { return SdkClientConfiguration.builder() .option(SdkClientOption.EXECUTION_INTERCEPTORS, new ArrayList<>()) .option(SdkClientOption.ENDPOINT, URI.create("http://localhost:8080")) - .option(SdkClientOption.RETRY_POLICY, RetryPolicy.defaultRetryPolicy()) .option(SdkClientOption.RETRY_STRATEGY, SdkDefaultRetryStrategy.defaultRetryStrategy()) .option(SdkClientOption.ADDITIONAL_HTTP_HEADERS, new HashMap<>()) @@ -87,18 +84,12 @@ public static SdkClientConfiguration testClientConfiguration() { } public static class TestClientBuilder { - private RetryPolicy retryPolicy; private RetryStrategy retryStrategy; private SdkHttpClient httpClient; private Map additionalHeaders = new HashMap<>(); private Duration apiCallTimeout; private Duration apiCallAttemptTimeout; - public TestClientBuilder retryPolicy(RetryPolicy retryPolicy) { - this.retryPolicy = retryPolicy; - return this; - } - public TestClientBuilder retryStrategy(RetryStrategy retryStrategy) { this.retryStrategy = retryStrategy; return this; @@ -128,7 +119,6 @@ public AmazonSyncHttpClient build() { SdkHttpClient sdkHttpClient = this.httpClient != null ? 
this.httpClient : testSdkHttpClient(); return new AmazonSyncHttpClient(testClientConfiguration().toBuilder() .option(SdkClientOption.SYNC_HTTP_CLIENT, sdkHttpClient) - .applyMutation(this::configureRetryPolicy) .applyMutation(this::configureRetryStrategy) .applyMutation(this::configureAdditionalHeaders) .option(SdkClientOption.API_CALL_TIMEOUT, apiCallTimeout) @@ -145,12 +135,6 @@ private void configureAdditionalHeaders(SdkClientConfiguration.Builder builder) builder.option(SdkClientOption.ADDITIONAL_HTTP_HEADERS, headers); } - private void configureRetryPolicy(SdkClientConfiguration.Builder builder) { - if (retryPolicy != null) { - builder.option(SdkClientOption.RETRY_POLICY, retryPolicy); - } - } - private void configureRetryStrategy(SdkClientConfiguration.Builder builder) { if (retryStrategy != null) { builder.option(SdkClientOption.RETRY_STRATEGY, retryStrategy); @@ -159,18 +143,12 @@ private void configureRetryStrategy(SdkClientConfiguration.Builder builder) { } public static class TestAsyncClientBuilder { - private RetryPolicy retryPolicy; private RetryStrategy retryStrategy; private SdkAsyncHttpClient asyncHttpClient; private Duration apiCallTimeout; private Duration apiCallAttemptTimeout; private Map additionalHeaders = new HashMap<>(); - public TestAsyncClientBuilder retryPolicy(RetryPolicy retryPolicy) { - this.retryPolicy = retryPolicy; - return this; - } - public TestAsyncClientBuilder retryStrategy(RetryStrategy retryStrategy) { this.retryStrategy = retryStrategy; return this; @@ -203,7 +181,6 @@ public AmazonAsyncHttpClient build() { .option(SdkClientOption.API_CALL_TIMEOUT, apiCallTimeout) .option(SdkClientOption.API_CALL_ATTEMPT_TIMEOUT, apiCallAttemptTimeout) - .applyMutation(this::configureRetryPolicy) .applyMutation(this::configureRetryStrategy) .applyMutation(this::configureAdditionalHeaders) .build()); @@ -217,12 +194,6 @@ private void configureAdditionalHeaders(SdkClientConfiguration.Builder builder) builder.option(SdkClientOption.ADDITIONAL_HTTP_HEADERS, headers); } - private void configureRetryPolicy(SdkClientConfiguration.Builder builder) { - if (retryPolicy != null) { - builder.option(SdkClientOption.RETRY_POLICY, retryPolicy); - } - } - private void configureRetryStrategy(SdkClientConfiguration.Builder builder) { if (retryStrategy != null) { builder.option(SdkClientOption.RETRY_STRATEGY, retryStrategy); diff --git a/core/sdk-core/src/test/java/utils/retry/SimpleArrayBackoffStrategy.java b/core/sdk-core/src/test/java/utils/retry/SimpleArrayBackoffStrategy.java deleted file mode 100644 index ed782a2031cc..000000000000 --- a/core/sdk-core/src/test/java/utils/retry/SimpleArrayBackoffStrategy.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package utils.retry; - -import java.time.Duration; -import software.amazon.awssdk.core.retry.RetryPolicyContext; -import software.amazon.awssdk.core.retry.backoff.BackoffStrategy; - -/** - * Backoff strategy used in tests to pull backoff value from a backing array. 
Number of retries is - * limited to size of array. - */ -public final class SimpleArrayBackoffStrategy implements BackoffStrategy { - - private final int[] backoffValues; - - public SimpleArrayBackoffStrategy(int[] backoffValues) { - this.backoffValues = backoffValues; - } - - @Override - public Duration computeDelayBeforeNextRetry(RetryPolicyContext context) { - return Duration.ofMillis(backoffValues[context.retriesAttempted()]); - } -} diff --git a/services/dynamodb/src/main/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicy.java b/services/dynamodb/src/main/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicy.java index f34bacbbcae5..e85220e02d61 100644 --- a/services/dynamodb/src/main/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicy.java +++ b/services/dynamodb/src/main/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicy.java @@ -15,6 +15,8 @@ package software.amazon.awssdk.services.dynamodb; +import static software.amazon.awssdk.retries.api.BackoffStrategy.exponentialDelay; + import java.time.Duration; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.awscore.retry.AwsRetryPolicy; @@ -64,6 +66,10 @@ final class DynamoDbRetryPolicy { private DynamoDbRetryPolicy() { } + /** + * @deprecated Use instead {@link #resolveRetryStrategy}. + */ + @Deprecated public static RetryPolicy resolveRetryPolicy(SdkClientConfiguration config) { RetryPolicy configuredRetryPolicy = config.option(SdkClientOption.RETRY_POLICY); if (configuredRetryPolicy != null) { @@ -95,9 +101,11 @@ public static RetryPolicy resolveRetryPolicy(SdkClientConfiguration config) { .profileName(config.option(SdkClientOption.PROFILE_NAME)) .defaultRetryMode(config.option(SdkClientOption.DEFAULT_RETRY_MODE)) .resolve(); + return AwsRetryStrategy.forRetryMode(retryMode) .toBuilder() .maxAttempts(MAX_ATTEMPTS) + .backoffStrategy(exponentialDelay(BASE_DELAY, SdkDefaultRetrySetting.MAX_BACKOFF)) .build(); } } diff --git a/services/dynamodb/src/test/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicyTest.java b/services/dynamodb/src/test/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicyTest.java index 6b4863c996d6..aabb88a2d0af 100644 --- a/services/dynamodb/src/test/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicyTest.java +++ b/services/dynamodb/src/test/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicyTest.java @@ -2,18 +2,16 @@ import static org.assertj.core.api.Assertions.assertThat; -import java.time.Duration; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import software.amazon.awssdk.core.SdkSystemSetting; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; +import software.amazon.awssdk.core.internal.retry.SdkDefaultRetryStrategy; import software.amazon.awssdk.core.retry.RetryMode; -import software.amazon.awssdk.core.retry.RetryPolicy; -import software.amazon.awssdk.core.retry.backoff.BackoffStrategy; -import software.amazon.awssdk.core.retry.backoff.FullJitterBackoffStrategy; import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.testutils.EnvironmentVariableHelper; import software.amazon.awssdk.utils.StringInputStream; @@ -34,38 +32,25 @@ public void reset() { @Test void test_numRetries_with_standardRetryPolicy() { 
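        // RetryStrategy counts total attempts rather than retries: the previous RetryPolicy
        // expectation of numRetries() == 8 corresponds to maxAttempts() == 9
        // (8 retries plus the initial attempt).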
environmentVariableHelper.set(SdkSystemSetting.AWS_RETRY_MODE.environmentVariable(), "standard"); - final SdkClientConfiguration sdkClientConfiguration = SdkClientConfiguration.builder().build(); - final RetryPolicy retryPolicy = DynamoDbRetryPolicy.resolveRetryPolicy(sdkClientConfiguration); - assertThat(retryPolicy.numRetries()).isEqualTo(8); + SdkClientConfiguration sdkClientConfiguration = SdkClientConfiguration.builder().build(); + RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); + assertThat(retryStrategy.maxAttempts()).isEqualTo(9); } @Test void test_numRetries_with_legacyRetryPolicy() { environmentVariableHelper.set(SdkSystemSetting.AWS_RETRY_MODE.environmentVariable(), "legacy"); - final SdkClientConfiguration sdkClientConfiguration = SdkClientConfiguration.builder().build(); - final RetryPolicy retryPolicy = DynamoDbRetryPolicy.resolveRetryPolicy(sdkClientConfiguration); - assertThat(retryPolicy.numRetries()).isEqualTo(8); - } - - @Test - void test_backoffBaseDelay_with_standardRetryPolicy() { - environmentVariableHelper.set(SdkSystemSetting.AWS_RETRY_MODE.environmentVariable(), "standard"); SdkClientConfiguration sdkClientConfiguration = SdkClientConfiguration.builder().build(); - RetryPolicy retryPolicy = DynamoDbRetryPolicy.resolveRetryPolicy(sdkClientConfiguration); - BackoffStrategy backoffStrategy = retryPolicy.backoffStrategy(); - - assertThat(backoffStrategy).isInstanceOfSatisfying(FullJitterBackoffStrategy.class, fjbs -> { - assertThat(fjbs.toBuilder().baseDelay()).isEqualTo(Duration.ofMillis(25)); - }); + RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); + assertThat(retryStrategy.maxAttempts()).isEqualTo(9); } @Test void resolve_retryModeSetInEnv_doesNotCallSupplier() { environmentVariableHelper.set(SdkSystemSetting.AWS_RETRY_MODE.environmentVariable(), "standard"); SdkClientConfiguration sdkClientConfiguration = SdkClientConfiguration.builder().build(); - RetryPolicy retryPolicy = DynamoDbRetryPolicy.resolveRetryPolicy(sdkClientConfiguration); - RetryMode retryMode = retryPolicy.retryMode(); - + RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); + RetryMode retryMode = SdkDefaultRetryStrategy.retryMode(retryStrategy); assertThat(retryMode).isEqualTo(RetryMode.STANDARD); } @@ -82,8 +67,8 @@ void resolve_retryModeSetWithEnvAndSupplier_resolvesFromEnv() { .option(SdkClientOption.PROFILE_FILE_SUPPLIER, () -> profileFile) .option(SdkClientOption.PROFILE_NAME, "default") .build(); - RetryPolicy retryPolicy = DynamoDbRetryPolicy.resolveRetryPolicy(sdkClientConfiguration); - RetryMode retryMode = retryPolicy.retryMode(); + RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); + RetryMode retryMode = SdkDefaultRetryStrategy.retryMode(retryStrategy); assertThat(retryMode).isEqualTo(RetryMode.STANDARD); } @@ -100,8 +85,8 @@ void resolve_retryModeSetWithSupplier_resolvesFromSupplier() { .option(SdkClientOption.PROFILE_FILE_SUPPLIER, () -> profileFile) .option(SdkClientOption.PROFILE_NAME, "default") .build(); - RetryPolicy retryPolicy = DynamoDbRetryPolicy.resolveRetryPolicy(sdkClientConfiguration); - RetryMode retryMode = retryPolicy.retryMode(); + RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); + RetryMode retryMode = SdkDefaultRetryStrategy.retryMode(retryStrategy); assertThat(retryMode).isEqualTo(RetryMode.ADAPTIVE); } @@ -118,8 +103,8 @@ void 
resolve_retryModeSetWithSdkClientOption_resolvesFromSdkClientOption() { .option(SdkClientOption.PROFILE_NAME, "default") .option(SdkClientOption.DEFAULT_RETRY_MODE, RetryMode.STANDARD) .build(); - RetryPolicy retryPolicy = DynamoDbRetryPolicy.resolveRetryPolicy(sdkClientConfiguration); - RetryMode retryMode = retryPolicy.retryMode(); + RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); + RetryMode retryMode = SdkDefaultRetryStrategy.retryMode(retryStrategy); assertThat(retryMode).isEqualTo(RetryMode.STANDARD); } @@ -135,8 +120,8 @@ void resolve_retryModeNotSetWithEnvNorSupplier_resolvesFromSdkDefault() { .option(SdkClientOption.PROFILE_FILE_SUPPLIER, () -> profileFile) .option(SdkClientOption.PROFILE_NAME, "default") .build(); - RetryPolicy retryPolicy = DynamoDbRetryPolicy.resolveRetryPolicy(sdkClientConfiguration); - RetryMode retryMode = retryPolicy.retryMode(); + RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); + RetryMode retryMode = SdkDefaultRetryStrategy.retryMode(retryStrategy); assertThat(retryMode).isEqualTo(RetryMode.LEGACY); } diff --git a/services/iam/pom.xml b/services/iam/pom.xml index f6ecff4d8f62..e6fa5ef5d063 100644 --- a/services/iam/pom.xml +++ b/services/iam/pom.xml @@ -56,5 +56,10 @@ protocol-core ${awsjavasdk.version} + + software.amazon.awssdk + retries-api + ${awsjavasdk.version} + diff --git a/services/iam/src/it/java/software/amazon/awssdk/services/iam/ServiceIntegrationTest.java b/services/iam/src/it/java/software/amazon/awssdk/services/iam/ServiceIntegrationTest.java index dfd86831359d..5d1afa03140b 100644 --- a/services/iam/src/it/java/software/amazon/awssdk/services/iam/ServiceIntegrationTest.java +++ b/services/iam/src/it/java/software/amazon/awssdk/services/iam/ServiceIntegrationTest.java @@ -19,7 +19,6 @@ import org.junit.Before; import org.junit.Test; -import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.testutils.service.AwsTestBase; @@ -30,7 +29,7 @@ public class ServiceIntegrationTest extends AwsTestBase { public void setUp() { iam = IamClient.builder() .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) - .overrideConfiguration(c -> c.retryPolicy(RetryPolicy.builder().numRetries(50).build())) + .overrideConfiguration(c -> c.retryStrategy(b -> b.maxAttempts(50))) .region(Region.AWS_GLOBAL) .build(); } diff --git a/services/s3/pom.xml b/services/s3/pom.xml index d5180315c75b..caf180f1193e 100644 --- a/services/s3/pom.xml +++ b/services/s3/pom.xml @@ -87,6 +87,11 @@ true + + software.amazon.awssdk + retries-api + ${awsjavasdk.version} + commons-io diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/DefaultS3CrtAsyncClient.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/DefaultS3CrtAsyncClient.java index 860ac509932e..76d5ff6bbb71 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/DefaultS3CrtAsyncClient.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/DefaultS3CrtAsyncClient.java @@ -29,6 +29,7 @@ import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; import software.amazon.awssdk.awscore.AwsRequest; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; +import software.amazon.awssdk.awscore.retry.AwsRetryStrategy; import software.amazon.awssdk.core.SdkRequest; import 
software.amazon.awssdk.core.checksums.ChecksumValidation; import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; @@ -39,7 +40,6 @@ import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.internal.util.ClassLoaderHelper; -import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.core.signer.NoOpSigner; import software.amazon.awssdk.crt.io.ExponentialBackoffRetryOptions; import software.amazon.awssdk.crt.io.StandardRetryOptions; @@ -82,7 +82,7 @@ private static S3AsyncClient initializeS3AsyncClient(DefaultS3CrtClientBuilder b .putAdvancedOption(SdkAdvancedClientOption.SIGNER, new NoOpSigner()) .putExecutionAttribute(SdkExecutionAttribute.HTTP_RESPONSE_CHECKSUM_VALIDATION, ChecksumValidation.FORCE_SKIP) - .retryPolicy(RetryPolicy.none()) + .retryStrategy(AwsRetryStrategy.none()) .addExecutionInterceptor(new ValidateRequestInterceptor()) .addExecutionInterceptor(new AttachHttpAttributesExecutionInterceptor()); diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/functionaltests/RetriesOn200Test.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/functionaltests/RetriesOn200Test.java index 56bb6d4501ff..b76cd1fcd62f 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/functionaltests/RetriesOn200Test.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/functionaltests/RetriesOn200Test.java @@ -30,8 +30,8 @@ import software.amazon.awssdk.core.interceptor.Context; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.internal.retry.SdkDefaultRetryStrategy; import software.amazon.awssdk.core.retry.RetryMode; -import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.S3Exception; @@ -55,7 +55,7 @@ public void copyObjectRetriesOn200InternalErrorFailures() { .endpointOverride(URI.create("http://localhost:" + mockServer.port())) .region(Region.US_WEST_2) .credentialsProvider(AnonymousCredentialsProvider.create()) - .overrideConfiguration(c -> c.retryPolicy(RetryMode.STANDARD) + .overrideConfiguration(c -> c.retryStrategy(RetryMode.STANDARD) .addExecutionInterceptor(countingInterceptor)) .serviceConfiguration(c -> c.pathStyleAccessEnabled(true)) .build(); @@ -72,7 +72,8 @@ public void copyObjectRetriesOn200InternalErrorFailures() { assertThat(e.awsErrorDetails().errorCode()).isEqualTo(ERROR_CODE); assertThat(e.awsErrorDetails().errorMessage()).isEqualTo(ERROR_MESSAGE); }); - assertThat(countingInterceptor.attemptCount).isEqualTo(RetryPolicy.forRetryMode(RetryMode.STANDARD).numRetries() + 1); + assertThat(countingInterceptor.attemptCount) + .isEqualTo(SdkDefaultRetryStrategy.forRetryMode(RetryMode.STANDARD).maxAttempts()); } private static final class AttemptCountingInterceptor implements ExecutionInterceptor { diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/retry/AwsJsonRetryTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/retry/AwsJsonRetryTest.java index 92c15ad668af..68bf4cf907c2 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/retry/AwsJsonRetryTest.java +++ 
b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/retry/AwsJsonRetryTest.java @@ -31,7 +31,6 @@ import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; import software.amazon.awssdk.awscore.retry.AwsRetryStrategy; -import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.protocoljsonrpc.ProtocolJsonRpcClient; import software.amazon.awssdk.services.protocoljsonrpc.model.AllTypesRequest; diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncStreamingOperationApiCallAttemptTimeoutTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncStreamingOperationApiCallAttemptTimeoutTest.java index 1c3e3e63679d..ca1c960cde17 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncStreamingOperationApiCallAttemptTimeoutTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncStreamingOperationApiCallAttemptTimeoutTest.java @@ -19,7 +19,6 @@ import software.amazon.awssdk.awscore.retry.AwsRetryStrategy; import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; import software.amazon.awssdk.core.exception.ApiCallAttemptTimeoutException; -import software.amazon.awssdk.core.retry.RetryPolicy; /** * A set of tests to test ApiCallTimeout for synchronous streaming operations because they are tricky. diff --git a/test/stability-tests/pom.xml b/test/stability-tests/pom.xml index 7370ca1befbd..b19fe6adcf74 100644 --- a/test/stability-tests/pom.xml +++ b/test/stability-tests/pom.xml @@ -83,6 +83,18 @@ ${awsjavasdk.version} test + + software.amazon.awssdk + retries-api + ${awsjavasdk.version} + test + + + software.amazon.awssdk + retries + ${awsjavasdk.version} + test + org.apache.commons commons-lang3 diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchCrtAsyncStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchCrtAsyncStabilityTest.java index ee58ce44ee6b..d5d93f098703 100644 --- a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchCrtAsyncStabilityTest.java +++ b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchCrtAsyncStabilityTest.java @@ -19,11 +19,9 @@ import java.time.Duration; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; -import software.amazon.awssdk.core.retry.RetryPolicy; -import software.amazon.awssdk.crt.io.EventLoopGroup; -import software.amazon.awssdk.crt.io.HostResolver; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; import software.amazon.awssdk.http.crt.AwsCrtAsyncHttpClient; +import software.amazon.awssdk.retries.DefaultRetryStrategy; import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; import software.amazon.awssdk.stability.tests.exceptions.StabilityTestsRetryableException; import software.amazon.awssdk.stability.tests.utils.RetryableTest; @@ -49,7 +47,7 @@ public static void setup() { .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) .overrideConfiguration(b -> b.apiCallTimeout(Duration.ofMinutes(10)) // Retry at test level - .retryPolicy(RetryPolicy.none())) + .retryStrategy(DefaultRetryStrategy.none())) .build(); } diff --git 
a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchNettyAsyncStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchNettyAsyncStabilityTest.java index 204fc48c8dbf..6ee551ee664c 100644 --- a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchNettyAsyncStabilityTest.java +++ b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchNettyAsyncStabilityTest.java @@ -19,8 +19,8 @@ import java.time.Duration; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; -import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.retries.DefaultRetryStrategy; import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; import software.amazon.awssdk.stability.tests.exceptions.StabilityTestsRetryableException; import software.amazon.awssdk.stability.tests.utils.RetryableTest; @@ -44,7 +44,7 @@ public static void setup() { .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) .overrideConfiguration(b -> b // Retry at test level - .retryPolicy(RetryPolicy.none()) + .retryStrategy(DefaultRetryStrategy.none()) .apiCallTimeout(Duration.ofMinutes(1))) .build(); } diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3NettyAsyncStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3NettyAsyncStabilityTest.java index d7afa62bade0..83e01b448d33 100644 --- a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3NettyAsyncStabilityTest.java +++ b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3NettyAsyncStabilityTest.java @@ -2,8 +2,8 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; -import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.retries.DefaultRetryStrategy; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.stability.tests.exceptions.StabilityTestsRetryableException; import software.amazon.awssdk.stability.tests.utils.RetryableTest; @@ -23,7 +23,7 @@ public class S3NettyAsyncStabilityTest extends S3BaseStabilityTest { .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) .overrideConfiguration(b -> b.apiCallTimeout(Duration.ofMinutes(10)) // Retry at test level - .retryPolicy(RetryPolicy.none())) + .retryStrategy(DefaultRetryStrategy.none())) .build(); } diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3WithCrtAsyncHttpClientStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3WithCrtAsyncHttpClientStabilityTest.java index c92e5f691194..c0f77d8a55b5 100644 --- a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3WithCrtAsyncHttpClientStabilityTest.java +++ b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3WithCrtAsyncHttpClientStabilityTest.java @@ -3,9 +3,9 @@ import java.time.Duration; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; -import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; import software.amazon.awssdk.http.crt.AwsCrtAsyncHttpClient; +import 
software.amazon.awssdk.retries.DefaultRetryStrategy; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.stability.tests.exceptions.StabilityTestsRetryableException; import software.amazon.awssdk.stability.tests.utils.RetryableTest; @@ -29,7 +29,7 @@ public class S3WithCrtAsyncHttpClientStabilityTest extends S3BaseStabilityTest { .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) .overrideConfiguration(b -> b.apiCallTimeout(Duration.ofMinutes(10)) // Retry at test level - .retryPolicy(RetryPolicy.none())) + .retryStrategy(DefaultRetryStrategy.none())) .build(); } diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/sqs/SqsCrtAsyncStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/sqs/SqsCrtAsyncStabilityTest.java index 20bca1557984..3ee21c8783d7 100644 --- a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/sqs/SqsCrtAsyncStabilityTest.java +++ b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/sqs/SqsCrtAsyncStabilityTest.java @@ -18,11 +18,9 @@ import java.time.Duration; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; -import software.amazon.awssdk.core.retry.RetryPolicy; -import software.amazon.awssdk.crt.io.EventLoopGroup; -import software.amazon.awssdk.crt.io.HostResolver; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; import software.amazon.awssdk.http.crt.AwsCrtAsyncHttpClient; +import software.amazon.awssdk.retries.DefaultRetryStrategy; import software.amazon.awssdk.services.sqs.SqsAsyncClient; import software.amazon.awssdk.stability.tests.exceptions.StabilityTestsRetryableException; import software.amazon.awssdk.stability.tests.utils.RetryableTest; @@ -52,7 +50,7 @@ public static void setup() { .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) .overrideConfiguration(b -> b.apiCallTimeout(Duration.ofMinutes(10)) // Retry at test level - .retryPolicy(RetryPolicy.none())) + .retryStrategy(DefaultRetryStrategy.none())) .build(); queueName = "sqscrtasyncstabilitytests" + System.currentTimeMillis(); From 4acd4e3f20b1c79e0f949d838779915e50548cdb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20Sugawara=20=28=E2=88=A9=EF=BD=80-=C2=B4=29?= =?UTF-8?q?=E2=8A=83=E2=94=81=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E?= Date: Fri, 7 Jul 2023 14:25:05 -0700 Subject: [PATCH 18/32] Fix minor logging issues --- .../amazon/awssdk/retries/internal/BaseRetryStrategy.java | 2 +- .../awssdk/retries/internal/DefaultLegacyRetryStrategy.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/BaseRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/BaseRetryStrategy.java index def50765d37b..0c312a31388c 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/BaseRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/BaseRetryStrategy.java @@ -246,7 +246,7 @@ private void throwOnNonRetryableException(RefreshRetryTokenRequest request) { Throwable failure = request.failure(); if (isNonRetryableException(request)) { String message = nonRetryableExceptionMessage(token); - log.error(() -> message, failure); + log.debug(() -> message, failure); TokenBucket tokenBucket = tokenBucketStore.tokenBucketForScope(token.scope()); DefaultRetryToken refreshedToken = token.toBuilder() diff --git 
a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultLegacyRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultLegacyRetryStrategy.java index a4aea00b2f78..94c072bd63ea 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultLegacyRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultLegacyRetryStrategy.java @@ -28,7 +28,7 @@ @SdkInternalApi public final class DefaultLegacyRetryStrategy extends BaseRetryStrategy implements LegacyRetryStrategy { - private static final Logger LOG = Logger.loggerFor(DefaultLegacyRetryStrategy.class); + private static final Logger LOG = Logger.loggerFor(LegacyRetryStrategy.class); private final BackoffStrategy throttlingBackoffStrategy; private final int throttlingExceptionCost; private final Predicate treatAsThrottling; From 0d0d819aa4e3e5a420c2f99b1c503ca5cc64b9df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20Sugawara=20=28=E2=88=A9=EF=BD=80-=C2=B4=29?= =?UTF-8?q?=E2=8A=83=E2=94=81=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E?= Date: Fri, 7 Jul 2023 14:39:40 -0700 Subject: [PATCH 19/32] Update sdk version --- core/retries-api/pom.xml | 2 +- core/retries/pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/retries-api/pom.xml b/core/retries-api/pom.xml index 116fd8162723..2e11d984c496 100644 --- a/core/retries-api/pom.xml +++ b/core/retries-api/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.95-SNAPSHOT + 2.20.102-SNAPSHOT 4.0.0 diff --git a/core/retries/pom.xml b/core/retries/pom.xml index 64dfb146e3b6..ef14e00216e1 100644 --- a/core/retries/pom.xml +++ b/core/retries/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.95-SNAPSHOT + 2.20.102-SNAPSHOT 4.0.0 From 802ae198b3971f89988c8e3ac7116a489ac5259c Mon Sep 17 00:00:00 2001 From: Manuel Sugawara Date: Tue, 18 Jul 2023 08:49:38 -0700 Subject: [PATCH 20/32] Add support for retryable trait (#4170) --- .../awssdk/codegen/AddExceptionShapes.java | 11 +- .../amazon/awssdk/codegen/AddShapes.java | 2 + .../model/intermediate/ShapeModel.java | 21 +++ .../codegen/model/service/RetryableTrait.java | 33 +++++ .../awssdk/codegen/model/service/Shape.java | 14 ++ .../codegen/poet/model/AwsServiceModel.java | 24 ++++ .../RetryableExceptionClassSpecTest.java | 61 ++++++++ .../model/exceptions/customization.config | 2 + ...onserviceinternalservererrorexception.java | 127 +++++++++++++++++ .../jsonserviceinvalidinputexception.java | 121 ++++++++++++++++ .../jsonservicethrottlingexception.java | 131 ++++++++++++++++++ .../poet/model/exceptions/service-2.json | 107 ++++++++++++++ .../core/exception/SdkServiceException.java | 23 ++- .../retry/SdkDefaultRetryStrategy.java | 12 +- .../amazon/awssdk/core/retry/RetryUtils.java | 10 ++ 15 files changed, 687 insertions(+), 12 deletions(-) create mode 100644 codegen/src/main/java/software/amazon/awssdk/codegen/model/service/RetryableTrait.java create mode 100644 codegen/src/test/java/software/amazon/awssdk/codegen/poet/model/RetryableExceptionClassSpecTest.java create mode 100644 codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/customization.config create mode 100644 codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonserviceinternalservererrorexception.java create mode 100644 codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonserviceinvalidinputexception.java create mode 100644 
codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonservicethrottlingexception.java create mode 100644 codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/service-2.json diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/AddExceptionShapes.java b/codegen/src/main/java/software/amazon/awssdk/codegen/AddExceptionShapes.java index 88606e4eb393..be9f2c7ad6b9 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/AddExceptionShapes.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/AddExceptionShapes.java @@ -44,9 +44,10 @@ private Map constructExceptionShapes() { // Java shape models, to be constructed Map javaShapes = new HashMap<>(); - for (Map.Entry shape : getServiceModel().getShapes().entrySet()) { - if (shape.getValue().isException()) { - String errorShapeName = shape.getKey(); + for (Map.Entry kvp : getServiceModel().getShapes().entrySet()) { + if (kvp.getValue().isException()) { + Shape shape = kvp.getValue(); + String errorShapeName = kvp.getKey(); String javaClassName = getNamingStrategy().getExceptionName(errorShapeName); ShapeModel exceptionShapeModel = generateShapeModel(javaClassName, @@ -55,8 +56,10 @@ private Map constructExceptionShapes() { exceptionShapeModel.setType(ShapeType.Exception.getValue()); exceptionShapeModel.setErrorCode(getErrorCode(errorShapeName)); exceptionShapeModel.setHttpStatusCode(getHttpStatusCode(errorShapeName)); + exceptionShapeModel.withIsRetryable(shape.isRetryable()); + exceptionShapeModel.withIsThrottling(shape.isThrottling()); if (exceptionShapeModel.getDocumentation() == null) { - exceptionShapeModel.setDocumentation(shape.getValue().getDocumentation()); + exceptionShapeModel.setDocumentation(shape.getDocumentation()); } javaShapes.put(javaClassName, exceptionShapeModel); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java b/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java index fe658eb40f3d..d1cbd0680e15 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java @@ -87,6 +87,8 @@ protected final ShapeModel generateShapeModel(String javaClassName, String shape shapeModel.withXmlNamespace(shape.getXmlNamespace()); shapeModel.withIsUnion(shape.isUnion()); shapeModel.withIsFault(shape.isFault()); + shapeModel.withIsRetryable(shape.isRetryable()); + shapeModel.withIsThrottling(shape.isThrottling()); boolean hasHeaderMember = false; boolean hasStatusCodeMember = false; diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModel.java index be9a779d6d01..098ea46bc7e4 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModel.java @@ -75,6 +75,9 @@ public class ShapeModel extends DocumentationModel implements HasDeprecation { private boolean union; + private boolean retryable; + private boolean throttling; + public ShapeModel() { } @@ -648,4 +651,22 @@ public ShapeModel withIsFault(boolean fault) { this.fault = fault; return this; } + + public boolean isRetryable() { + return retryable; + } + + public ShapeModel withIsRetryable(boolean retryable) { + this.retryable = retryable; + return this; + } + + public boolean isThrottling() { + return throttling; + } + + 
public ShapeModel withIsThrottling(boolean throttling) { + this.throttling = throttling; + return this; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/RetryableTrait.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/RetryableTrait.java new file mode 100644 index 000000000000..63f8d165d316 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/RetryableTrait.java @@ -0,0 +1,33 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.model.service; + +public class RetryableTrait { + + private Boolean throttling; + + public void setThrottling(boolean throttling) { + this.throttling = throttling; + } + + public Boolean getThrottling() { + return throttling; + } + + public boolean isThrottling() { + return Boolean.TRUE.equals(throttling); + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Shape.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Shape.java index a9add8824f0d..077e957c9771 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Shape.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Shape.java @@ -78,6 +78,8 @@ public class Shape { private boolean union; + private RetryableTrait retryable; + public boolean isFault() { return fault; } @@ -345,4 +347,16 @@ public boolean isUnion() { public void setUnion(boolean union) { this.union = union; } + + public void setRetryable(RetryableTrait retryable) { + this.retryable = retryable; + } + + public boolean isRetryable() { + return retryable != null; + } + + public boolean isThrottling() { + return retryable != null && retryable.isThrottling(); + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/AwsServiceModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/AwsServiceModel.java index 5d3d359dcf1d..e2fbb81932b7 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/AwsServiceModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/AwsServiceModel.java @@ -34,6 +34,7 @@ import com.squareup.javapoet.WildcardTypeName; import java.io.Serializable; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -437,6 +438,7 @@ private List modelClassMethods() { methodSpecs.add(builderMethod()); methodSpecs.add(serializableBuilderClass()); methodSpecs.addAll(memberGetters()); + methodSpecs.addAll(retryableOverrides()); break; default: methodSpecs.addAll(addModifier(memberGetters(), FINAL)); @@ -689,6 +691,28 @@ private CodeBlock getterStatement(MemberModel model) { return CodeBlock.of("return $N;", modelVariable.getVariableName()); } + private List retryableOverrides() { + if (shapeModel.isRetryable()) { + MethodSpec isRetryable = MethodSpec.methodBuilder("isRetryableException") + 
.addAnnotation(Override.class) + .addModifiers(PUBLIC) + .returns(TypeName.BOOLEAN) + .addStatement("return true") + .build(); + if (shapeModel.isThrottling()) { + MethodSpec isThrottling = MethodSpec.methodBuilder("isThrottlingException") + .addAnnotation(Override.class) + .addModifiers(PUBLIC) + .returns(TypeName.BOOLEAN) + .addStatement("return true") + .build(); + return Arrays.asList(isRetryable, isThrottling); + } + return Arrays.asList(isRetryable); + } + return emptyList(); + } + private List nestedModelClassTypes() { List nestedClasses = new ArrayList<>(); switch (shapeModel.getShapeType()) { diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/model/RetryableExceptionClassSpecTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/model/RetryableExceptionClassSpecTest.java new file mode 100644 index 000000000000..22c7d208cd33 --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/model/RetryableExceptionClassSpecTest.java @@ -0,0 +1,61 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.poet.model; + +import static org.hamcrest.MatcherAssert.assertThat; +import static software.amazon.awssdk.codegen.poet.PoetMatchers.generatesTo; + +import java.io.File; +import java.io.IOException; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.codegen.C2jModels; +import software.amazon.awssdk.codegen.IntermediateModelBuilder; +import software.amazon.awssdk.codegen.model.config.customization.CustomizationConfig; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; +import software.amazon.awssdk.codegen.model.intermediate.ShapeType; +import software.amazon.awssdk.codegen.model.service.ServiceModel; +import software.amazon.awssdk.codegen.poet.ClassSpec; +import software.amazon.awssdk.codegen.utils.ModelLoaderUtils; + +public class RetryableExceptionClassSpecTest { + private static IntermediateModel intermediateModel; + + @BeforeAll + public static void setUp() throws IOException { + File serviceModelFile = + new File(RetryableExceptionClassSpecTest.class.getResource("exceptions/service-2.json").getFile()); + File customizationConfigFile = new File(RetryableExceptionClassSpecTest.class.getResource("exceptions/customization.config") + .getFile()); + intermediateModel = new IntermediateModelBuilder( + C2jModels.builder() + .serviceModel(ModelLoaderUtils.loadModel(ServiceModel.class, serviceModelFile)) + .customizationConfig(ModelLoaderUtils.loadModel(CustomizationConfig.class, customizationConfigFile)) + .build()) + .build(); + } + + @Test + public void serviceCodegen_withErrorsWithRetryableTrait_ShouldOverrideIsRetryableAndIsThrottling() { + intermediateModel.getShapes().forEach((name, shape) -> { + if (shape.getShapeType() == ShapeType.Exception) { + ClassSpec spec = new AwsServiceModel(intermediateModel, shape); + assertThat(spec, generatesTo( "exceptions/" + name.toLowerCase() + ".java")); + 
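                // The expected classes are generated from shapes in exceptions/service-2.json that carry
                // the c2j retryable trait; a sketch of the relevant member on such a shape:
                //   "retryable": { "throttling": true }
                // The trait makes the codegen emit an isRetryableException() override, and
                // throttling=true additionally emits an isThrottlingException() override,
                // as the expected .java resources below show.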
} + }); + } + +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/customization.config new file mode 100644 index 000000000000..311847daa5a0 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/customization.config @@ -0,0 +1,2 @@ +{} + diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonserviceinternalservererrorexception.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonserviceinternalservererrorexception.java new file mode 100644 index 000000000000..7175fbd58f3a --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonserviceinternalservererrorexception.java @@ -0,0 +1,127 @@ +package software.amazon.awssdk.services.json.model; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.awscore.exception.AwsErrorDetails; +import software.amazon.awssdk.core.SdkField; +import software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + *

<p> + * There was an internal server error. + * </p>

    + */ +@Generated("software.amazon.awssdk:codegen") +public final class JsonServiceInternalServerErrorException extends JsonException implements + ToCopyableBuilder { + private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList()); + + private static final long serialVersionUID = 1L; + + private JsonServiceInternalServerErrorException(BuilderImpl builder) { + super(builder); + } + + @Override + public Builder toBuilder() { + return new BuilderImpl(this); + } + + public static Builder builder() { + return new BuilderImpl(); + } + + public static Class serializableBuilderClass() { + return BuilderImpl.class; + } + + @Override + public boolean isRetryableException() { + return true; + } + + @Override + public final List> sdkFields() { + return SDK_FIELDS; + } + + public interface Builder extends SdkPojo, CopyableBuilder, + JsonException.Builder { + @Override + Builder awsErrorDetails(AwsErrorDetails awsErrorDetails); + + @Override + Builder message(String message); + + @Override + Builder requestId(String requestId); + + @Override + Builder statusCode(int statusCode); + + @Override + Builder cause(Throwable cause); + + @Override + Builder writableStackTrace(Boolean writableStackTrace); + } + + static final class BuilderImpl extends JsonException.BuilderImpl implements Builder { + private BuilderImpl() { + } + + private BuilderImpl(JsonServiceInternalServerErrorException model) { + super(model); + } + + @Override + public BuilderImpl awsErrorDetails(AwsErrorDetails awsErrorDetails) { + this.awsErrorDetails = awsErrorDetails; + return this; + } + + @Override + public BuilderImpl message(String message) { + this.message = message; + return this; + } + + @Override + public BuilderImpl requestId(String requestId) { + this.requestId = requestId; + return this; + } + + @Override + public BuilderImpl statusCode(int statusCode) { + this.statusCode = statusCode; + return this; + } + + @Override + public BuilderImpl cause(Throwable cause) { + this.cause = cause; + return this; + } + + @Override + public BuilderImpl writableStackTrace(Boolean writableStackTrace) { + this.writableStackTrace = writableStackTrace; + return this; + } + + @Override + public JsonServiceInternalServerErrorException build() { + return new JsonServiceInternalServerErrorException(this); + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonserviceinvalidinputexception.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonserviceinvalidinputexception.java new file mode 100644 index 000000000000..437d4ffb774e --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonserviceinvalidinputexception.java @@ -0,0 +1,121 @@ +package software.amazon.awssdk.services.json.model; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.awscore.exception.AwsErrorDetails; +import software.amazon.awssdk.core.SdkField; +import software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + *

<p> + * The request was rejected because an invalid or out-of-range value was supplied for an input parameter. + * </p>

    + */ +@Generated("software.amazon.awssdk:codegen") +public final class JsonServiceInvalidInputException extends JsonException implements + ToCopyableBuilder { + private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList()); + + private static final long serialVersionUID = 1L; + + private JsonServiceInvalidInputException(BuilderImpl builder) { + super(builder); + } + + @Override + public Builder toBuilder() { + return new BuilderImpl(this); + } + + public static Builder builder() { + return new BuilderImpl(); + } + + public static Class serializableBuilderClass() { + return BuilderImpl.class; + } + + @Override + public final List> sdkFields() { + return SDK_FIELDS; + } + + public interface Builder extends SdkPojo, CopyableBuilder, JsonException.Builder { + @Override + Builder awsErrorDetails(AwsErrorDetails awsErrorDetails); + + @Override + Builder message(String message); + + @Override + Builder requestId(String requestId); + + @Override + Builder statusCode(int statusCode); + + @Override + Builder cause(Throwable cause); + + @Override + Builder writableStackTrace(Boolean writableStackTrace); + } + + static final class BuilderImpl extends JsonException.BuilderImpl implements Builder { + private BuilderImpl() { + } + + private BuilderImpl(JsonServiceInvalidInputException model) { + super(model); + } + + @Override + public BuilderImpl awsErrorDetails(AwsErrorDetails awsErrorDetails) { + this.awsErrorDetails = awsErrorDetails; + return this; + } + + @Override + public BuilderImpl message(String message) { + this.message = message; + return this; + } + + @Override + public BuilderImpl requestId(String requestId) { + this.requestId = requestId; + return this; + } + + @Override + public BuilderImpl statusCode(int statusCode) { + this.statusCode = statusCode; + return this; + } + + @Override + public BuilderImpl cause(Throwable cause) { + this.cause = cause; + return this; + } + + @Override + public BuilderImpl writableStackTrace(Boolean writableStackTrace) { + this.writableStackTrace = writableStackTrace; + return this; + } + + @Override + public JsonServiceInvalidInputException build() { + return new JsonServiceInvalidInputException(this); + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonservicethrottlingexception.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonservicethrottlingexception.java new file mode 100644 index 000000000000..95f032aeed0c --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonservicethrottlingexception.java @@ -0,0 +1,131 @@ +package software.amazon.awssdk.services.json.model; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.awscore.exception.AwsErrorDetails; +import software.amazon.awssdk.core.SdkField; +import software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + *
<p>
    + * The request was denied due to request throttling.
    + * </p>
    + */ +@Generated("software.amazon.awssdk:codegen") +public final class JsonServiceThrottlingException extends JsonException implements + ToCopyableBuilder { + private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList()); + + private static final long serialVersionUID = 1L; + + private JsonServiceThrottlingException(BuilderImpl builder) { + super(builder); + } + + @Override + public Builder toBuilder() { + return new BuilderImpl(this); + } + + public static Builder builder() { + return new BuilderImpl(); + } + + public static Class serializableBuilderClass() { + return BuilderImpl.class; + } + + @Override + public boolean isRetryableException() { + return true; + } + + @Override + public boolean isThrottlingException() { + return true; + } + + @Override + public final List> sdkFields() { + return SDK_FIELDS; + } + + public interface Builder extends SdkPojo, CopyableBuilder, JsonException.Builder { + @Override + Builder awsErrorDetails(AwsErrorDetails awsErrorDetails); + + @Override + Builder message(String message); + + @Override + Builder requestId(String requestId); + + @Override + Builder statusCode(int statusCode); + + @Override + Builder cause(Throwable cause); + + @Override + Builder writableStackTrace(Boolean writableStackTrace); + } + + static final class BuilderImpl extends JsonException.BuilderImpl implements Builder { + private BuilderImpl() { + } + + private BuilderImpl(JsonServiceThrottlingException model) { + super(model); + } + + @Override + public BuilderImpl awsErrorDetails(AwsErrorDetails awsErrorDetails) { + this.awsErrorDetails = awsErrorDetails; + return this; + } + + @Override + public BuilderImpl message(String message) { + this.message = message; + return this; + } + + @Override + public BuilderImpl requestId(String requestId) { + this.requestId = requestId; + return this; + } + + @Override + public BuilderImpl statusCode(int statusCode) { + this.statusCode = statusCode; + return this; + } + + @Override + public BuilderImpl cause(Throwable cause) { + this.cause = cause; + return this; + } + + @Override + public BuilderImpl writableStackTrace(Boolean writableStackTrace) { + this.writableStackTrace = writableStackTrace; + return this; + } + + @Override + public JsonServiceThrottlingException build() { + return new JsonServiceThrottlingException(this); + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/service-2.json new file mode 100644 index 000000000000..ddc539a3b170 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/service-2.json @@ -0,0 +1,107 @@ +{ + "version": "2.0", + "metadata": { + "apiVersion": "2010-05-08", + "endpointPrefix": "json-service", + "globalEndpoint": "json-service.amazonaws.com", + "jsonVersion": "1.1", + "protocol": "rest-json", + "serviceAbbreviation": "JsonService", + "serviceFullName": "JsonService", + "serviceId": "JsonService", + "signatureVersion": "v4", + "uid": "json-service-2010-05-08", + "xmlNamespace": "https://json-service.amazonaws.com/doc/2010-05-08/", + "awsQueryCompatible": {} + }, + "operations": { + "APostOperation": { + "name": "APostOperation", + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "APostOperationRequest" + }, + "errors": [ + { + "shape": "JsonServiceInvalidInputException" + }, + { + "shape": 
"JsonServiceThrottlingException" + }, + { + "shape": "JsonServiceInternalServerError" + } + ], + "documentation": "
<p>Performs a post operation to the query service and has no output</p>
    " + } + }, + "shapes": { + "APostOperationRequest": { + "type": "structure", + "members": { + "StringMember": { + "shape": "String", + "documentation": "
<p>A required member</p>
    " + } + } + }, + "JsonServiceInvalidInputException": { + "type": "structure", + "members": { + "message": { + "shape": "String" + } + }, + "documentation": "
<p>The request was rejected because an invalid or out-of-range value was supplied for an input parameter.</p>
    ", + "error": { + "code": "InvalidInput", + "httpStatusCode": 400, + "senderFault": true + }, + "exception": true + }, + "JsonServiceThrottlingException": { + "type": "structure", + "members": { + "message": { + "shape": "String" + } + }, + "error": { + "httpStatusCode": 429, + "senderFault": true + }, + "retryable": { + "throttling": true + }, + "documentation": "
<p>The request was denied due to request throttling.</p>
    ", + "exception": true + }, + "JsonServiceInternalServerError": { + "type": "structure", + "members": { + "message": { + "shape": "String" + } + }, + "documentation": "
<p>There was an internal server error.</p>
    ", + "error": { + "code": "InvalidInput", + "httpStatusCode": 400, + "senderFault": true + }, + "fault": true, + "retryable": { + "throttling": false + }, + "exception": true + }, + "String": { + "type": "string" + } + }, + "documentation": "A service" +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/exception/SdkServiceException.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/exception/SdkServiceException.java index 18dd0487fdf7..17fa56aa377b 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/exception/SdkServiceException.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/exception/SdkServiceException.java @@ -29,9 +29,6 @@ * reason, the service was not able to process it, and returned an error * response instead. *
<p>
    - * Exceptions that extend {@link SdkServiceException} are assumed to be able to be
    - * successfully retried.
    - * <p>
    * SdkServiceException provides callers several pieces of information that can * be used to obtain more information about the error and why it occurred. * @@ -76,21 +73,35 @@ public int statusCode() { } /** - * Specifies whether or not an exception may have been caused by clock skew. + * Specifies whether an exception may have been caused by clock skew. */ public boolean isClockSkewException() { return false; } /** - * Specifies whether or not an exception is caused by throttling. + * Specifies whether an exception is caused by throttling. This method by default returns {@code true} if the status code is + * equal to 429 Too Many Requests + * but subclasses can override this method to signal that the specific subclass is considered a throttling exception. * - * @return true if the status code is 429, otherwise false. + * @return true if the exception is classified as throttling, otherwise false. + * @see #isRetryableException() */ public boolean isThrottlingException() { return statusCode == HttpStatusCode.THROTTLING; } + /** + * Specifies whether an exception is retryable. This method by default returns {@code false} but subclasses can override this + * value to signal that the specific subclass is considered retryable. + * + * @return true if the exception is classified as retryable, otherwise false. + * @see #isThrottlingException() + */ + public boolean isRetryableException() { + return false; + } + /** * @return {@link Builder} instance to construct a new {@link SdkServiceException}. */ diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/SdkDefaultRetryStrategy.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/SdkDefaultRetryStrategy.java index 3ba667110f92..ee81eb023a7a 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/SdkDefaultRetryStrategy.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/SdkDefaultRetryStrategy.java @@ -145,12 +145,13 @@ public static AdaptiveRetryStrategy.Builder adaptiveRetryStrategyBuilder() { * Configures a retry strategy using its builder to add SDK-generic retry exceptions. 
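     *
     * <p>A minimal usage sketch (illustrative only; {@code myBuilder} stands for any {@link RetryStrategy.Builder}
     * implementation supplied by the caller):
     * <pre>{@code
     * RetryStrategy strategy = SdkDefaultRetryStrategy.configure(myBuilder).build();
     * }</pre>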
* * @param builder The builder to add the SDK-generic retry exceptions + * @param The type of the builder extending {@link RetryStrategy.Builder} * @return The given builder - * @param The type of the builder extending {@link RetryStrategy.Builder} */ public static > T configure(T builder) { - builder.retryOnException(SdkDefaultRetryStrategy::retryOnStatusCodes) + builder.retryOnException(SdkDefaultRetryStrategy::retryOnRetryableException) + .retryOnException(SdkDefaultRetryStrategy::retryOnStatusCodes) .retryOnException(SdkDefaultRetryStrategy::retryOnClockSkewException) .retryOnException(SdkDefaultRetryStrategy::retryOnThrottlingCondition); SdkDefaultRetrySetting.RETRYABLE_EXCEPTIONS.forEach(builder::retryOnExceptionOrCauseInstanceOf); @@ -164,6 +165,13 @@ private static boolean treatAsThrottling(Throwable t) { return false; } + private static boolean retryOnRetryableException(Throwable ex) { + if (ex instanceof SdkException) { + return RetryUtils.isRetryableException((SdkException) ex); + } + return false; + } + private static boolean retryOnStatusCodes(Throwable ex) { if (ex instanceof SdkServiceException) { SdkServiceException failure = (SdkServiceException) ex; diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryUtils.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryUtils.java index 8e6d7cf2a422..f2b7e3c7c86d 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryUtils.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryUtils.java @@ -66,4 +66,14 @@ public static boolean isClockSkewException(SdkException exception) { public static boolean isThrottlingException(SdkException exception) { return isServiceException(exception) && toServiceException(exception).isThrottlingException(); } + + /** + * Returns true if the specified exception is retryable. + * + * @param exception The exception to test. + * @return True if the exception resulted from an exception modeled as retryable. 
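+ * <p>A minimal usage sketch (illustrative only; {@code client} and {@code request} are hypothetical placeholders):
+ * <pre>{@code
+ * try {
+ *     client.someOperation(request);
+ * } catch (SdkException e) {
+ *     if (RetryUtils.isRetryableException(e)) {
+ *         // The error was modeled as retryable by the service, so a retry strategy may attempt it again.
+ *     }
+ * }
+ * }</pre>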
+ */ + public static boolean isRetryableException(SdkException exception) { + return isServiceException(exception) && toServiceException(exception).isRetryableException(); + } } From 5aec47fcb67d976cf194fc93ce8698257b65491b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20Sugawara=20=28=E2=88=A9=EF=BD=80-=C2=B4=29?= =?UTF-8?q?=E2=8A=83=E2=94=81=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E?= Date: Thu, 7 Sep 2023 15:36:48 -0700 Subject: [PATCH 21/32] Merge master --- .all-contributorsrc | 27 + .changes/2.20.126.json | 30 + .changes/2.20.127.json | 48 + .changes/2.20.128.json | 24 + .changes/2.20.129.json | 30 + .changes/2.20.130.json | 24 + .changes/2.20.131.json | 42 + .changes/2.20.132.json | 42 + .changes/2.20.133.json | 30 + .changes/2.20.134.json | 78 + .changes/2.20.135.json | 30 + .changes/2.20.136.json | 48 + .changes/2.20.137.json | 30 + .changes/2.20.138.json | 72 + .changes/2.20.139.json | 96 + .changes/2.20.140.json | 42 + .changes/2.20.141.json | 72 + .changes/2.20.142.json | 48 + .changes/2.20.143.json | 30 + .../bugfix-AWSSDKforJavav2-85d899c.json | 6 + CHANGELOG.md | 489 +- README.md | 13 +- archetypes/archetype-app-quickstart/pom.xml | 2 +- archetypes/archetype-lambda/pom.xml | 2 +- .../archetype-resources/template.yaml | 2 +- .../apachehttpclient/reference/template.yaml | 2 +- .../reference/template.yaml | 2 +- .../nettyclient/reference/template.yaml | 2 +- .../urlhttpclient/reference/template.yaml | 2 +- .../wafregionalclient/reference/template.yaml | 2 +- archetypes/archetype-tools/pom.xml | 2 +- archetypes/pom.xml | 2 +- aws-sdk-java/pom.xml | 12 +- bom-internal/pom.xml | 2 +- bom/pom.xml | 17 +- buildspecs/update-master-from-release.yml | 5 +- bundle/pom.xml | 4 +- codegen-lite-maven-plugin/pom.xml | 2 +- codegen-lite/pom.xml | 2 +- codegen-maven-plugin/pom.xml | 2 +- codegen/pom.xml | 2 +- .../amazon/awssdk/codegen/AddOperations.java | 1 + .../codegen/IntermediateModelBuilder.java | 10 +- .../compression/RequestCompression.java | 36 + .../codegen/docs/OperationDocProvider.java | 6 +- .../tasks/SyncClientGeneratorTasks.java | 8 - .../customization/CustomizationConfig.java | 23 + .../model/intermediate/IntermediateModel.java | 9 - .../model/intermediate/OperationModel.java | 11 + .../codegen/model/service/Operation.java | 11 + .../ClientSimpleMethodsIntegrationTests.java | 110 - .../poet/client/specs/JsonProtocolSpec.java | 5 +- .../poet/client/specs/QueryProtocolSpec.java | 7 +- .../poet/client/specs/XmlProtocolSpec.java | 7 +- .../traits/RequestCompressionTrait.java | 60 + .../client/PoetClientFunctionalTests.java | 7 - .../poet/client/c2j/json/customization.config | 2 +- .../poet/client/c2j/json/service-2.json | 10 + .../poet/client/c2j/query/service-2.json | 10 + .../client/c2j/rest-json/customization.config | 2 +- .../poet/client/c2j/rest-json/service-2.json | 10 + .../poet/client/c2j/xml/service-2.json | 10 + .../test-abstract-async-client-class.java | 31 +- .../test-abstract-sync-client-class.java | 27 +- .../test-aws-json-async-client-class.java | 64 + .../client/test-json-async-client-class.java | 64 + .../test-json-async-client-interface.java | 131 +- .../poet/client/test-json-client-class.java | 55 + .../client/test-json-client-interface.java | 122 +- .../client/test-query-async-client-class.java | 61 + .../poet/client/test-query-client-class.java | 52 + .../test-simple-methods-integ-class.java | 29 - .../client/test-xml-async-client-class.java | 60 + .../poet/client/test-xml-client-class.java | 50 + .../codegen/poet/model/customization.config | 2 +- 
.../customization.config | 2 +- .../model/xmlnamespace/customization.config | 2 +- .../poet/paginators/customization.config | 2 +- .../poet/transform/customization.config | 2 +- core/annotations/pom.xml | 2 +- core/arns/pom.xml | 2 +- core/auth-crt/pom.xml | 2 +- core/auth/pom.xml | 2 +- .../AwsSignedChunkedEncodingInputStream.java | 1 - core/aws-core/pom.xml | 2 +- core/crt-core/pom.xml | 2 +- core/endpoints-spi/pom.xml | 2 +- core/imds/pom.xml | 2 +- core/json-utils/pom.xml | 2 +- core/metrics-spi/pom.xml | 2 +- core/pom.xml | 2 +- core/profiles/pom.xml | 2 +- .../awssdk/profiles/ProfileProperty.java | 12 + core/protocols/aws-cbor-protocol/pom.xml | 2 +- core/protocols/aws-json-protocol/pom.xml | 2 +- core/protocols/aws-query-protocol/pom.xml | 2 +- core/protocols/aws-xml-protocol/pom.xml | 2 +- core/protocols/pom.xml | 2 +- core/protocols/protocol-core/pom.xml | 2 +- core/regions/pom.xml | 2 +- .../regions/internal/region/endpoints.json | 442 +- core/retries-api/pom.xml | 2 +- core/retries/pom.xml | 2 +- core/sdk-core/pom.xml | 2 +- .../awssdk/core/CompressionConfiguration.java | 141 + .../core/FileRequestBodyConfiguration.java | 209 + .../core/RequestOverrideConfiguration.java | 60 +- .../amazon/awssdk/core/SdkSystemSetting.java | 12 + .../awssdk/core/async/AsyncRequestBody.java | 51 +- .../AsyncRequestBodySplitConfiguration.java | 10 + .../awssdk/core/async/SdkPublisher.java | 14 + .../builder/SdkDefaultClientBuilder.java | 93 +- .../config/ClientOverrideConfiguration.java | 73 +- .../core/client/config/SdkClientOption.java | 7 + .../interceptor/SdkExecutionAttribute.java | 1 - .../SdkInternalExecutionAttribute.java | 7 + .../ChecksumCalculatingAsyncRequestBody.java | 45 +- .../core/internal/async/ChunkBuffer.java | 54 +- .../async/CompressionAsyncRequestBody.java | 160 + .../internal/async/FileAsyncRequestBody.java | 98 +- .../FileAsyncRequestBodySplitHelper.java | 185 + .../internal/async/SplittingPublisher.java | 48 +- .../core/internal/compression/Compressor.java | 74 + .../internal/compression/CompressorType.java | 115 + .../internal/compression/GzipCompressor.java | 55 + .../internal/http/AmazonAsyncHttpClient.java | 2 + .../internal/http/AmazonSyncHttpClient.java | 2 + ...binedResponseAsyncHttpResponseHandler.java | 1 + .../stages/ApiCallMetricCollectionStage.java | 2 + .../AsyncApiCallMetricCollectionStage.java | 2 + .../pipeline/stages/CompressRequestStage.java | 208 + .../pipeline/stages/HttpChecksumStage.java | 6 +- .../interceptor/trait/RequestCompression.java | 93 + .../io/AwsChunkedEncodingInputStream.java | 90 +- .../internal/io/AwsChunkedInputStream.java | 90 + .../io/AwsCompressionInputStream.java | 170 + ...AwsUnsignedChunkedEncodingInputStream.java | 43 - ...uffer.java => UnderlyingStreamBuffer.java} | 6 +- .../CompressionContentStreamProvider.java | 55 + .../core/internal/util/ChunkContentUtils.java | 61 +- .../core/internal/util/MetricUtils.java | 21 + .../awssdk/core/metrics/CoreMetric.java | 7 + .../RetryOnExceptionsCondition.java | 8 +- .../amazon/awssdk/core/util/SdkUserAgent.java | 4 +- .../core/CompressionConfigurationTest.java | 43 + .../FileRequestBodyConfigurationTest.java | 73 + .../awssdk/core/async/ChunkBufferTest.java | 118 +- .../CompressionAsyncRequestBodyTckTest.java | 111 + .../awssdk/core/async/SdkPublishersTest.java | 18 + .../AwsChunkedEncodingInputStreamTest.java | 9 +- .../core/compression/CompressorTypeTest.java | 48 + ...ecksumCalculatingAsyncRequestBodyTest.java | 175 +- .../CompressionAsyncRequestBodyTest.java | 173 + 
.../FileAsyncRequestBodySplitHelperTest.java | 96 + .../async/FileAsyncRequestBodyTest.java | 86 + .../async/SplittingPublisherTest.java | 53 +- .../async/SplittingPublisherTestUtils.java | 70 + .../compression/GzipCompressorTest.java | 56 + .../io/AwsCompressionInputStreamTest.java | 93 + docs/LaunchChangelog.md | 1 + http-client-spi/pom.xml | 2 +- http-clients/apache-client/pom.xml | 2 +- http-clients/aws-crt-client/pom.xml | 2 +- http-clients/netty-nio-client/pom.xml | 2 +- http-clients/pom.xml | 2 +- http-clients/url-connection-client/pom.xml | 2 +- .../cloudwatch-metric-publisher/pom.xml | 2 +- metric-publishers/pom.xml | 2 +- pom.xml | 15 +- release-scripts/pom.xml | 2 +- services-custom/dynamodb-enhanced/pom.xml | 2 +- .../extensions/AtomicCounterExtension.java | 40 +- .../mapper/annotations/DynamoDbBean.java | 2 +- .../AtomicCounterExtensionTest.java | 35 +- .../functionaltests/AtomicCounterTest.java | 28 +- services-custom/iam-policy-builder/pom.xml | 2 +- .../awssdk/policybuilder/iam/IamPolicy.java | 5 +- services-custom/pom.xml | 2 +- services-custom/s3-transfer-manager/pom.xml | 2 +- services/accessanalyzer/pom.xml | 2 +- services/account/pom.xml | 2 +- services/acm/pom.xml | 2 +- services/acmpca/pom.xml | 2 +- services/alexaforbusiness/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/amp/pom.xml | 2 +- services/amplify/pom.xml | 2 +- services/amplifybackend/pom.xml | 2 +- services/amplifyuibuilder/pom.xml | 2 +- services/apigateway/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 340 +- .../codegen-resources/endpoint-tests.json | 1509 +----- .../codegen-resources/service-2.json | 70 +- services/apigatewaymanagementapi/pom.xml | 2 +- services/apigatewayv2/pom.xml | 2 +- services/appconfig/pom.xml | 2 +- services/appconfigdata/pom.xml | 2 +- services/appfabric/pom.xml | 2 +- services/appflow/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 64 +- services/appintegrations/pom.xml | 2 +- services/applicationautoscaling/pom.xml | 2 +- services/applicationcostprofiler/pom.xml | 2 +- services/applicationdiscovery/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/applicationinsights/pom.xml | 2 +- services/appmesh/pom.xml | 2 +- services/apprunner/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 11 +- services/appstream/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/appsync/pom.xml | 2 +- services/arczonalshift/pom.xml | 2 +- services/athena/pom.xml | 2 +- services/auditmanager/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 84 +- services/autoscaling/pom.xml | 2 +- services/autoscalingplans/pom.xml | 2 +- services/backup/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 13 +- services/backupgateway/pom.xml | 2 +- services/backupstorage/pom.xml | 2 +- services/batch/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/billingconductor/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 432 +- .../codegen-resources/service-2.json | 59 + services/braket/pom.xml | 2 +- services/budgets/pom.xml | 2 +- services/chime/pom.xml | 2 +- services/chimesdkidentity/pom.xml | 2 +- services/chimesdkmediapipelines/pom.xml | 2 +- 
.../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 439 +- services/chimesdkmeetings/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 29 +- services/chimesdkmessaging/pom.xml | 2 +- services/chimesdkvoice/pom.xml | 2 +- services/cleanrooms/pom.xml | 2 +- .../codegen-resources/endpoint-tests.json | 101 +- .../codegen-resources/service-2.json | 71 +- services/cloud9/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 2 +- services/cloudcontrol/pom.xml | 2 +- services/clouddirectory/pom.xml | 2 +- services/cloudformation/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/cloudfront/pom.xml | 2 +- services/cloudhsm/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 341 +- .../codegen-resources/endpoint-tests.json | 262 +- .../codegen-resources/service-2.json | 80 +- services/cloudhsmv2/pom.xml | 2 +- services/cloudsearch/pom.xml | 2 +- services/cloudsearchdomain/pom.xml | 2 +- services/cloudtrail/pom.xml | 2 +- .../codegen-resources/service-2.json | 21 +- services/cloudtraildata/pom.xml | 2 +- services/cloudwatch/pom.xml | 2 +- .../cloudwatch/CloudWatchIntegrationTest.java | 84 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 362 +- .../codegen-resources/service-2.json | 14 +- services/cloudwatchevents/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 349 +- .../codegen-resources/endpoint-tests.json | 1605 +----- .../codegen-resources/service-2.json | 2 +- services/cloudwatchlogs/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/codeartifact/pom.xml | 2 +- services/codebuild/pom.xml | 2 +- services/codecatalyst/pom.xml | 2 +- services/codecommit/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 341 +- .../codegen-resources/endpoint-tests.json | 1322 +---- .../codegen-resources/paginators-1.json | 5 + .../codegen-resources/service-2.json | 178 +- services/codedeploy/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/codeguruprofiler/pom.xml | 2 +- services/codegurureviewer/pom.xml | 2 +- services/codegurusecurity/pom.xml | 2 +- services/codepipeline/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/codestar/pom.xml | 2 +- services/codestarconnections/pom.xml | 2 +- services/codestarnotifications/pom.xml | 2 +- services/cognitoidentity/pom.xml | 2 +- services/cognitoidentityprovider/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 60 +- services/cognitosync/pom.xml | 2 +- services/comprehend/pom.xml | 2 +- services/comprehendmedical/pom.xml | 2 +- services/computeoptimizer/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 458 +- services/config/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/connect/pom.xml | 2 +- .../codegen-resources/paginators-1.json | 12 + .../codegen-resources/service-2.json | 781 ++- services/connectcampaigns/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 341 +- .../codegen-resources/endpoint-tests.json | 264 +- .../codegen-resources/service-2.json | 142 +- services/connectcases/pom.xml | 2 +- services/connectcontactlens/pom.xml | 2 +- 
services/connectparticipant/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 362 +- .../codegen-resources/endpoint-tests.json | 96 +- .../codegen-resources/service-2.json | 165 +- services/controltower/pom.xml | 2 +- services/costandusagereport/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/costexplorer/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 510 +- .../codegen-resources/service-2.json | 10 +- services/customerprofiles/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 175 +- services/databasemigration/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/databrew/pom.xml | 2 +- services/dataexchange/pom.xml | 2 +- services/datapipeline/pom.xml | 2 +- services/datasync/pom.xml | 2 +- .../codegen-resources/service-2.json | 201 +- services/dax/pom.xml | 2 +- services/detective/pom.xml | 2 +- .../codegen-resources/service-2.json | 6 +- services/devicefarm/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/devopsguru/pom.xml | 2 +- services/directconnect/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/directory/pom.xml | 2 +- services/dlm/pom.xml | 2 +- services/docdb/pom.xml | 2 +- services/docdbelastic/pom.xml | 2 +- services/drs/pom.xml | 2 +- services/dynamodb/pom.xml | 2 +- services/ebs/pom.xml | 2 +- services/ec2/pom.xml | 2 +- .../codegen-resources/customization.config | 7 +- .../codegen-resources/service-2.json | 136 +- services/ec2instanceconnect/pom.xml | 2 +- services/ecr/pom.xml | 2 +- services/ecrpublic/pom.xml | 2 +- services/ecs/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/service-2.json | 27 +- services/efs/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/eks/pom.xml | 2 +- services/elasticache/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/elasticbeanstalk/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/elasticinference/pom.xml | 2 +- services/elasticloadbalancing/pom.xml | 2 +- services/elasticloadbalancingv2/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 362 +- .../codegen-resources/service-2.json | 4 +- services/elasticsearch/pom.xml | 2 +- services/elastictranscoder/pom.xml | 2 +- services/emr/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/emrcontainers/pom.xml | 2 +- services/emrserverless/pom.xml | 2 +- services/entityresolution/pom.xml | 2 +- services/eventbridge/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 566 +-- .../codegen-resources/endpoint-tests.json | 114 +- services/evidently/pom.xml | 2 +- services/finspace/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 170 +- services/finspacedata/pom.xml | 2 +- services/firehose/pom.xml | 2 +- .../firehose/ServiceIntegrationTest.java | 132 - services/fis/pom.xml | 2 +- services/fms/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/forecast/pom.xml | 2 +- services/forecastquery/pom.xml | 2 +- services/frauddetector/pom.xml | 2 +- services/fsx/pom.xml | 2 +- .../codegen-resources/service-2.json | 50 +- services/gamelift/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 233 +- 
services/gamesparks/pom.xml | 2 +- services/glacier/pom.xml | 2 +- services/globalaccelerator/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/service-2.json | 4 +- services/glue/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/service-2.json | 48 + services/grafana/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 9 +- services/greengrass/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/greengrassv2/pom.xml | 2 +- services/groundstation/pom.xml | 2 +- services/guardduty/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/health/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 450 +- .../codegen-resources/service-2.json | 143 +- services/healthlake/pom.xml | 2 +- services/honeycode/pom.xml | 2 +- services/iam/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/identitystore/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 362 +- .../codegen-resources/endpoint-tests.json | 144 +- .../codegen-resources/service-2.json | 36 +- services/imagebuilder/pom.xml | 2 +- services/inspector/pom.xml | 2 +- services/inspector2/pom.xml | 2 +- services/internetmonitor/pom.xml | 2 +- services/iot/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/iot1clickdevices/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/iot1clickprojects/pom.xml | 2 +- services/iotanalytics/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/iotdataplane/pom.xml | 2 +- services/iotdeviceadvisor/pom.xml | 2 +- services/iotevents/pom.xml | 2 +- services/ioteventsdata/pom.xml | 2 +- services/iotfleethub/pom.xml | 2 +- services/iotfleetwise/pom.xml | 2 +- services/iotjobsdataplane/pom.xml | 2 +- services/iotroborunner/pom.xml | 2 +- services/iotsecuretunneling/pom.xml | 2 +- services/iotsitewise/pom.xml | 2 +- services/iotthingsgraph/pom.xml | 2 +- services/iottwinmaker/pom.xml | 2 +- services/iotwireless/pom.xml | 2 +- services/ivs/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 8 +- services/ivschat/pom.xml | 2 +- services/ivsrealtime/pom.xml | 2 +- services/kafka/pom.xml | 2 +- services/kafkaconnect/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 341 +- .../codegen-resources/endpoint-tests.json | 842 +-- .../codegen-resources/service-2.json | 38 +- services/kendra/pom.xml | 2 +- services/kendraranking/pom.xml | 2 +- services/keyspaces/pom.xml | 2 +- services/kinesis/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/kinesisanalytics/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/kinesisanalyticsv2/pom.xml | 2 +- services/kinesisvideo/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/kinesisvideoarchivedmedia/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/kinesisvideomedia/pom.xml | 2 +- services/kinesisvideosignaling/pom.xml | 2 +- services/kinesisvideowebrtcstorage/pom.xml | 2 +- services/kms/pom.xml | 2 +- services/lakeformation/pom.xml | 2 +- services/lambda/pom.xml | 2 +- services/lexmodelbuilding/pom.xml | 2 +- services/lexmodelsv2/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 2 +- services/lexruntime/pom.xml | 2 +- services/lexruntimev2/pom.xml | 2 +- services/licensemanager/pom.xml | 2 +- 
.../codegen-resources/customization.config | 2 +- .../licensemanagerlinuxsubscriptions/pom.xml | 2 +- .../licensemanagerusersubscriptions/pom.xml | 2 +- services/lightsail/pom.xml | 2 +- services/location/pom.xml | 2 +- services/lookoutequipment/pom.xml | 2 +- services/lookoutmetrics/pom.xml | 2 +- services/lookoutvision/pom.xml | 2 +- services/m2/pom.xml | 2 +- services/machinelearning/pom.xml | 2 +- services/macie/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/macie2/pom.xml | 2 +- services/managedblockchain/pom.xml | 2 +- services/managedblockchainquery/pom.xml | 2 +- services/marketplacecatalog/pom.xml | 2 +- services/marketplacecommerceanalytics/pom.xml | 2 +- services/marketplaceentitlement/pom.xml | 2 +- services/marketplacemetering/pom.xml | 2 +- services/mediaconnect/pom.xml | 2 +- services/mediaconvert/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 400 +- .../codegen-resources/service-2.json | 114 +- services/medialive/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 86 + services/mediapackage/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/endpoint-tests.json | 50 + .../codegen-resources/service-2.json | 8 +- services/mediapackagev2/pom.xml | 2 +- services/mediapackagevod/pom.xml | 2 +- services/mediastore/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/mediastoredata/pom.xml | 2 +- .../MediaStoreDataIntegrationTestBase.java | 152 + ...stCompressionStreamingIntegrationTest.java | 173 + ...ransferEncodingChunkedIntegrationTest.java | 127 +- .../src/it/resources/log4j2.properties | 38 + .../codegen-resources/customization.config | 2 +- services/mediatailor/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 5 +- services/medicalimaging/pom.xml | 2 +- services/memorydb/pom.xml | 2 +- services/mgn/pom.xml | 2 +- services/migrationhub/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/migrationhubconfig/pom.xml | 2 +- services/migrationhuborchestrator/pom.xml | 2 +- services/migrationhubrefactorspaces/pom.xml | 2 +- services/migrationhubstrategy/pom.xml | 2 +- services/mobile/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/mq/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/mturk/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/mwaa/pom.xml | 2 +- services/neptune/pom.xml | 2 +- services/neptunedata/pom.xml | 60 + .../codegen-resources/customization.config | 3 + .../codegen-resources/endpoint-rule-set.json | 350 ++ .../codegen-resources/endpoint-tests.json | 314 ++ .../codegen-resources/paginators-1.json | 4 + .../codegen-resources/service-2.json | 4512 +++++++++++++++++ services/networkfirewall/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 28 +- services/networkmanager/pom.xml | 2 +- services/nimble/pom.xml | 2 +- services/oam/pom.xml | 2 +- services/omics/pom.xml | 2 +- .../codegen-resources/paginators-1.json | 12 + .../codegen-resources/service-2.json | 1039 +++- .../codegen-resources/waiters-2.json | 48 + services/opensearch/pom.xml | 2 +- services/opensearchserverless/pom.xml | 2 +- services/opsworks/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/opsworkscm/pom.xml | 2 +- 
services/organizations/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 1295 ++--- .../codegen-resources/endpoint-tests.json | 148 +- .../codegen-resources/service-2.json | 86 +- services/osis/pom.xml | 2 +- services/outposts/pom.xml | 2 +- services/panorama/pom.xml | 2 +- services/paymentcryptography/pom.xml | 2 +- services/paymentcryptographydata/pom.xml | 2 +- .../codegen-resources/endpoint-tests.json | 123 +- .../codegen-resources/service-2.json | 38 +- services/pcaconnectorad/pom.xml | 60 + .../codegen-resources/endpoint-rule-set.json | 350 ++ .../codegen-resources/endpoint-tests.json | 314 ++ .../codegen-resources/paginators-1.json | 34 + .../codegen-resources/service-2.json | 2836 +++++++++++ services/personalize/pom.xml | 2 +- services/personalizeevents/pom.xml | 2 +- services/personalizeruntime/pom.xml | 2 +- services/pi/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/endpoint-tests.json | 50 + .../codegen-resources/paginators-1.json | 5 + .../codegen-resources/service-2.json | 636 ++- services/pinpoint/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/pinpointemail/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/pinpointsmsvoice/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/pinpointsmsvoicev2/pom.xml | 2 +- services/pipes/pom.xml | 2 +- services/polly/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 3 +- services/pom.xml | 4 +- services/pricing/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/privatenetworks/pom.xml | 2 +- services/proton/pom.xml | 2 +- services/qldb/pom.xml | 2 +- services/qldbsession/pom.xml | 2 +- services/quicksight/pom.xml | 2 +- .../codegen-resources/paginators-1.json | 30 + .../codegen-resources/service-2.json | 173 +- services/ram/pom.xml | 2 +- services/rbin/pom.xml | 2 +- services/rds/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/paginators-1.json | 6 + .../codegen-resources/service-2.json | 122 +- services/rdsdata/pom.xml | 2 +- services/redshift/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/redshiftdata/pom.xml | 2 +- services/redshiftserverless/pom.xml | 2 +- services/rekognition/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/resiliencehub/pom.xml | 2 +- services/resourceexplorer2/pom.xml | 2 +- services/resourcegroups/pom.xml | 2 +- services/resourcegroupstaggingapi/pom.xml | 2 +- services/robomaker/pom.xml | 2 +- services/rolesanywhere/pom.xml | 2 +- services/route53/pom.xml | 2 +- services/route53domains/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 44 +- services/route53recoverycluster/pom.xml | 2 +- services/route53recoverycontrolconfig/pom.xml | 2 +- services/route53recoveryreadiness/pom.xml | 2 +- services/route53resolver/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/rum/pom.xml | 2 +- services/s3/pom.xml | 7 +- .../s3/S3PresignerIntegrationTest.java | 33 + .../MultipartConfigurationResolver.java | 53 + .../multipart/MultipartS3AsyncClient.java | 20 +- .../multipart/UploadObjectHelper.java | 16 +- .../UploadWithKnownContentLengthHelper.java | 4 +- .../internal/signing/DefaultS3Presigner.java | 19 + .../s3/multipart/MultipartConfiguration.java | 16 +- .../services/s3/presigner/S3Presigner.java | 47 
+ .../model/DeleteObjectPresignRequest.java | 138 + .../model/PresignedDeleteObjectRequest.java | 107 + .../codegen-resources/endpoint-tests.json | 235 +- .../awssdk/services/s3/S3PresignerTest.java | 120 +- .../MultipartConfigurationResolverTest.java | 83 + .../multipart/UploadObjectHelperTest.java | 26 +- services/s3control/pom.xml | 2 +- .../EndpointAddressInterceptor.java | 223 - .../codegen-resources/endpoint-rule-set.json | 3669 ++++++-------- .../codegen-resources/endpoint-tests.json | 196 +- .../EndpointAddressInterceptorTest.java | 323 -- services/s3outposts/pom.xml | 2 +- services/sagemaker/pom.xml | 2 +- .../codegen-resources/service-2.json | 70 +- services/sagemakera2iruntime/pom.xml | 2 +- services/sagemakeredge/pom.xml | 2 +- services/sagemakerfeaturestoreruntime/pom.xml | 2 +- services/sagemakergeospatial/pom.xml | 2 +- services/sagemakermetrics/pom.xml | 2 +- services/sagemakerruntime/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 400 +- .../codegen-resources/endpoint-tests.json | 98 +- .../codegen-resources/service-2.json | 175 +- services/savingsplans/pom.xml | 2 +- services/scheduler/pom.xml | 2 +- services/schemas/pom.xml | 2 +- services/secretsmanager/pom.xml | 2 +- services/securityhub/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 163 +- services/securitylake/pom.xml | 2 +- .../codegen-resources/endpoint-tests.json | 167 +- .../codegen-resources/service-2.json | 3 +- .../serverlessapplicationrepository/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/servicecatalog/pom.xml | 2 +- services/servicecatalogappregistry/pom.xml | 2 +- services/servicediscovery/pom.xml | 2 +- services/servicequotas/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 343 +- .../codegen-resources/endpoint-tests.json | 1288 +---- .../codegen-resources/service-2.json | 235 +- services/ses/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/sesv2/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/paginators-1.json | 5 + .../codegen-resources/service-2.json | 645 ++- services/sfn/pom.xml | 2 +- services/shield/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/signer/pom.xml | 2 +- services/simspaceweaver/pom.xml | 2 +- .../codegen-resources/endpoint-tests.json | 167 +- .../codegen-resources/service-2.json | 5 + services/sms/pom.xml | 2 +- services/snowball/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/snowdevicemanagement/pom.xml | 2 +- services/sns/pom.xml | 2 +- services/sqs/pom.xml | 2 +- services/ssm/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/ssmcontacts/pom.xml | 2 +- services/ssmincidents/pom.xml | 2 +- services/ssmsap/pom.xml | 2 +- services/sso/pom.xml | 2 +- services/ssoadmin/pom.xml | 2 +- services/ssooidc/pom.xml | 2 +- services/storagegateway/pom.xml | 2 +- services/sts/pom.xml | 2 +- services/support/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/supportapp/pom.xml | 2 +- services/swf/pom.xml | 2 +- services/synthetics/pom.xml | 2 +- services/textract/pom.xml | 2 +- services/timestreamquery/pom.xml | 2 +- services/timestreamwrite/pom.xml | 2 +- services/tnb/pom.xml | 2 +- services/transcribe/pom.xml | 2 +- services/transcribestreaming/pom.xml | 2 +- services/transfer/pom.xml | 2 +- .../codegen-resources/service-2.json | 4 +- services/translate/pom.xml | 2 
+- services/verifiedpermissions/pom.xml | 2 +- .../codegen-resources/endpoint-tests.json | 161 +- .../codegen-resources/service-2.json | 97 +- services/voiceid/pom.xml | 2 +- services/vpclattice/pom.xml | 2 +- .../codegen-resources/endpoint-tests.json | 187 +- .../codegen-resources/service-2.json | 59 +- services/waf/pom.xml | 2 +- .../waf/customization.config | 2 +- .../wafregional/customization.config | 2 +- services/wafv2/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 34 +- services/wellarchitected/pom.xml | 2 +- services/wisdom/pom.xml | 2 +- services/workdocs/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/worklink/pom.xml | 2 +- services/workmail/pom.xml | 2 +- services/workmailmessageflow/pom.xml | 2 +- services/workspaces/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/workspacesweb/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 111 +- services/xray/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- test/auth-tests/pom.xml | 2 +- test/codegen-generated-classes-test/pom.xml | 2 +- .../customresponsemetadata/service-2.json | 47 + .../services/AsyncRequestCompressionTest.java | 205 + .../services/RequestCompressionTest.java | 231 + .../services/metrics/CoreMetricsTest.java | 38 +- .../async/BaseAsyncCoreMetricsTest.java | 2 + test/http-client-tests/pom.xml | 2 +- test/module-path-tests/pom.xml | 2 +- test/protocol-tests-core/pom.xml | 2 +- test/protocol-tests/pom.xml | 2 +- .../awssdk/protocol/tests/AsyncFaultTest.java | 36 +- test/region-testing/pom.xml | 2 +- test/ruleset-testing-core/pom.xml | 2 +- test/s3-benchmarks/.scripts/benchmark | 2 +- .../.scripts/create_benchmark_files | 25 +- test/s3-benchmarks/README.md | 28 +- test/s3-benchmarks/pom.xml | 12 +- .../BaseJavaS3ClientBenchmark.java | 130 + .../BaseTransferManagerBenchmark.java | 22 +- .../awssdk/s3benchmarks/BenchmarkRunner.java | 43 +- .../JavaS3ClientCopyBenchmark.java | 44 + .../JavaS3ClientUploadBenchmark.java | 95 + .../TransferManagerBenchmark.java | 27 + .../TransferManagerBenchmarkConfig.java | 80 +- .../src/main/resources/log4j2.properties | 3 + test/sdk-benchmarks/pom.xml | 9 +- .../benchmark/BenchmarkResultProcessor.java | 24 +- .../awssdk/benchmark/BenchmarkRunner.java | 94 +- .../benchmark/stats/SdkBenchmarkParams.java | 27 +- .../utils/BenchmarkProcessorOutput.java | 44 + .../amazon/awssdk/benchmark/baseline.json | 526 +- test/sdk-native-image-test/pom.xml | 2 +- test/service-test-utils/pom.xml | 2 +- .../service/http/MockAsyncHttpClient.java | 63 +- test/stability-tests/pom.xml | 2 +- test/test-utils/pom.xml | 2 +- test/tests-coverage-reporting/pom.xml | 2 +- third-party/pom.xml | 2 +- third-party/third-party-jackson-core/pom.xml | 6 +- .../pom.xml | 6 +- utils/pom.xml | 2 +- .../amazon/awssdk/utils/DateUtils.java | 11 + .../amazon/awssdk/utils/Validate.java | 13 + .../async/AddingTrailingDataSubscriber.java | 171 + .../amazon/awssdk/utils/DateUtilsTest.java | 11 + .../amazon/awssdk/utils/ValidateTest.java | 14 + .../AddingTrailingDataSubscriberTckTest.java | 75 + .../AddingTrailingDataSubscriberTest.java | 99 + 822 files changed, 37219 insertions(+), 21279 deletions(-) create mode 100644 .changes/2.20.126.json create mode 100644 .changes/2.20.127.json create mode 100644 .changes/2.20.128.json create mode 100644 .changes/2.20.129.json create mode 100644 .changes/2.20.130.json create mode 100644 .changes/2.20.131.json create 
mode 100644 .changes/2.20.132.json create mode 100644 .changes/2.20.133.json create mode 100644 .changes/2.20.134.json create mode 100644 .changes/2.20.135.json create mode 100644 .changes/2.20.136.json create mode 100644 .changes/2.20.137.json create mode 100644 .changes/2.20.138.json create mode 100644 .changes/2.20.139.json create mode 100644 .changes/2.20.140.json create mode 100644 .changes/2.20.141.json create mode 100644 .changes/2.20.142.json create mode 100644 .changes/2.20.143.json create mode 100644 .changes/next-release/bugfix-AWSSDKforJavav2-85d899c.json create mode 100644 codegen/src/main/java/software/amazon/awssdk/codegen/compression/RequestCompression.java delete mode 100644 codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/ClientSimpleMethodsIntegrationTests.java create mode 100644 codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java delete mode 100644 codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-simple-methods-integ-class.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/CompressionConfiguration.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/FileRequestBodyConfiguration.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBody.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodySplitHelper.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/Compressor.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/CompressorType.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/GzipCompressor.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/trait/RequestCompression.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedInputStream.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStream.java rename core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/{DecodedStreamBuffer.java => UnderlyingStreamBuffer.java} (93%) create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/CompressionContentStreamProvider.java create mode 100644 core/sdk-core/src/test/java/software/amazon/awssdk/core/CompressionConfigurationTest.java create mode 100644 core/sdk-core/src/test/java/software/amazon/awssdk/core/FileRequestBodyConfigurationTest.java create mode 100644 core/sdk-core/src/test/java/software/amazon/awssdk/core/async/CompressionAsyncRequestBodyTckTest.java create mode 100644 core/sdk-core/src/test/java/software/amazon/awssdk/core/compression/CompressorTypeTest.java create mode 100644 core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBodyTest.java create mode 100644 core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodySplitHelperTest.java create mode 100644 core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/SplittingPublisherTestUtils.java create mode 100644 
core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/compression/GzipCompressorTest.java create mode 100644 core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStreamTest.java delete mode 100644 services/firehose/src/it/java/software/amazon/awssdk/services/firehose/ServiceIntegrationTest.java create mode 100644 services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/MediaStoreDataIntegrationTestBase.java create mode 100644 services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/RequestCompressionStreamingIntegrationTest.java create mode 100644 services/mediastoredata/src/it/resources/log4j2.properties create mode 100644 services/neptunedata/pom.xml create mode 100644 services/neptunedata/src/main/resources/codegen-resources/customization.config create mode 100644 services/neptunedata/src/main/resources/codegen-resources/endpoint-rule-set.json create mode 100644 services/neptunedata/src/main/resources/codegen-resources/endpoint-tests.json create mode 100644 services/neptunedata/src/main/resources/codegen-resources/paginators-1.json create mode 100644 services/neptunedata/src/main/resources/codegen-resources/service-2.json create mode 100644 services/pcaconnectorad/pom.xml create mode 100644 services/pcaconnectorad/src/main/resources/codegen-resources/endpoint-rule-set.json create mode 100644 services/pcaconnectorad/src/main/resources/codegen-resources/endpoint-tests.json create mode 100644 services/pcaconnectorad/src/main/resources/codegen-resources/paginators-1.json create mode 100644 services/pcaconnectorad/src/main/resources/codegen-resources/service-2.json create mode 100644 services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartConfigurationResolver.java create mode 100644 services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/DeleteObjectPresignRequest.java create mode 100644 services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedDeleteObjectRequest.java create mode 100644 services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartConfigurationResolverTest.java delete mode 100644 services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptor.java delete mode 100644 services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptorTest.java create mode 100644 test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/AsyncRequestCompressionTest.java create mode 100644 test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java create mode 100644 test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BaseJavaS3ClientBenchmark.java create mode 100644 test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/JavaS3ClientCopyBenchmark.java create mode 100644 test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/JavaS3ClientUploadBenchmark.java create mode 100644 test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/utils/BenchmarkProcessorOutput.java create mode 100644 utils/src/main/java/software/amazon/awssdk/utils/async/AddingTrailingDataSubscriber.java create mode 100644 utils/src/test/java/software/amazon/awssdk/utils/async/AddingTrailingDataSubscriberTckTest.java create mode 100644 
utils/src/test/java/software/amazon/awssdk/utils/async/AddingTrailingDataSubscriberTest.java diff --git a/.all-contributorsrc b/.all-contributorsrc index dac08ca7d53a..0315fda2a36e 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -841,6 +841,33 @@ "contributions": [ "code" ] + }, + { + "login": "chadlwilson", + "name": "Chad Wilson", + "avatar_url": "https://avatars.githubusercontent.com/u/29788154?v=4", + "profile": "https://www.buymeacoffee.com/chadwilson", + "contributions": [ + "code" + ] + }, + { + "login": "ManishDait", + "name": "Manish Dait", + "avatar_url": "https://avatars.githubusercontent.com/u/90558243?v=4", + "profile": "https://github.com/ManishDait", + "contributions": [ + "doc" + ] + }, + { + "login": "deki", + "name": "Dennis Kieselhorst", + "avatar_url": "https://avatars.githubusercontent.com/u/858827?v=4", + "profile": "http://www.dekies.de", + "contributions": [ + "code" + ] } ], "contributorsPerLine": 7, diff --git a/.changes/2.20.126.json b/.changes/2.20.126.json new file mode 100644 index 000000000000..7e7b70595221 --- /dev/null +++ b/.changes/2.20.126.json @@ -0,0 +1,30 @@ +{ + "version": "2.20.126", + "date": "2023-08-14", + "entries": [ + { + "type": "feature", + "category": "AWS Elemental MediaPackage", + "contributor": "", + "description": "Fix SDK logging of certain fields." + }, + { + "type": "feature", + "category": "AWS Transfer Family", + "contributor": "", + "description": "Documentation updates for AWS Transfer Family" + }, + { + "type": "feature", + "category": "Amazon Omics", + "contributor": "", + "description": "This release provides support for annotation store versioning and cross account sharing for Omics Analytics" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.127.json b/.changes/2.20.127.json new file mode 100644 index 000000000000..9f34e91bbfc7 --- /dev/null +++ b/.changes/2.20.127.json @@ -0,0 +1,48 @@ +{ + "version": "2.20.127", + "date": "2023-08-15", + "entries": [ + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "AWS Glue Crawlers can now accept SerDe overrides from a custom csv classifier. The two SerDe options are LazySimpleSerDe and OpenCSVSerDe. In case, the user wants crawler to do the selection, \"None\" can be selected for this purpose." + }, + { + "type": "feature", + "category": "AWS Performance Insights", + "contributor": "", + "description": "AWS Performance Insights for Amazon RDS is launching Performance Analysis On Demand, a new feature that allows you to analyze database performance metrics and find out the performance issues. You can now use SDK to create, list, get, delete, and manage tags of performance analysis reports." + }, + { + "type": "feature", + "category": "Amazon Chime SDK Meetings", + "contributor": "", + "description": "Updated API documentation to include additional exceptions." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Documentation updates for Elastic Compute Cloud (EC2)." + }, + { + "type": "feature", + "category": "Amazon Route 53 Domains", + "contributor": "", + "description": "Provide explanation if CheckDomainTransferability return false. Provide requestId if a request is already submitted. 
Add sensitive protection for customer information" + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "SageMaker Inference Recommender now provides SupportedResponseMIMETypes from DescribeInferenceRecommendationsJob response" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.128.json b/.changes/2.20.128.json new file mode 100644 index 000000000000..d82772868289 --- /dev/null +++ b/.changes/2.20.128.json @@ -0,0 +1,24 @@ +{ + "version": "2.20.128", + "date": "2023-08-16", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Add support for the `SERVICE_ENDPOINT` metric. This metric represents the endpoint (scheme and authority) that the request was sent to." + }, + { + "type": "feature", + "category": "Amazon CloudWatch", + "contributor": "", + "description": "Doc-only update to incorporate several doc bug fixes" + }, + { + "type": "feature", + "category": "Amazon Lex Model Building V2", + "contributor": "", + "description": "This release updates the Custom Vocabulary Weight field to support a value of 0." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.129.json b/.changes/2.20.129.json new file mode 100644 index 000000000000..a3508927538d --- /dev/null +++ b/.changes/2.20.129.json @@ -0,0 +1,30 @@ +{ + "version": "2.20.129", + "date": "2023-08-17", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Sends final checksum chunk and trailer when only onComplete() is called by upstream (empty content)" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Adds support for SubnetConfigurations to allow users to select their own IPv4 and IPv6 addresses for Interface VPC endpoints" + }, + { + "type": "feature", + "category": "Amazon GameLift", + "contributor": "", + "description": "Amazon GameLift updates its instance types support." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.130.json b/.changes/2.20.130.json new file mode 100644 index 000000000000..a8140fe8ea8d --- /dev/null +++ b/.changes/2.20.130.json @@ -0,0 +1,24 @@ +{ + "version": "2.20.130", + "date": "2023-08-18", + "entries": [ + { + "type": "feature", + "category": "AWS CodeCommit", + "contributor": "", + "description": "Add new ListFileCommitHistory operation to retrieve commits which introduced changes to a specific file." + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "contributor": "", + "description": "Added Inspector Lambda code Vulnerability section to ASFF, including GeneratorDetails, EpssScore, ExploitAvailable, and CodeVulnerabilities." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.131.json b/.changes/2.20.131.json new file mode 100644 index 000000000000..64dbe75ffac4 --- /dev/null +++ b/.changes/2.20.131.json @@ -0,0 +1,42 @@ +{ + "version": "2.20.131", + "date": "2023-08-21", + "entries": [ + { + "type": "feature", + "category": "AWS Cloud9", + "contributor": "", + "description": "Doc only update to add Ubuntu 22.04 as an Image ID option for Cloud9" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "The DeleteKeyPair API has been updated to return the keyPairId when an existing key pair is deleted." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Adding support for RDS Aurora Global Database Unplanned Failover" + }, + { + "type": "feature", + "category": "Amazon Route 53 Domains", + "contributor": "", + "description": "Fixed typos in description fields" + }, + { + "type": "feature", + "category": "FinSpace User Environment Management service", + "contributor": "", + "description": "Allow customers to manage outbound traffic from their Kx Environment when attaching a transit gateway by providing network acl entries. Allow the customer to choose how they want to update the databases on a cluster allowing updates to possibly be faster than usual." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.132.json b/.changes/2.20.132.json new file mode 100644 index 000000000000..81e001df3066 --- /dev/null +++ b/.changes/2.20.132.json @@ -0,0 +1,42 @@ +{ + "version": "2.20.132", + "date": "2023-08-22", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2 - DynamoDb Enhanced", + "contributor": "", + "description": "Changes the default behavior of the DynamoDb Enhanced atomic counter extension to automatically filter out any counter attributes in the item to be updated. This allows users to read and update items without DynamoDb collision errors." + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "contributor": "", + "description": "This release adds the LastUpdatedDate and LastUsedDate timestamps to help you manage your cost allocation tags." + }, + { + "type": "feature", + "category": "AWS Global Accelerator", + "contributor": "", + "description": "Global Accelerator now supports Client Ip Preservation for Network Load Balancer endpoints." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Adding parameters to CreateCustomDbEngineVersion reserved for future use." + }, + { + "type": "feature", + "category": "Amazon Verified Permissions", + "contributor": "", + "description": "Documentation updates for Amazon Verified Permissions. Increases max results per page for ListPolicyStores, ListPolicies, and ListPolicyTemplates APIs from 20 to 50." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.133.json b/.changes/2.20.133.json new file mode 100644 index 000000000000..44b68082fe97 --- /dev/null +++ b/.changes/2.20.133.json @@ -0,0 +1,30 @@ +{ + "version": "2.20.133", + "date": "2023-08-23", + "entries": [ + { + "type": "feature", + "category": "Amazon API Gateway", + "contributor": "", + "description": "This release adds RootResourceId to GetRestApi response." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Marking fields as sensitive on BundleTask and GetPasswordData" + }, + { + "type": "feature", + "category": "Amazon Polly", + "contributor": "", + "description": "Amazon Polly adds 1 new voice - Zayd (ar-AE)" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.134.json b/.changes/2.20.134.json new file mode 100644 index 000000000000..2f57f1afbed3 --- /dev/null +++ b/.changes/2.20.134.json @@ -0,0 +1,78 @@ +{ + "version": "2.20.134", + "date": "2023-08-24", + "entries": [ + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "contributor": "", + "description": "This release includes additional audio channel tags in Quicktime outputs, support for film grain synthesis for AV1 outputs, ability to create audio-only FLAC outputs, and ability to specify Amazon S3 destination storage class." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "contributor": "", + "description": "MediaLive now supports passthrough of KLV data to a HLS output group with a TS container. MediaLive now supports setting an attenuation mode for AC3 audio when the coding mode is 3/2 LFE. MediaLive now supports specifying whether to include filler NAL units in RTMP output group settings." + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "Added API attributes that help in the monitoring of sessions." + }, + { + "type": "feature", + "category": "AWS MediaTailor", + "contributor": "", + "description": "Adds new source location AUTODETECT_SIGV4 access type." + }, + { + "type": "feature", + "category": "AWS S3 Control", + "contributor": "", + "description": "Updates to endpoint ruleset tests to address Smithy validation issues and standardize the capitalization of DualStack." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Amazon EC2 M7a instances, powered by 4th generation AMD EPYC processors, deliver up to 50% higher performance compared to M6a instances. Amazon EC2 Hpc7a instances, powered by 4th Gen AMD EPYC processors, deliver up to 2.5x better performance compared to Amazon EC2 Hpc6a instances." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "contributor": "", + "description": "Excel support in Snapshot Export APIs. Removed Required trait for some insight Computations. Namespace-shared Folders support. Global Filters support. Table pin Column support." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "This release updates the supported versions for Percona XtraBackup in Aurora MySQL." + }, + { + "type": "feature", + "category": "Amazon S3", + "contributor": "", + "description": "Add support for presigned `DeleteObject` in `S3Presigner`." 
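As an illustration of the presigned `DeleteObject` support noted in the entry above, the sketch below shows how a pre-signed delete URL might be generated, assuming the new operation mirrors the existing `presignGetObject`/`presignPutObject` pattern on `S3Presigner`; the bucket and key names are placeholders, not values from this release.

```java
import java.net.URL;
import java.time.Duration;
import software.amazon.awssdk.services.s3.model.DeleteObjectRequest;
import software.amazon.awssdk.services.s3.presigner.S3Presigner;
import software.amazon.awssdk.services.s3.presigner.model.DeleteObjectPresignRequest;
import software.amazon.awssdk.services.s3.presigner.model.PresignedDeleteObjectRequest;

public class PresignDeleteExample {
    public static void main(String[] args) {
        try (S3Presigner presigner = S3Presigner.create()) {
            // The object to delete; bucket and key are placeholders.
            DeleteObjectRequest deleteRequest = DeleteObjectRequest.builder()
                    .bucket("amzn-s3-demo-bucket")
                    .key("path/to/object")
                    .build();

            // Pre-sign the DeleteObject call so it can be executed later without SDK credentials.
            DeleteObjectPresignRequest presignRequest = DeleteObjectPresignRequest.builder()
                    .signatureDuration(Duration.ofMinutes(10))
                    .deleteObjectRequest(deleteRequest)
                    .build();

            PresignedDeleteObjectRequest presigned = presigner.presignDeleteObject(presignRequest);
            URL url = presigned.url();
            System.out.println("Presigned DELETE URL: " + url);
        }
    }
}
```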
+ }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "contributor": "", + "description": "Updates to endpoint ruleset tests to address Smithy validation issues." + }, + { + "type": "feature", + "category": "Amazon Verified Permissions", + "contributor": "", + "description": "Documentation updates for Amazon Verified Permissions." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.135.json b/.changes/2.20.135.json new file mode 100644 index 000000000000..f3eb1ccfd4a6 --- /dev/null +++ b/.changes/2.20.135.json @@ -0,0 +1,30 @@ +{ + "version": "2.20.135", + "date": "2023-08-25", + "entries": [ + { + "type": "feature", + "category": "AWS CloudTrail", + "contributor": "", + "description": "Add ThrottlingException with error code 429 to handle CloudTrail Delegated Admin request rate exceeded on organization resources." + }, + { + "type": "feature", + "category": "Amazon CloudWatch", + "contributor": "", + "description": "Doc-only update to get doc bug fixes into the SDK docs" + }, + { + "type": "feature", + "category": "Amazon Detective", + "contributor": "", + "description": "Added protections to interacting with fields containing customer information." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.136.json b/.changes/2.20.136.json new file mode 100644 index 000000000000..8ab12fa01843 --- /dev/null +++ b/.changes/2.20.136.json @@ -0,0 +1,48 @@ +{ + "version": "2.20.136", + "date": "2023-08-28", + "entries": [ + { + "type": "feature", + "category": "AWS Backup", + "contributor": "", + "description": "Add support for customizing time zone for backup window in backup plan rules." + }, + { + "type": "feature", + "category": "AWS Compute Optimizer", + "contributor": "", + "description": "This release enables AWS Compute Optimizer to analyze and generate licensing optimization recommendations for sql server running on EC2 instances." + }, + { + "type": "feature", + "category": "AWS Organizations", + "contributor": "", + "description": "Documentation updates for permissions and links." + }, + { + "type": "feature", + "category": "Amazon Security Lake", + "contributor": "", + "description": "Remove incorrect regex enforcement on pagination tokens." + }, + { + "type": "feature", + "category": "Amazon WorkSpaces Web", + "contributor": "", + "description": "WorkSpaces Web now enables Admins to configure which cookies are synchronized from an end-user's local browser to the in-session browser. In conjunction with a browser extension, this feature enables enhanced Single-Sign On capability by reducing the number of times an end-user has to authenticate." + }, + { + "type": "feature", + "category": "Service Quotas", + "contributor": "", + "description": "Service Quotas now supports viewing the applied quota value and requesting a quota increase for a specific resource in an AWS account." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.137.json b/.changes/2.20.137.json new file mode 100644 index 000000000000..c28afa9ea9f1 --- /dev/null +++ b/.changes/2.20.137.json @@ -0,0 +1,30 @@ +{ + "version": "2.20.137", + "date": "2023-08-29", + "entries": [ + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "contributor": "", + "description": "Added API example requests and responses for several operations. Fixed the validation regex for user pools Identity Provider name." + }, + { + "type": "feature", + "category": "Amazon FSx", + "contributor": "", + "description": "Documentation updates for project quotas." + }, + { + "type": "feature", + "category": "Amazon Omics", + "contributor": "", + "description": "Add RetentionMode support for Runs." + }, + { + "type": "feature", + "category": "Amazon Simple Email Service", + "contributor": "", + "description": "Adds support for the new Export and Message Insights features: create, get, list and cancel export jobs; get message insights." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.138.json b/.changes/2.20.138.json new file mode 100644 index 000000000000..1bae4067c056 --- /dev/null +++ b/.changes/2.20.138.json @@ -0,0 +1,72 @@ +{ + "version": "2.20.138", + "date": "2023-08-30", + "entries": [ + { + "type": "feature", + "category": "AWS App Runner", + "contributor": "", + "description": "App Runner adds support for Bitbucket. You can now create App Runner connection that connects to your Bitbucket repositories and deploy App Runner service with the source code stored in a Bitbucket repository." + }, + { + "type": "feature", + "category": "AWS Audit Manager", + "contributor": "", + "description": "This release marks some assessment metadata as sensitive. We added a sensitive trait to the following attributes: assessmentName, emailAddress, scope, createdBy, lastUpdatedBy, and userName." + }, + { + "type": "feature", + "category": "AWS Clean Rooms Service", + "contributor": "", + "description": "This release decouples member abilities in a collaboration. With this change, the member who can run queries no longer needs to be the same as the member who can receive results." + }, + { + "type": "feature", + "category": "AWS DataSync", + "contributor": "", + "description": "AWS DataSync introduces Task Reports, a new feature that provides detailed reports of data transfer operations for each task execution." + }, + { + "type": "feature", + "category": "AWS Network Firewall", + "contributor": "", + "description": "Network Firewall increasing pagination token string length" + }, + { + "type": "feature", + "category": "Amazon Appflow", + "contributor": "", + "description": "Add SAP source connector parallel and pagination feature" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "The DryRun field in EC2 APIs is no longer hidden and has been exposed. To determine the DryRun status for an EC2 operation using the dryRun field, users are now required to catch the Ec2Exception and extract the dryRun status from the errorCode of AwsErrorDetails within the Ec2Exception." + }, + { + "type": "feature", + "category": "Amazon NeptuneData", + "contributor": "", + "description": "Allows customers to execute data plane actions like bulk loading graphs, issuing graph queries using Gremlin and openCypher directly from the SDK." 
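To illustrate the NeptuneData entry above, here is a minimal sketch of issuing a Gremlin query through the new `neptunedata` data-plane client. It assumes the generated client follows the SDK's usual naming conventions (`NeptunedataClient`, `executeGremlinQuery`, `ExecuteGremlinQueryRequest`); the region and cluster endpoint are placeholders.

```java
import java.net.URI;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.neptunedata.NeptunedataClient;
import software.amazon.awssdk.services.neptunedata.model.ExecuteGremlinQueryRequest;
import software.amazon.awssdk.services.neptunedata.model.ExecuteGremlinQueryResponse;

public class NeptuneDataExample {
    public static void main(String[] args) {
        // The data-plane client talks directly to the cluster, so the endpoint is overridden
        // with the cluster's own address (placeholder host below).
        try (NeptunedataClient client = NeptunedataClient.builder()
                .region(Region.US_EAST_1)
                .endpointOverride(URI.create("https://my-neptune-cluster.example.us-east-1.neptune.amazonaws.com:8182"))
                .build()) {

            ExecuteGremlinQueryResponse response = client.executeGremlinQuery(
                    ExecuteGremlinQueryRequest.builder()
                            .gremlinQuery("g.V().limit(1)")
                            .build());

            System.out.println(response);
        }
    }
}
```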
+ }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Amazon SageMaker Canvas adds IdentityProviderOAuthSettings support for CanvasAppSettings" + }, + { + "type": "feature", + "category": "PcaConnectorAd", + "contributor": "", + "description": "The Connector for AD allows you to use a fully-managed AWS Private CA as a drop-in replacement for your self-managed enterprise CAs without local agents or proxy servers. Enterprises that use AD to manage Windows environments can reduce their private certificate authority (CA) costs and complexity." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.139.json b/.changes/2.20.139.json new file mode 100644 index 000000000000..6a4caa38493d --- /dev/null +++ b/.changes/2.20.139.json @@ -0,0 +1,96 @@ +{ + "version": "2.20.139", + "date": "2023-08-31", + "entries": [ + { + "type": "feature", + "category": "AWS Health APIs and Notifications", + "contributor": "", + "description": "Adds new API DescribeEntityAggregatesForOrganization that retrieves entity aggregates across your organization. Also adds support for resource status filtering in DescribeAffectedEntitiesForOrganization, resource status aggregates in the DescribeEntityAggregates response, and new resource statuses." + }, + { + "type": "feature", + "category": "AWS Lambda", + "contributor": "deki", + "description": "Use latest AWS Lambda Java 17 runtime version in SAM template of archetype" + }, + { + "type": "feature", + "category": "Amazon Chime SDK Media Pipelines", + "contributor": "", + "description": "This release adds support for feature Voice Enhancement for Call Recording as part of Amazon Chime SDK call analytics." + }, + { + "type": "feature", + "category": "Amazon CloudHSM", + "contributor": "", + "description": "Deprecating CloudHSM Classic API Service." + }, + { + "type": "feature", + "category": "Amazon CloudWatch Events", + "contributor": "", + "description": "Documentation updates for CloudWatch Events." + }, + { + "type": "feature", + "category": "AmazonConnectCampaignService", + "contributor": "", + "description": "Amazon Connect outbound campaigns has launched agentless dialing mode which enables customers to make automated outbound calls without agent engagement. This release updates three of the campaign management API's to support the new agentless dialing mode and the new dialing capacity field." + }, + { + "type": "feature", + "category": "Amazon Connect Customer Profiles", + "contributor": "", + "description": "Adds sensitive trait to various shapes in Customer Profiles API model." + }, + { + "type": "feature", + "category": "Amazon Connect Participant Service", + "contributor": "", + "description": "Amazon Connect Participant Service adds the ability to get a view resource using a view token, which is provided in a participant message, with the release of the DescribeView API." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "This release adds support for an account-level setting that you can use to configure the number of days for AWS Fargate task retirement." + }, + { + "type": "feature", + "category": "Amazon Interactive Video Service", + "contributor": "", + "description": "Updated \"type\" description for CreateChannel, UpdateChannel, Channel, and ChannelSummary." 
+ }, + { + "type": "feature", + "category": "Amazon Managed Grafana", + "contributor": "", + "description": "Marking SAML RoleValues attribute as sensitive and updating VpcConfiguration attributes to match documentation." + }, + { + "type": "feature", + "category": "Amazon SageMaker Runtime", + "contributor": "", + "description": "This release adds a new InvokeEndpointWithResponseStream API to support streaming of model responses." + }, + { + "type": "feature", + "category": "Managed Streaming for Kafka Connect", + "contributor": "", + "description": "Minor model changes for Kafka Connect as well as endpoint updates." + }, + { + "type": "feature", + "category": "Payment Cryptography Data Plane", + "contributor": "", + "description": "Make KeyCheckValue field optional when using asymmetric keys as Key Check Values typically only apply to symmetric keys" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.140.json b/.changes/2.20.140.json new file mode 100644 index 000000000000..7e04391446e0 --- /dev/null +++ b/.changes/2.20.140.json @@ -0,0 +1,42 @@ +{ + "version": "2.20.140", + "date": "2023-09-01", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Update Jackson to `2.15.2`." + }, + { + "type": "feature", + "category": "AWS SSO Identity Store", + "contributor": "", + "description": "New Identity Store content for China Region launch" + }, + { + "type": "feature", + "category": "Amazon Chime SDK Media Pipelines", + "contributor": "", + "description": "This release adds support for the Voice Analytics feature for customer-owned KVS streams as part of the Amazon Chime SDK call analytics." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "Amazon Connect adds the ability to read, create, update, delete, and list view resources, and adds the ability to read, create, delete, and list view versions." + }, + { + "type": "feature", + "category": "Amazon NeptuneData", + "contributor": "", + "description": "Removed the descriptive text in the introduction." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.141.json b/.changes/2.20.141.json new file mode 100644 index 000000000000..57cd31c8993b --- /dev/null +++ b/.changes/2.20.141.json @@ -0,0 +1,72 @@ +{ + "version": "2.20.141", + "date": "2023-09-05", + "entries": [ + { + "type": "feature", + "category": "AWSBillingConductor", + "contributor": "", + "description": "This release adds support for line item filtering in for the custom line item resource." + }, + { + "type": "feature", + "category": "AWS Cloud9", + "contributor": "", + "description": "Added support for Ubuntu 22.04 that was not picked up in a previous Trebuchet request. Doc-only update." + }, + { + "type": "feature", + "category": "AWS Compute Optimizer", + "contributor": "", + "description": "This release adds support to provide recommendations for G4dn and P3 instances that use NVIDIA GPUs." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Add support for RequestCompression trait to GZIP compress requests." 
+ }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "Documentation only update for Amazon ECS." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Introducing Amazon EC2 C7gd, M7gd, and R7gd Instances with up to 3.8 TB of local NVMe-based SSD block-level storage. These instances are powered by AWS Graviton3 processors, delivering up to 25% better performance over Graviton2-based instances." + }, + { + "type": "feature", + "category": "Amazon EventBridge", + "contributor": "", + "description": "Improve Endpoint Ruleset test coverage." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Add support for feature integration with AWS Backup." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "SageMaker Neo now supports data input shape derivation for Pytorch 2.0 and XGBoost compilation job for cloud instance targets. You can skip DataInputConfig field during compilation job creation. You can also access derived information from model in DescribeCompilationJob response." + }, + { + "type": "feature", + "category": "Amazon VPC Lattice", + "contributor": "", + "description": "This release adds Lambda event structure version config support for LAMBDA target groups. It also adds newline support for auth policies." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.142.json b/.changes/2.20.142.json new file mode 100644 index 000000000000..55c55d519dea --- /dev/null +++ b/.changes/2.20.142.json @@ -0,0 +1,48 @@ +{ + "version": "2.20.142", + "date": "2023-09-06", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fix an issue where the multirelease classes were not being shaded or removed." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "contributor": "", + "description": "Adds advanced Output Locking options for Epoch Locking: Custom Epoch and Jam Sync Time" + }, + { + "type": "feature", + "category": "AWS WAFV2", + "contributor": "", + "description": "The targeted protection level of the Bot Control managed rule group now provides optional, machine-learning analysis of traffic statistics to detect some bot-related activity. You can enable or disable the machine learning functionality through the API." + }, + { + "type": "feature", + "category": "Amazon Appflow", + "contributor": "", + "description": "Adding OAuth2.0 support for servicenow connector." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release adds 'outpost' location type to the DescribeInstanceTypeOfferings API, allowing customers that have been allowlisted for outpost to query their offerings in the API." + }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "contributor": "", + "description": "This release enables default UDP connection termination and disabling unhealthy target connection termination for Network Load Balancers." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.143.json b/.changes/2.20.143.json new file mode 100644 index 000000000000..08d3a0038b14 --- /dev/null +++ b/.changes/2.20.143.json @@ -0,0 +1,30 @@ +{ + "version": "2.20.143", + "date": "2023-09-07", + "entries": [ + { + "type": "feature", + "category": "AWS SecurityHub", + "contributor": "", + "description": "Documentation updates for AWS Security Hub" + }, + { + "type": "feature", + "category": "AWS SimSpace Weaver", + "contributor": "", + "description": "BucketName and ObjectKey are now required for the S3Location data type. BucketName is now required for the S3Destination data type." + }, + { + "type": "feature", + "category": "Amazon NeptuneData", + "contributor": "", + "description": "Minor changes to send unsigned requests to Neptune clusters" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/next-release/bugfix-AWSSDKforJavav2-85d899c.json b/.changes/next-release/bugfix-AWSSDKforJavav2-85d899c.json new file mode 100644 index 000000000000..700666cdbbc0 --- /dev/null +++ b/.changes/next-release/bugfix-AWSSDKforJavav2-85d899c.json @@ -0,0 +1,6 @@ +{ + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fixed an issue in async client where the future would get stuck if there is a server error and the server fails to return response body that matches with the content length specified in the response header. See [#4354](https://github.com/aws/aws-sdk-java-v2/issues/4354)" +} diff --git a/CHANGELOG.md b/CHANGELOG.md index 7e409994228b..222bad8f5c6e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,490 @@ +# __2.20.143__ __2023-09-07__ +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __AWS SecurityHub__ + - ### Features + - Documentation updates for AWS Security Hub + +## __AWS SimSpace Weaver__ + - ### Features + - BucketName and ObjectKey are now required for the S3Location data type. BucketName is now required for the S3Destination data type. + +## __Amazon NeptuneData__ + - ### Features + - Minor changes to send unsigned requests to Neptune clusters + +# __2.20.142__ __2023-09-06__ +## __AWS Elemental MediaLive__ + - ### Features + - Adds advanced Output Locking options for Epoch Locking: Custom Epoch and Jam Sync Time + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + + - ### Bugfixes + - Fix an issue where the multirelease classes were not being shaded or removed. + +## __AWS WAFV2__ + - ### Features + - The targeted protection level of the Bot Control managed rule group now provides optional, machine-learning analysis of traffic statistics to detect some bot-related activity. You can enable or disable the machine learning functionality through the API. + +## __Amazon Appflow__ + - ### Features + - Adding OAuth2.0 support for servicenow connector. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds 'outpost' location type to the DescribeInstanceTypeOfferings API, allowing customers that have been allowlisted for outpost to query their offerings in the API. + +## __Elastic Load Balancing__ + - ### Features + - This release enables default UDP connection termination and disabling unhealthy target connection termination for Network Load Balancers. 
+ +# __2.20.141__ __2023-09-05__ +## __AWS Cloud9__ + - ### Features + - Added support for Ubuntu 22.04 that was not picked up in a previous Trebuchet request. Doc-only update. + +## __AWS Compute Optimizer__ + - ### Features + - This release adds support to provide recommendations for G4dn and P3 instances that use NVIDIA GPUs. + +## __AWS SDK for Java v2__ + - ### Features + - Add support for RequestCompression trait to GZIP compress requests. + - Updated endpoint and partition metadata. + +## __AWSBillingConductor__ + - ### Features + - This release adds support for line item filtering in for the custom line item resource. + +## __Amazon EC2 Container Service__ + - ### Features + - Documentation only update for Amazon ECS. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Introducing Amazon EC2 C7gd, M7gd, and R7gd Instances with up to 3.8 TB of local NVMe-based SSD block-level storage. These instances are powered by AWS Graviton3 processors, delivering up to 25% better performance over Graviton2-based instances. + +## __Amazon EventBridge__ + - ### Features + - Improve Endpoint Ruleset test coverage. + +## __Amazon Relational Database Service__ + - ### Features + - Add support for feature integration with AWS Backup. + +## __Amazon SageMaker Service__ + - ### Features + - SageMaker Neo now supports data input shape derivation for Pytorch 2.0 and XGBoost compilation job for cloud instance targets. You can skip DataInputConfig field during compilation job creation. You can also access derived information from model in DescribeCompilationJob response. + +## __Amazon VPC Lattice__ + - ### Features + - This release adds Lambda event structure version config support for LAMBDA target groups. It also adds newline support for auth policies. + +# __2.20.140__ __2023-09-01__ +## __AWS SDK for Java v2__ + - ### Features + - Update Jackson to `2.15.2`. + - Updated endpoint and partition metadata. + +## __AWS SSO Identity Store__ + - ### Features + - New Identity Store content for China Region launch + +## __Amazon Chime SDK Media Pipelines__ + - ### Features + - This release adds support for the Voice Analytics feature for customer-owned KVS streams as part of the Amazon Chime SDK call analytics. + +## __Amazon Connect Service__ + - ### Features + - Amazon Connect adds the ability to read, create, update, delete, and list view resources, and adds the ability to read, create, delete, and list view versions. + +## __Amazon NeptuneData__ + - ### Features + - Removed the descriptive text in the introduction. + +# __2.20.139__ __2023-08-31__ +## __AWS Health APIs and Notifications__ + - ### Features + - Adds new API DescribeEntityAggregatesForOrganization that retrieves entity aggregates across your organization. Also adds support for resource status filtering in DescribeAffectedEntitiesForOrganization, resource status aggregates in the DescribeEntityAggregates response, and new resource statuses. + +## __AWS Lambda__ + - ### Features + - Use latest AWS Lambda Java 17 runtime version in SAM template of archetype + - Contributed by: [@deki](https://github.com/deki) + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Chime SDK Media Pipelines__ + - ### Features + - This release adds support for feature Voice Enhancement for Call Recording as part of Amazon Chime SDK call analytics. + +## __Amazon CloudHSM__ + - ### Features + - Deprecating CloudHSM Classic API Service. 
+ +## __Amazon CloudWatch Events__ + - ### Features + - Documentation updates for CloudWatch Events. + +## __Amazon Connect Customer Profiles__ + - ### Features + - Adds sensitive trait to various shapes in Customer Profiles API model. + +## __Amazon Connect Participant Service__ + - ### Features + - Amazon Connect Participant Service adds the ability to get a view resource using a view token, which is provided in a participant message, with the release of the DescribeView API. + +## __Amazon EC2 Container Service__ + - ### Features + - This release adds support for an account-level setting that you can use to configure the number of days for AWS Fargate task retirement. + +## __Amazon Interactive Video Service__ + - ### Features + - Updated "type" description for CreateChannel, UpdateChannel, Channel, and ChannelSummary. + +## __Amazon Managed Grafana__ + - ### Features + - Marking SAML RoleValues attribute as sensitive and updating VpcConfiguration attributes to match documentation. + +## __Amazon SageMaker Runtime__ + - ### Features + - This release adds a new InvokeEndpointWithResponseStream API to support streaming of model responses. + +## __AmazonConnectCampaignService__ + - ### Features + - Amazon Connect outbound campaigns has launched agentless dialing mode which enables customers to make automated outbound calls without agent engagement. This release updates three of the campaign management API's to support the new agentless dialing mode and the new dialing capacity field. + +## __Managed Streaming for Kafka Connect__ + - ### Features + - Minor model changes for Kafka Connect as well as endpoint updates. + +## __Payment Cryptography Data Plane__ + - ### Features + - Make KeyCheckValue field optional when using asymmetric keys as Key Check Values typically only apply to symmetric keys + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@deki](https://github.com/deki) +# __2.20.138__ __2023-08-30__ +## __AWS App Runner__ + - ### Features + - App Runner adds support for Bitbucket. You can now create App Runner connection that connects to your Bitbucket repositories and deploy App Runner service with the source code stored in a Bitbucket repository. + +## __AWS Audit Manager__ + - ### Features + - This release marks some assessment metadata as sensitive. We added a sensitive trait to the following attributes: assessmentName, emailAddress, scope, createdBy, lastUpdatedBy, and userName. + +## __AWS Clean Rooms Service__ + - ### Features + - This release decouples member abilities in a collaboration. With this change, the member who can run queries no longer needs to be the same as the member who can receive results. + +## __AWS DataSync__ + - ### Features + - AWS DataSync introduces Task Reports, a new feature that provides detailed reports of data transfer operations for each task execution. + +## __AWS Network Firewall__ + - ### Features + - Network Firewall increasing pagination token string length + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Appflow__ + - ### Features + - Add SAP source connector parallel and pagination feature + +## __Amazon Elastic Compute Cloud__ + - ### Features + - The DryRun field in EC2 APIs is no longer hidden and has been exposed. To determine the DryRun status for an EC2 operation using the dryRun field, users are now required to catch the Ec2Exception and extract the dryRun status from the errorCode of AwsErrorDetails within the Ec2Exception. 
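Following the EC2 DryRun note above, a minimal sketch of how a caller might surface the dry-run outcome by catching `Ec2Exception` and reading the error code from `AwsErrorDetails`; the AMI id is a placeholder, and the specific error-code handling shown is an illustrative assumption rather than something prescribed by the release note.

```java
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.Ec2Exception;
import software.amazon.awssdk.services.ec2.model.InstanceType;
import software.amazon.awssdk.services.ec2.model.RunInstancesRequest;

public class DryRunExample {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            try {
                // With the dryRun field now exposed on the request, the permission check
                // is requested explicitly instead of through a hidden field.
                ec2.runInstances(RunInstancesRequest.builder()
                        .imageId("ami-0123456789abcdef0") // placeholder AMI id
                        .instanceType(InstanceType.T3_MICRO)
                        .minCount(1)
                        .maxCount(1)
                        .dryRun(true)
                        .build());
            } catch (Ec2Exception e) {
                // The dry-run outcome is reported as a service error code, not a response field.
                String errorCode = e.awsErrorDetails().errorCode();
                if ("DryRunOperation".equals(errorCode)) {
                    System.out.println("Dry run succeeded: the caller is authorized to run instances.");
                } else {
                    System.out.println("Dry run failed with error code: " + errorCode);
                }
            }
        }
    }
}
```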
+ +## __Amazon NeptuneData__ + - ### Features + - Allows customers to execute data plane actions like bulk loading graphs, issuing graph queries using Gremlin and openCypher directly from the SDK. + +## __Amazon SageMaker Service__ + - ### Features + - Amazon SageMaker Canvas adds IdentityProviderOAuthSettings support for CanvasAppSettings + +## __PcaConnectorAd__ + - ### Features + - The Connector for AD allows you to use a fully-managed AWS Private CA as a drop-in replacement for your self-managed enterprise CAs without local agents or proxy servers. Enterprises that use AD to manage Windows environments can reduce their private certificate authority (CA) costs and complexity. + +# __2.20.137__ __2023-08-29__ +## __Amazon Cognito Identity Provider__ + - ### Features + - Added API example requests and responses for several operations. Fixed the validation regex for user pools Identity Provider name. + +## __Amazon FSx__ + - ### Features + - Documentation updates for project quotas. + +## __Amazon Omics__ + - ### Features + - Add RetentionMode support for Runs. + +## __Amazon Simple Email Service__ + - ### Features + - Adds support for the new Export and Message Insights features: create, get, list and cancel export jobs; get message insights. + +# __2.20.136__ __2023-08-28__ +## __AWS Backup__ + - ### Features + - Add support for customizing time zone for backup window in backup plan rules. + +## __AWS Compute Optimizer__ + - ### Features + - This release enables AWS Compute Optimizer to analyze and generate licensing optimization recommendations for sql server running on EC2 instances. + +## __AWS Organizations__ + - ### Features + - Documentation updates for permissions and links. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Security Lake__ + - ### Features + - Remove incorrect regex enforcement on pagination tokens. + +## __Amazon WorkSpaces Web__ + - ### Features + - WorkSpaces Web now enables Admins to configure which cookies are synchronized from an end-user's local browser to the in-session browser. In conjunction with a browser extension, this feature enables enhanced Single-Sign On capability by reducing the number of times an end-user has to authenticate. + +## __Service Quotas__ + - ### Features + - Service Quotas now supports viewing the applied quota value and requesting a quota increase for a specific resource in an AWS account. + +# __2.20.135__ __2023-08-25__ +## __AWS CloudTrail__ + - ### Features + - Add ThrottlingException with error code 429 to handle CloudTrail Delegated Admin request rate exceeded on organization resources. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon CloudWatch__ + - ### Features + - Doc-only update to get doc bug fixes into the SDK docs + +## __Amazon Detective__ + - ### Features + - Added protections to interacting with fields containing customer information. + +# __2.20.134__ __2023-08-24__ +## __AWS Elemental MediaConvert__ + - ### Features + - This release includes additional audio channel tags in Quicktime outputs, support for film grain synthesis for AV1 outputs, ability to create audio-only FLAC outputs, and ability to specify Amazon S3 destination storage class. + +## __AWS Elemental MediaLive__ + - ### Features + - MediaLive now supports passthrough of KLV data to a HLS output group with a TS container. MediaLive now supports setting an attenuation mode for AC3 audio when the coding mode is 3/2 LFE. 
MediaLive now supports specifying whether to include filler NAL units in RTMP output group settings. + +## __AWS Glue__ + - ### Features + - Added API attributes that help in the monitoring of sessions. + +## __AWS MediaTailor__ + - ### Features + - Adds new source location AUTODETECT_SIGV4 access type. + +## __AWS S3 Control__ + - ### Features + - Updates to endpoint ruleset tests to address Smithy validation issues and standardize the capitalization of DualStack. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Amazon EC2 M7a instances, powered by 4th generation AMD EPYC processors, deliver up to 50% higher performance compared to M6a instances. Amazon EC2 Hpc7a instances, powered by 4th Gen AMD EPYC processors, deliver up to 2.5x better performance compared to Amazon EC2 Hpc6a instances. + +## __Amazon QuickSight__ + - ### Features + - Excel support in Snapshot Export APIs. Removed Required trait for some insight Computations. Namespace-shared Folders support. Global Filters support. Table pin Column support. + +## __Amazon Relational Database Service__ + - ### Features + - This release updates the supported versions for Percona XtraBackup in Aurora MySQL. + +## __Amazon S3__ + - ### Features + - Add support for presigned `DeleteObject` in `S3Presigner`. + +## __Amazon Simple Storage Service__ + - ### Features + - Updates to endpoint ruleset tests to address Smithy validation issues. + +## __Amazon Verified Permissions__ + - ### Features + - Documentation updates for Amazon Verified Permissions. + +# __2.20.133__ __2023-08-23__ +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon API Gateway__ + - ### Features + - This release adds RootResourceId to GetRestApi response. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Marking fields as sensitive on BundleTask and GetPasswordData + +## __Amazon Polly__ + - ### Features + - Amazon Polly adds 1 new voice - Zayd (ar-AE) + +# __2.20.132__ __2023-08-22__ +## __AWS Cost Explorer Service__ + - ### Features + - This release adds the LastUpdatedDate and LastUsedDate timestamps to help you manage your cost allocation tags. + +## __AWS Global Accelerator__ + - ### Features + - Global Accelerator now supports Client Ip Preservation for Network Load Balancer endpoints. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __AWS SDK for Java v2 - DynamoDb Enhanced__ + - ### Bugfixes + - Changes the default behavior of the DynamoDb Enhanced atomic counter extension to automatically filter out any counter attributes in the item to be updated. This allows users to read and update items without DynamoDb collision errors. + +## __Amazon Relational Database Service__ + - ### Features + - Adding parameters to CreateCustomDbEngineVersion reserved for future use. + +## __Amazon Verified Permissions__ + - ### Features + - Documentation updates for Amazon Verified Permissions. Increases max results per page for ListPolicyStores, ListPolicies, and ListPolicyTemplates APIs from 20 to 50. + +# __2.20.131__ __2023-08-21__ +## __AWS Cloud9__ + - ### Features + - Doc only update to add Ubuntu 22.04 as an Image ID option for Cloud9 + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. 
+ +## __Amazon Elastic Compute Cloud__ + - ### Features + - The DeleteKeyPair API has been updated to return the keyPairId when an existing key pair is deleted. + +## __Amazon Relational Database Service__ + - ### Features + - Adding support for RDS Aurora Global Database Unplanned Failover + +## __Amazon Route 53 Domains__ + - ### Features + - Fixed typos in description fields + +## __FinSpace User Environment Management service__ + - ### Features + - Allow customers to manage outbound traffic from their Kx Environment when attaching a transit gateway by providing network acl entries. Allow the customer to choose how they want to update the databases on a cluster allowing updates to possibly be faster than usual. + +# __2.20.130__ __2023-08-18__ +## __AWS CodeCommit__ + - ### Features + - Add new ListFileCommitHistory operation to retrieve commits which introduced changes to a specific file. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __AWS SecurityHub__ + - ### Features + - Added Inspector Lambda code Vulnerability section to ASFF, including GeneratorDetails, EpssScore, ExploitAvailable, and CodeVulnerabilities. + +# __2.20.129__ __2023-08-17__ +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + + - ### Bugfixes + - Sends final checksum chunk and trailer when only onComplete() is called by upstream (empty content) + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Adds support for SubnetConfigurations to allow users to select their own IPv4 and IPv6 addresses for Interface VPC endpoints + +## __Amazon GameLift__ + - ### Features + - Amazon GameLift updates its instance types support. + +# __2.20.128__ __2023-08-16__ +## __AWS SDK for Java v2__ + - ### Bugfixes + - Add support for the `SERVICE_ENDPOINT` metric. This metric represents the endpoint (scheme and authority) that the request was sent to. + +## __Amazon CloudWatch__ + - ### Features + - Doc-only update to incorporate several doc bug fixes + +## __Amazon Lex Model Building V2__ + - ### Features + - This release updates the Custom Vocabulary Weight field to support a value of 0. + +# __2.20.127__ __2023-08-15__ +## __AWS Glue__ + - ### Features + - AWS Glue Crawlers can now accept SerDe overrides from a custom csv classifier. The two SerDe options are LazySimpleSerDe and OpenCSVSerDe. In case, the user wants crawler to do the selection, "None" can be selected for this purpose. + +## __AWS Performance Insights__ + - ### Features + - AWS Performance Insights for Amazon RDS is launching Performance Analysis On Demand, a new feature that allows you to analyze database performance metrics and find out the performance issues. You can now use SDK to create, list, get, delete, and manage tags of performance analysis reports. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Chime SDK Meetings__ + - ### Features + - Updated API documentation to include additional exceptions. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Documentation updates for Elastic Compute Cloud (EC2). + +## __Amazon Route 53 Domains__ + - ### Features + - Provide explanation if CheckDomainTransferability return false. Provide requestId if a request is already submitted. 
Add sensitive protection for customer information + +## __Amazon SageMaker Service__ + - ### Features + - SageMaker Inference Recommender now provides SupportedResponseMIMETypes from DescribeInferenceRecommendationsJob response + +# __2.20.126__ __2023-08-14__ +## __AWS Elemental MediaPackage__ + - ### Features + - Fix SDK logging of certain fields. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __AWS Transfer Family__ + - ### Features + - Documentation updates for AWS Transfer Family + +## __Amazon Omics__ + - ### Features + - This release provides support for annotation store versioning and cross account sharing for Omics Analytics + # __2.20.125__ __2023-08-11__ ## __AWS Config__ - ### Features @@ -1184,7 +1671,7 @@ Special thanks to the following contributors to this release: ## __Contributors__ Special thanks to the following contributors to this release: -[@breader124](https://github.com/breader124), [@bmaizels](https://github.com/bmaizels) +[@bmaizels](https://github.com/bmaizels), [@breader124](https://github.com/breader124) # __2.20.85__ __2023-06-13__ ## __AWS CloudTrail__ - ### Features diff --git a/README.md b/README.md index e5589f0ea261..6550ff19b439 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ [![Gitter](https://badges.gitter.im/aws/aws-sdk-java-v2.svg)](https://gitter.im/aws/aws-sdk-java-v2?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![codecov](https://codecov.io/gh/aws/aws-sdk-java-v2/branch/master/graph/badge.svg)](https://codecov.io/gh/aws/aws-sdk-java-v2) -[![All Contributors](https://img.shields.io/badge/all_contributors-93-orange.svg?style=flat-square)](#contributors-) +[![All Contributors](https://img.shields.io/badge/all_contributors-96-orange.svg?style=flat-square)](#contributors-) The **AWS SDK for Java 2.0** is a rewrite of 1.0 with some great new features. As with version 1.0, @@ -52,7 +52,7 @@ To automatically manage module versions (currently all modules have the same ver software.amazon.awssdk bom - 2.20.125 + 2.20.143 pom import @@ -86,12 +86,12 @@ Alternatively you can add dependencies for the specific services you use only: software.amazon.awssdk ec2 - 2.20.125 + 2.20.143 software.amazon.awssdk s3 - 2.20.125 + 2.20.143 ``` @@ -103,7 +103,7 @@ You can import the whole SDK into your project (includes *ALL* services). Please software.amazon.awssdk aws-sdk-java - 2.20.125 + 2.20.143 ``` @@ -311,6 +311,9 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d Olivier L Applin
 💻
    Adrian Chlebosz 💻
  + Chad Wilson 💻
  + Manish Dait 📖
  + Dennis Kieselhorst
    💻 diff --git a/archetypes/archetype-app-quickstart/pom.xml b/archetypes/archetype-app-quickstart/pom.xml index 3f1e05ce2f61..185ed0a02a0d 100644 --- a/archetypes/archetype-app-quickstart/pom.xml +++ b/archetypes/archetype-app-quickstart/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 diff --git a/archetypes/archetype-lambda/pom.xml b/archetypes/archetype-lambda/pom.xml index 2f26b4591b66..c9941332f2d4 100644 --- a/archetypes/archetype-lambda/pom.xml +++ b/archetypes/archetype-lambda/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 archetype-lambda diff --git a/archetypes/archetype-lambda/src/main/resources/archetype-resources/template.yaml b/archetypes/archetype-lambda/src/main/resources/archetype-resources/template.yaml index 5d6bb1592002..28847ba71c7b 100644 --- a/archetypes/archetype-lambda/src/main/resources/archetype-resources/template.yaml +++ b/archetypes/archetype-lambda/src/main/resources/archetype-resources/template.yaml @@ -6,7 +6,7 @@ Resources: ${handlerClassName}Function: Type: AWS::Serverless::Function Properties: - Runtime: java8 + Runtime: java17 Handler: ${package}.${handlerClassName}::handleRequest Timeout: 60 MemorySize: 512 diff --git a/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/template.yaml b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/template.yaml index 513c0aed185b..3c9a904981b0 100644 --- a/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/template.yaml +++ b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/template.yaml @@ -6,7 +6,7 @@ Resources: MyApacheFunctionFunction: Type: AWS::Serverless::Function Properties: - Runtime: java8 + Runtime: java17 Handler: software.amazonaws.test.MyApacheFunction::handleRequest Timeout: 60 MemorySize: 512 diff --git a/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/template.yaml b/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/template.yaml index 797f24dc0751..a95d14c736d9 100644 --- a/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/template.yaml +++ b/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/template.yaml @@ -6,7 +6,7 @@ Resources: MyDynamoDbStreamsFunctionFunction: Type: AWS::Serverless::Function Properties: - Runtime: java8 + Runtime: java17 Handler: software.amazonaws.test.MyDynamoDbStreamsFunction::handleRequest Timeout: 60 MemorySize: 512 diff --git a/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/template.yaml b/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/template.yaml index e674e2599da3..b665d80fc983 100644 --- a/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/template.yaml +++ b/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/template.yaml @@ -6,7 +6,7 @@ Resources: MyNettyFunctionFunction: Type: AWS::Serverless::Function Properties: - Runtime: java8 + Runtime: java17 Handler: software.amazonaws.test.MyNettyFunction::handleRequest Timeout: 60 MemorySize: 512 diff --git a/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/template.yaml b/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/template.yaml index 
ca0bb619fd4e..18e2d0deb753 100644 --- a/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/template.yaml +++ b/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/template.yaml @@ -6,7 +6,7 @@ Resources: AppFunction: Type: AWS::Serverless::Function Properties: - Runtime: java8 + Runtime: java17 Handler: software.amazonaws.test.App::handleRequest Timeout: 60 MemorySize: 512 diff --git a/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/template.yaml b/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/template.yaml index 70ee17fae8a3..f2c0e30feb79 100644 --- a/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/template.yaml +++ b/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/template.yaml @@ -6,7 +6,7 @@ Resources: MyWafRegionalFunctionFunction: Type: AWS::Serverless::Function Properties: - Runtime: java8 + Runtime: java17 Handler: software.amazonaws.test.MyWafRegionalFunction::handleRequest Timeout: 60 MemorySize: 512 diff --git a/archetypes/archetype-tools/pom.xml b/archetypes/archetype-tools/pom.xml index ba43c242541f..026ab79a43ef 100644 --- a/archetypes/archetype-tools/pom.xml +++ b/archetypes/archetype-tools/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 diff --git a/archetypes/pom.xml b/archetypes/pom.xml index 8948948ed7de..9826f72da31f 100644 --- a/archetypes/pom.xml +++ b/archetypes/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 archetypes diff --git a/aws-sdk-java/pom.xml b/aws-sdk-java/pom.xml index 9330b8937e74..1f307028e68f 100644 --- a/aws-sdk-java/pom.xml +++ b/aws-sdk-java/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ../pom.xml aws-sdk-java @@ -1788,6 +1788,16 @@ Amazon AutoScaling, etc). managedblockchainquery ${awsjavasdk.version} + + software.amazon.awssdk + pcaconnectorad + ${awsjavasdk.version} + + + software.amazon.awssdk + neptunedata + ${awsjavasdk.version} + ${project.artifactId}-${project.version} diff --git a/bom-internal/pom.xml b/bom-internal/pom.xml index 4cc8aebcb6e0..7a76eb5731c1 100644 --- a/bom-internal/pom.xml +++ b/bom-internal/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 diff --git a/bom/pom.xml b/bom/pom.xml index a8f66acbe388..ac0fde385860 100644 --- a/bom/pom.xml +++ b/bom/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ../pom.xml bom @@ -167,6 +167,11 @@ aws-crt-client ${awsjavasdk.version} + + software.amazon.awssdk + auth-crt + ${awsjavasdk.version} + software.amazon.awssdk iam-policy-builder @@ -1938,6 +1943,16 @@ managedblockchainquery ${awsjavasdk.version} + + software.amazon.awssdk + pcaconnectorad + ${awsjavasdk.version} + + + software.amazon.awssdk + neptunedata + ${awsjavasdk.version} + diff --git a/buildspecs/update-master-from-release.yml b/buildspecs/update-master-from-release.yml index 7738fa2e5fc4..2ab0d0f6889f 100644 --- a/buildspecs/update-master-from-release.yml +++ b/buildspecs/update-master-from-release.yml @@ -26,13 +26,15 @@ phases: - MINOR=$(echo $RELEASE_VERSION | cut -d'.' -f2) - POINT=$(echo $RELEASE_VERSION | cut -d'.' 
-f3) - NEW_VERSION_SNAPSHOT="$MAJOR.$MINOR.$((POINT + 1))-SNAPSHOT" - - echo "New shapshot version - $NEW_VERSION_SNAPSHOT" + - echo "New snapshot version - $NEW_VERSION_SNAPSHOT" - - git checkout master - git merge public/release --no-edit - - MASTER_VERSION=`mvn -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec` - echo "Master version - $MASTER_VERSION" + - + - trap 'echo "Error: Failed to update versions"; exit 1' ERR - | if [ "$MASTER_VERSION" != "$NEW_VERSION_SNAPSHOT" ]; then @@ -43,4 +45,5 @@ phases: git commit -am "Update to next snapshot version: $NEW_VERSION_SNAPSHOT" fi - + - git status - git push diff --git a/bundle/pom.xml b/bundle/pom.xml index 4f39da56f129..ab6d529ab801 100644 --- a/bundle/pom.xml +++ b/bundle/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT bundle jar @@ -63,7 +63,7 @@ org.apache.maven.plugins maven-shade-plugin - 3.1.0 + 3.5.0 diff --git a/codegen-lite-maven-plugin/pom.xml b/codegen-lite-maven-plugin/pom.xml index 98612d5457c7..7e78f834b3df 100644 --- a/codegen-lite-maven-plugin/pom.xml +++ b/codegen-lite-maven-plugin/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ../pom.xml codegen-lite-maven-plugin diff --git a/codegen-lite/pom.xml b/codegen-lite/pom.xml index e2bc275177ff..4ebcce75f315 100644 --- a/codegen-lite/pom.xml +++ b/codegen-lite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT codegen-lite AWS Java SDK :: Code Generator Lite diff --git a/codegen-maven-plugin/pom.xml b/codegen-maven-plugin/pom.xml index 7ac847882e31..bd454aaea787 100644 --- a/codegen-maven-plugin/pom.xml +++ b/codegen-maven-plugin/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ../pom.xml codegen-maven-plugin diff --git a/codegen/pom.xml b/codegen/pom.xml index f0c2a0650b40..547a928f64d9 100644 --- a/codegen/pom.xml +++ b/codegen/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT codegen AWS Java SDK :: Code Generator diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java b/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java index 2db612a5f6dd..79bb81470f5e 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java @@ -165,6 +165,7 @@ public Map constructOperations() { operationModel.setEndpointTrait(op.getEndpoint()); operationModel.setHttpChecksumRequired(op.isHttpChecksumRequired()); operationModel.setHttpChecksum(op.getHttpChecksum()); + operationModel.setRequestCompression(op.getRequestCompression()); operationModel.setStaticContextParams(op.getStaticContextParams()); Input input = op.getInput(); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/IntermediateModelBuilder.java b/codegen/src/main/java/software/amazon/awssdk/codegen/IntermediateModelBuilder.java index 66df8fe572b1..74374b40cd36 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/IntermediateModelBuilder.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/IntermediateModelBuilder.java @@ -253,14 +253,16 @@ private void setSimpleMethods(IntermediateModel model) { } else { inputShape.setSimpleMethod(false); - boolean methodIsNotBlacklisted = !config.getBlacklistedSimpleMethods().contains(methodName) || - 
config.getBlacklistedSimpleMethods().stream().noneMatch(m -> m.equals("*")); + boolean methodIsNotExcluded = !config.getExcludedSimpleMethods().contains(methodName) || + config.getExcludedSimpleMethods().stream().noneMatch(m -> m.equals("*")) || + !config.getBlacklistedSimpleMethods().contains(methodName) || + config.getBlacklistedSimpleMethods().stream().noneMatch(m -> m.equals("*")); boolean methodHasNoRequiredMembers = !CollectionUtils.isNullOrEmpty(inputShape.getRequired()); boolean methodIsNotStreaming = !operation.isStreaming(); boolean methodHasSimpleMethodVerb = methodName.matches(Constant.APPROVED_SIMPLE_METHOD_VERBS); - if (methodIsNotBlacklisted && methodHasNoRequiredMembers && methodIsNotStreaming && methodHasSimpleMethodVerb) { - log.warn("A potential simple method exists that isn't whitelisted or blacklisted: " + methodName); + if (methodIsNotExcluded && methodHasNoRequiredMembers && methodIsNotStreaming && methodHasSimpleMethodVerb) { + log.warn("A potential simple method exists that isn't explicitly excluded or included: " + methodName); } } }); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/compression/RequestCompression.java b/codegen/src/main/java/software/amazon/awssdk/codegen/compression/RequestCompression.java new file mode 100644 index 000000000000..69d53bc7e30f --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/compression/RequestCompression.java @@ -0,0 +1,36 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.compression; + +import java.util.List; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * Class to map the RequestCompression trait of an operation. 
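A minimal sketch of how this codegen model is meant to be populated, assuming a c2j operation that declares "requestCompression": {"encodings": ["gzip"]} as in the test service models later in this patch; the wrapper class name below is hypothetical, not part of the patch:

import java.util.Arrays;
import software.amazon.awssdk.codegen.compression.RequestCompression;

final class RequestCompressionModelSketch {
    // Builds the trait roughly as AddOperations.constructOperations() carries it onto the
    // OperationModel for an operation modeled with "requestCompression": {"encodings": ["gzip"]}.
    static RequestCompression gzipOnly() {
        RequestCompression requestCompression = new RequestCompression();
        requestCompression.setEncodings(Arrays.asList("gzip"));
        return requestCompression;
    }
}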
+ */ +@SdkInternalApi +public class RequestCompression { + + private List encodings; + + public List getEncodings() { + return encodings; + } + + public void setEncodings(List encodings) { + this.encodings = encodings; + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/docs/OperationDocProvider.java b/codegen/src/main/java/software/amazon/awssdk/codegen/docs/OperationDocProvider.java index 65dc3346fdda..d78e37b6c3ba 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/docs/OperationDocProvider.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/docs/OperationDocProvider.java @@ -18,6 +18,7 @@ import static software.amazon.awssdk.codegen.internal.DocumentationUtils.createLinkToServiceDocumentation; import static software.amazon.awssdk.codegen.internal.DocumentationUtils.stripHtmlTags; +import com.squareup.javapoet.ClassName; import java.util.Collections; import java.util.List; import java.util.stream.Collectors; @@ -171,10 +172,13 @@ final List> getThrows() { final void emitRequestParm(DocumentationBuilder docBuilder) { String parameterDocs = stripHtmlTags(opModel.getInput().getDocumentation()); + String shapeName = opModel.getInputShape().getShapeName(); + ClassName fcqn = ClassName.get(model.getMetadata().getFullModelPackageName(), shapeName); + if (config.isConsumerBuilder()) { docBuilder.param(opModel.getInput().getVariableName(), "A {@link Consumer} that will call methods on {@link %s.Builder} to create a request. %s", - opModel.getInputShape().getC2jName(), + fcqn.toString(), parameterDocs); } else { docBuilder.param(opModel.getInput().getVariableName(), parameterDocs); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/SyncClientGeneratorTasks.java b/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/SyncClientGeneratorTasks.java index e09550480846..b18229efbe40 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/SyncClientGeneratorTasks.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/SyncClientGeneratorTasks.java @@ -22,7 +22,6 @@ import software.amazon.awssdk.codegen.emitters.GeneratorTaskParams; import software.amazon.awssdk.codegen.poet.builder.SyncClientBuilderClass; import software.amazon.awssdk.codegen.poet.builder.SyncClientBuilderInterface; -import software.amazon.awssdk.codegen.poet.client.ClientSimpleMethodsIntegrationTests; import software.amazon.awssdk.codegen.poet.client.DelegatingSyncClientClass; import software.amazon.awssdk.codegen.poet.client.SyncClientClass; import software.amazon.awssdk.codegen.poet.client.SyncClientInterface; @@ -48,9 +47,6 @@ protected List createTasks() throws Exception { tasks.add(createClientBuilderTask()); tasks.add(createClientInterfaceTask()); tasks.add(createClientBuilderInterfaceTask()); - if (!model.simpleMethodsRequiringTesting().isEmpty()) { - tasks.add(createClientSimpleMethodsTest()); - } if (model.getEndpointOperation().isPresent()) { tasks.add(createEndpointDiscoveryCacheLoaderTask()); } @@ -80,10 +76,6 @@ private GeneratorTask createClientBuilderInterfaceTask() throws IOException { return createPoetGeneratorTask(new SyncClientBuilderInterface(model)); } - private GeneratorTask createClientSimpleMethodsTest() throws IOException { - return createPoetGeneratorTestTask(new ClientSimpleMethodsIntegrationTests(model)); - } - private GeneratorTask createEndpointDiscoveryCacheLoaderTask() throws IOException { return createPoetGeneratorTask(new 
EndpointDiscoveryCacheLoaderGenerator(generatorTaskParams)); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java index ff6373118501..71bd4768cc42 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java @@ -103,6 +103,13 @@ public class CustomizationConfig { /** * APIs that have no required arguments in their model but can't be called via a simple method */ + private List excludedSimpleMethods = new ArrayList<>(); + + /** + * APIs that have no required arguments in their model but can't be called via a simple method. + * Superseded by {@link #excludedSimpleMethods} + */ + @Deprecated private List blacklistedSimpleMethods = new ArrayList<>(); /** @@ -378,10 +385,26 @@ public void setServiceSpecificHttpConfig(String serviceSpecificHttpConfig) { this.serviceSpecificHttpConfig = serviceSpecificHttpConfig; } + public List getExcludedSimpleMethods() { + return excludedSimpleMethods; + } + + public void setExcludedSimpleMethods(List excludedSimpleMethods) { + this.excludedSimpleMethods = excludedSimpleMethods; + } + + /** + * Use {@link #getExcludedSimpleMethods()} + */ + @Deprecated public List getBlacklistedSimpleMethods() { return blacklistedSimpleMethods; } + /** + * Use {@link #setExcludedSimpleMethods(List)} + */ + @Deprecated public void setBlacklistedSimpleMethods(List blackListedSimpleMethods) { this.blacklistedSimpleMethods = blackListedSimpleMethods; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/IntermediateModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/IntermediateModel.java index 6ab3f112b23b..892245e0cffa 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/IntermediateModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/IntermediateModel.java @@ -21,10 +21,8 @@ import java.io.UncheckedIOException; import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.stream.Collectors; import software.amazon.awssdk.awscore.AwsResponse; import software.amazon.awssdk.awscore.AwsResponseMetadata; import software.amazon.awssdk.codegen.model.config.customization.CustomizationConfig; @@ -277,13 +275,6 @@ private String getResponseMetadataClassName() { return AwsResponseMetadata.class.getName(); } - @JsonIgnore - public List simpleMethodsRequiringTesting() { - return getOperations().values().stream() - .filter(v -> v.getInputShape().isSimpleMethod()) - .collect(Collectors.toList()); - } - public Optional getEndpointOperation() { return endpointOperation; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java index 11dbe6794b8f..1ff197191126 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java @@ -20,6 +20,7 @@ import java.util.List; import java.util.Map; import software.amazon.awssdk.codegen.checksum.HttpChecksum; +import 
software.amazon.awssdk.codegen.compression.RequestCompression; import software.amazon.awssdk.codegen.docs.ClientType; import software.amazon.awssdk.codegen.docs.DocConfiguration; import software.amazon.awssdk.codegen.docs.OperationDocs; @@ -71,6 +72,8 @@ public class OperationModel extends DocumentationModel { private HttpChecksum httpChecksum; + private RequestCompression requestCompression; + @JsonIgnore private Map staticContextParams; @@ -309,6 +312,14 @@ public void setHttpChecksum(HttpChecksum httpChecksum) { this.httpChecksum = httpChecksum; } + public RequestCompression getRequestCompression() { + return requestCompression; + } + + public void setRequestCompression(RequestCompression requestCompression) { + this.requestCompression = requestCompression; + } + public Map getStaticContextParams() { return staticContextParams; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Operation.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Operation.java index 4f1d573b0133..e8a6826c17aa 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Operation.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Operation.java @@ -18,6 +18,7 @@ import java.util.List; import java.util.Map; import software.amazon.awssdk.codegen.checksum.HttpChecksum; +import software.amazon.awssdk.codegen.compression.RequestCompression; import software.amazon.awssdk.codegen.model.intermediate.EndpointDiscovery; public class Operation { @@ -52,6 +53,8 @@ public class Operation { private HttpChecksum httpChecksum; + private RequestCompression requestCompression; + private Map staticContextParams; public String getName() { @@ -189,6 +192,14 @@ public void setHttpChecksum(HttpChecksum httpChecksum) { this.httpChecksum = httpChecksum; } + public RequestCompression getRequestCompression() { + return requestCompression; + } + + public void setRequestCompression(RequestCompression requestCompression) { + this.requestCompression = requestCompression; + } + public Map getStaticContextParams() { return staticContextParams; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/ClientSimpleMethodsIntegrationTests.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/ClientSimpleMethodsIntegrationTests.java deleted file mode 100644 index 43130a1eec76..000000000000 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/ClientSimpleMethodsIntegrationTests.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.codegen.poet.client; - -import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.FieldSpec; -import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.TypeSpec; -import java.util.Optional; -import javax.lang.model.element.Modifier; -import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; -import software.amazon.awssdk.codegen.model.intermediate.OperationModel; -import software.amazon.awssdk.codegen.poet.ClassSpec; -import software.amazon.awssdk.codegen.poet.PoetExtension; -import software.amazon.awssdk.codegen.poet.PoetUtils; -import software.amazon.awssdk.regions.Region; - -public class ClientSimpleMethodsIntegrationTests implements ClassSpec { - - private final IntermediateModel model; - private final PoetExtension poetExtensions; - - public ClientSimpleMethodsIntegrationTests(IntermediateModel model) { - this.model = model; - this.poetExtensions = new PoetExtension(model); - } - - @Override - public TypeSpec poetSpec() { - ClassName interfaceClass = poetExtensions.getClientClass(model.getMetadata().getSyncInterface()); - - TypeSpec.Builder builder = PoetUtils.createClassBuilder(className()) - .addModifiers(Modifier.PUBLIC) - .addField(FieldSpec.builder(interfaceClass, "client") - .addModifiers(Modifier.PRIVATE, Modifier.STATIC) - .build()) - .addMethod(setup()); - - model.simpleMethodsRequiringTesting().stream().map(this::simpleMethodsTest).forEach(builder::addMethod); - - return builder.build(); - } - - @Override - public ClassName className() { - return poetExtensions.getClientClass("SimpleMethodsIntegrationTest"); - } - - /** - * Creates a setup method for instantiating a new client. If no regions are present for a service, - * us-east-1 will be used. If the service is available in aws-global, that region will be used. If the - * service is not available in aws-global but is in us-east-1, that region will be used. If a service is - * not available in us-east-1 or aws-global, the first region in the available regions for a service will - * be used. 
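The Javadoc above, on the generator being deleted, describes a region-selection order for the generated integration-test client. A condensed sketch of that order, assuming a hypothetical helper and a caller-supplied default region (neither is part of the SDK):

import java.util.List;
import software.amazon.awssdk.regions.Region;

final class RegionFallbackSketch {
    // Mirrors the order documented above: empty region list -> default region,
    // then aws-global, then us-east-1, then the first region the service publishes.
    static Region pick(List<Region> serviceRegions, Region defaultRegion) {
        if (serviceRegions.isEmpty()) {
            return defaultRegion;
        }
        if (serviceRegions.contains(Region.AWS_GLOBAL)) {
            return Region.AWS_GLOBAL;
        }
        if (serviceRegions.contains(Region.US_EAST_1)) {
            return Region.US_EAST_1;
        }
        return serviceRegions.get(0);
    }
}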
- */ - private MethodSpec setup() { - String defaultRegion = Optional.ofNullable(model.getCustomizationConfig().getDefaultSimpleMethodTestRegion()) - .orElse("US_EAST_1"); - ClassName beforeClass = ClassName.get("org.junit", "BeforeClass"); - ClassName interfaceClass = poetExtensions.getClientClass(model.getMetadata().getSyncInterface()); - return MethodSpec.methodBuilder("setup") - .addAnnotation(beforeClass) - .addModifiers(Modifier.PUBLIC, Modifier.STATIC) - .beginControlFlow("if ($T.serviceMetadata().regions().isEmpty())", interfaceClass) - .addStatement("client = $T.builder().region($T.$L).build()", interfaceClass, Region.class, defaultRegion) - .endControlFlow() - .beginControlFlow("else if ($T.serviceMetadata().regions().contains($T.AWS_GLOBAL))", - interfaceClass, - Region.class) - .addStatement("client = $T.builder().region($T.AWS_GLOBAL).build()", - interfaceClass, - Region.class) - .endControlFlow() - .beginControlFlow("else if ($T.serviceMetadata().regions().contains($T.US_EAST_1))", - interfaceClass, - Region.class) - .addStatement("client = $T.builder().region($T.US_EAST_1).build()", - interfaceClass, - Region.class) - .endControlFlow() - .beginControlFlow("else") - .addStatement("client = $1T.builder().region($1T.serviceMetadata().regions().get(0)).build()", - interfaceClass) - .endControlFlow() - .build(); - } - - private MethodSpec simpleMethodsTest(OperationModel opModel) { - ClassName testClass = ClassName.get("org.junit", "Test"); - return MethodSpec.methodBuilder(opModel.getMethodName() + "_SimpleMethod_Succeeds") - .addAnnotation(testClass) - .addException(Exception.class) - .addModifiers(Modifier.PUBLIC) - .addStatement("client.$N()", opModel.getMethodName()) - .build(); - } -} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java index 41361004b80f..44922d4e2b32 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java @@ -42,6 +42,7 @@ import software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumRequiredTrait; import software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumTrait; import software.amazon.awssdk.codegen.poet.client.traits.NoneAuthTypeRequestTrait; +import software.amazon.awssdk.codegen.poet.client.traits.RequestCompressionTrait; import software.amazon.awssdk.codegen.poet.eventstream.EventStreamUtils; import software.amazon.awssdk.codegen.poet.model.EventStreamSpecHelper; import software.amazon.awssdk.core.SdkPojoBuilder; @@ -187,7 +188,8 @@ public CodeBlock executionHandler(OperationModel opModel) { .add(".withMetricCollector(apiCallMetricCollector)") .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) - .add(NoneAuthTypeRequestTrait.create(opModel)); + .add(NoneAuthTypeRequestTrait.create(opModel)) + .add(RequestCompressionTrait.create(opModel, model)); if (opModel.hasStreamingInput()) { codeBlock.add(".withRequestBody(requestBody)") @@ -257,6 +259,7 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) .add(NoneAuthTypeRequestTrait.create(opModel)) + .add(RequestCompressionTrait.create(opModel, model)) .add(".withInput($L)$L);", 
opModel.getInput().getVariableName(), asyncResponseTransformerVariable(isStreaming, isRestJson, opModel)); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java index 74e15930c87e..daef19b9def3 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java @@ -31,6 +31,7 @@ import software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumRequiredTrait; import software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumTrait; import software.amazon.awssdk.codegen.poet.client.traits.NoneAuthTypeRequestTrait; +import software.amazon.awssdk.codegen.poet.client.traits.RequestCompressionTrait; import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import software.amazon.awssdk.core.http.HttpResponseHandler; @@ -116,7 +117,8 @@ public CodeBlock executionHandler(OperationModel opModel) { .add(".withMetricCollector(apiCallMetricCollector)") .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) - .add(NoneAuthTypeRequestTrait.create(opModel)); + .add(NoneAuthTypeRequestTrait.create(opModel)) + .add(RequestCompressionTrait.create(opModel, intermediateModel)); if (opModel.hasStreamingInput()) { @@ -151,7 +153,8 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper .add(".withMetricCollector(apiCallMetricCollector)\n") .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) - .add(NoneAuthTypeRequestTrait.create(opModel)); + .add(NoneAuthTypeRequestTrait.create(opModel)) + .add(RequestCompressionTrait.create(opModel, intermediateModel)); builder.add(hostPrefixExpression(opModel) + asyncRequestBody + ".withInput($L)$L);", diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java index 59769ff51d44..3f58b49edc7b 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java @@ -37,6 +37,7 @@ import software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumRequiredTrait; import software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumTrait; import software.amazon.awssdk.codegen.poet.client.traits.NoneAuthTypeRequestTrait; +import software.amazon.awssdk.codegen.poet.client.traits.RequestCompressionTrait; import software.amazon.awssdk.codegen.poet.eventstream.EventStreamUtils; import software.amazon.awssdk.codegen.poet.model.EventStreamSpecHelper; import software.amazon.awssdk.core.SdkPojoBuilder; @@ -135,7 +136,8 @@ public CodeBlock executionHandler(OperationModel opModel) { .add(".withInput($L)", opModel.getInput().getVariableName()) .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) - .add(NoneAuthTypeRequestTrait.create(opModel)); + .add(NoneAuthTypeRequestTrait.create(opModel)) + .add(RequestCompressionTrait.create(opModel, model)); s3ArnableFields(opModel, model).ifPresent(codeBlock::add); @@ -213,7 +215,8 @@ public CodeBlock 
asyncExecutionHandler(IntermediateModel intermediateModel, Oper .add(asyncRequestBody(opModel)) .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) - .add(NoneAuthTypeRequestTrait.create(opModel)); + .add(NoneAuthTypeRequestTrait.create(opModel)) + .add(RequestCompressionTrait.create(opModel, model)); s3ArnableFields(opModel, model).ifPresent(builder::add); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java new file mode 100644 index 000000000000..9290e02a003a --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java @@ -0,0 +1,60 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.poet.client.traits; + +import com.squareup.javapoet.CodeBlock; +import java.util.List; +import java.util.stream.Collectors; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; +import software.amazon.awssdk.codegen.model.intermediate.OperationModel; +import software.amazon.awssdk.core.client.handler.ClientExecutionParams; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; + +/** + * The logic for handling the "requestCompression" trait within the code generator. + */ +public class RequestCompressionTrait { + + private RequestCompressionTrait() { + } + + /** + * Generate a ".putExecutionAttribute(...)" code-block for the provided operation model. This should be used within the + * context of initializing {@link ClientExecutionParams}. If request compression is not required by the operation, this will + * return an empty code-block. 
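A short usage sketch for the trait described above, assuming only what this patch shows elsewhere: the protocol specs append the returned block to their ClientExecutionParams code block, and the block is empty when the operation has no requestCompression trait. The wrapper class name is hypothetical:

import com.squareup.javapoet.CodeBlock;
import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel;
import software.amazon.awssdk.codegen.model.intermediate.OperationModel;
import software.amazon.awssdk.codegen.poet.client.traits.RequestCompressionTrait;

final class RequestCompressionTraitUsageSketch {
    // Appends the request-compression execution attribute to the generated
    // ClientExecutionParams initializer; a no-op for operations without the trait.
    static CodeBlock executionParams(OperationModel opModel, IntermediateModel model) {
        return CodeBlock.builder()
                        .add(".withInput($L)", opModel.getInput().getVariableName())
                        .add(RequestCompressionTrait.create(opModel, model))
                        .build();
    }
}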
+ */ + public static CodeBlock create(OperationModel operationModel, IntermediateModel model) { + if (operationModel.getRequestCompression() == null) { + return CodeBlock.of(""); + } + + // TODO : remove once: + // 1) S3 checksum interceptors are moved to occur after CompressRequestStage + // 2) Transfer-Encoding:chunked is supported in S3 + if (model.getMetadata().getServiceName().equals("S3")) { + throw new IllegalStateException("Request compression for S3 is not yet supported in the AWS SDK for Java."); + } + + List encodings = operationModel.getRequestCompression().getEncodings(); + + return CodeBlock.of(".putExecutionAttribute($T.REQUEST_COMPRESSION, " + + "$T.builder().encodings($L).isStreaming($L).build())", + SdkInternalExecutionAttribute.class, RequestCompression.class, + encodings.stream().collect(Collectors.joining("\", \"", "\"", "\"")), + operationModel.hasStreamingInput()); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/client/PoetClientFunctionalTests.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/client/PoetClientFunctionalTests.java index 226a34eef4a2..c23890c2d499 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/client/PoetClientFunctionalTests.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/client/PoetClientFunctionalTests.java @@ -57,13 +57,6 @@ public void delegatingSyncClientClass() throws Exception { assertThat(syncClientDecoratorAbstractClass, generatesTo("test-abstract-sync-client-class.java")); } - @Test - public void simpleMethodsIntegClass() throws Exception { - ClientSimpleMethodsIntegrationTests simpleMethodsClass = new ClientSimpleMethodsIntegrationTests( - ClientTestModels.restJsonServiceModels()); - assertThat(simpleMethodsClass, generatesTo("test-simple-methods-integ-class.java")); - } - @Test public void syncClientClassRestJson() throws Exception { SyncClientClass syncClientClass = createSyncClientClass(ClientTestModels.restJsonServiceModels()); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/customization.config index eaab59be5b20..b22f02b7debe 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/customization.config @@ -12,7 +12,7 @@ "customRetryPolicy": "software.amazon.MyServiceRetryPolicy", "customRetryStrategy": "software.amazon.MyServiceRetryStrategy", "verifiedSimpleMethods" : ["paginatedOperationWithResultKey"], - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "eventStreamOperation" ], "utilitiesMethod": { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/service-2.json index 05f73f8e6069..65d931001984 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/service-2.json +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/service-2.json @@ -30,6 +30,16 @@ }, "authtype": "none" }, + "OperationWithRequestCompression": { + "name": "APostOperation", + "http": { + "method": "POST", + "requestUri": "/" + }, + "requestCompression": { + "encodings": ["gzip"] + } + }, "APostOperation": { "name": "APostOperation", "http": { diff --git 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/service-2.json index 5827a53a9a27..a3c379d189d6 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/service-2.json +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/service-2.json @@ -59,6 +59,16 @@ }, "authtype": "none" }, + "OperationWithRequestCompression": { + "name": "APostOperation", + "http": { + "method": "POST", + "requestUri": "/" + }, + "requestCompression": { + "encodings": ["gzip"] + } + }, "APostOperation": { "name": "APostOperation", "http": { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/customization.config index 1279a8974876..4789e1fb0743 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/customization.config @@ -16,7 +16,7 @@ "customRetryPolicy": "software.amazon.MyServiceRetryPolicy", "customRetryStrategy": "software.amazon.MyServiceRetryStrategy", "verifiedSimpleMethods" : ["paginatedOperationWithResultKey"], - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "eventStreamOperation" ], "utilitiesMethod": { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/service-2.json index 66597cd7bd19..f003ba7d1e66 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/service-2.json +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/service-2.json @@ -22,6 +22,16 @@ }, "httpChecksumRequired": true }, + "OperationWithRequestCompression": { + "name": "APostOperation", + "http": { + "method": "POST", + "requestUri": "/" + }, + "requestCompression": { + "encodings": ["gzip"] + } + }, "APostOperation": { "name": "APostOperation", "http": { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/xml/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/xml/service-2.json index 267a48381fc9..451eb30d1e28 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/xml/service-2.json +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/xml/service-2.json @@ -29,6 +29,16 @@ }, "authtype": "none" }, + "OperationWithRequestCompression": { + "name": "APostOperation", + "http": { + "method": "POST", + "requestUri": "/" + }, + "requestCompression": { + "encodings": ["gzip"] + } + }, "APostOperation": { "name": "APostOperation", "http": { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-async-client-class.java index 783d45793ecb..05c476018466 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-async-client-class.java @@ -29,6 +29,8 @@ import 
software.amazon.awssdk.services.json.model.JsonRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -305,6 +307,33 @@ public CompletableFuture operationWithChe return invokeOperation(operationWithChecksumRequiredRequest, request -> delegate.operationWithChecksumRequired(request)); } + /** + * Invokes the OperationWithRequestCompression operation asynchronously. + * + * @param operationWithRequestCompressionRequest + * @return A Java Future containing the result of the OperationWithRequestCompression operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following
+ * exceptions.
+ * <p>
+ * <ul>
+ * <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ * Can be used for catch all scenarios.</li>
+ * <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ * credentials, etc.</li>
+ * <li>JsonException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ * of this type.</li>
+ * </ul>
    + * @sample JsonAsyncClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public CompletableFuture operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + return invokeOperation(operationWithRequestCompressionRequest, + request -> delegate.operationWithRequestCompression(request)); + } + /** * Some paginated operation with result_key in paginators.json file * @@ -468,7 +497,7 @@ public CompletableFuture streamingInputOutputOperation( StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, AsyncRequestBody requestBody, AsyncResponseTransformer asyncResponseTransformer) { return invokeOperation(streamingInputOutputOperationRequest, - request -> delegate.streamingInputOutputOperation(request, requestBody, asyncResponseTransformer)); + request -> delegate.streamingInputOutputOperation(request, requestBody, asyncResponseTransformer)); } /** diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-sync-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-sync-client-class.java index cc067f5eab5b..8fc5e6c0adcd 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-sync-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-sync-client-class.java @@ -23,6 +23,8 @@ import software.amazon.awssdk.services.json.model.JsonRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -195,6 +197,30 @@ public OperationWithChecksumRequiredResponse operationWithChecksumRequired( return invokeOperation(operationWithChecksumRequiredRequest, request -> delegate.operationWithChecksumRequired(request)); } + /** + * Invokes the OperationWithRequestCompression operation. + * + * @param operationWithRequestCompressionRequest + * @return Result of the OperationWithRequestCompression operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. 
+ * @sample JsonClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public OperationWithRequestCompressionResponse operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, JsonException { + return invokeOperation(operationWithRequestCompressionRequest, + request -> delegate.operationWithRequestCompression(request)); + } + /** * Some paginated operation with result_key in paginators.json file * @@ -400,7 +426,6 @@ public ReturnT streamingOutputOperation(StreamingOutputOperationReques request -> delegate.streamingOutputOperation(request, responseTransformer)); } - /** * Creates an instance of {@link JsonUtilities} object with the configuration set on this client. */ diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java index b03bc8eb84d2..ae6973fafab0 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java @@ -39,6 +39,7 @@ import software.amazon.awssdk.core.http.HttpResponseHandler; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.protocol.VoidSdkResponse; import software.amazon.awssdk.core.runtime.transform.AsyncStreamingRequestMarshaller; @@ -75,6 +76,8 @@ import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; import software.amazon.awssdk.services.json.model.OperationWithNoneAuthTypeRequest; import software.amazon.awssdk.services.json.model.OperationWithNoneAuthTypeResponse; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -99,6 +102,7 @@ import software.amazon.awssdk.services.json.transform.InputEventTwoMarshaller; import software.amazon.awssdk.services.json.transform.OperationWithChecksumRequiredRequestMarshaller; import software.amazon.awssdk.services.json.transform.OperationWithNoneAuthTypeRequestMarshaller; +import software.amazon.awssdk.services.json.transform.OperationWithRequestCompressionRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithoutResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.StreamingInputOperationRequestMarshaller; @@ -679,6 +683,66 @@ public CompletableFuture operationWithNoneAut } } + /** + * Invokes the OperationWithRequestCompression operation asynchronously. 
+ * + * @param operationWithRequestCompressionRequest + * @return A Java Future containing the result of the OperationWithRequestCompression operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following
+ * exceptions.
+ * <p>
+ * <ul>
+ * <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ * Can be used for catch all scenarios.</li>
+ * <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ * credentials, etc.</li>
+ * <li>JsonException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ * of this type.</li>
+ * </ul>
    + * @sample JsonAsyncClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public CompletableFuture operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); + JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) + .isPayloadJson(true).build(); + + HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( + operationMetadata, OperationWithRequestCompressionResponse::builder); + + HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, + operationMetadata); + + CompletableFuture executeFuture = clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withInput(operationWithRequestCompressionRequest)); + CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); + return executeFuture; + } catch (Throwable t) { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + return CompletableFutureUtils.failedFuture(t); + } + } + /** * Some paginated operation with result_key in paginators.json file * diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java index 4c480ea950ee..81eb8e1aba4e 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java @@ -43,6 +43,7 @@ import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.protocol.VoidSdkResponse; import software.amazon.awssdk.core.runtime.transform.AsyncStreamingRequestMarshaller; @@ -81,6 +82,8 @@ import software.amazon.awssdk.services.json.model.JsonRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; +import 
software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -107,6 +110,7 @@ import software.amazon.awssdk.services.json.transform.InputEventMarshaller; import software.amazon.awssdk.services.json.transform.InputEventTwoMarshaller; import software.amazon.awssdk.services.json.transform.OperationWithChecksumRequiredRequestMarshaller; +import software.amazon.awssdk.services.json.transform.OperationWithRequestCompressionRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithoutResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.PutOperationWithChecksumRequestMarshaller; @@ -757,6 +761,66 @@ public CompletableFuture operationWithChe } } + /** + * Invokes the OperationWithRequestCompression operation asynchronously. + * + * @param operationWithRequestCompressionRequest + * @return A Java Future containing the result of the OperationWithRequestCompression operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following
+ * exceptions.
+ * <p>
+ * <ul>
+ * <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ * Can be used for catch all scenarios.</li>
+ * <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ * credentials, etc.</li>
+ * <li>JsonException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ * of this type.</li>
+ * </ul>
    + * @sample JsonAsyncClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public CompletableFuture operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); + JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) + .isPayloadJson(true).build(); + + HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( + operationMetadata, OperationWithRequestCompressionResponse::builder); + + HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, + operationMetadata); + + CompletableFuture executeFuture = clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withInput(operationWithRequestCompressionRequest)); + CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); + return executeFuture; + } catch (Throwable t) { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + return CompletableFutureUtils.failedFuture(t); + } + } + /** * Some paginated operation with result_key in paginators.json file * diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java index 3ec88bc9ca0a..9b044964447a 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java @@ -34,6 +34,8 @@ import software.amazon.awssdk.services.json.model.InputEventStreamTwo; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -115,7 +117,8 @@ default CompletableFuture aPostOperation(APostOperationR *

    * * @param aPostOperationRequest - * A {@link Consumer} that will call methods on {@link APostOperationRequest.Builder} to create a request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.APostOperationRequest.Builder} to create a request. * @return A Java Future containing the result of the APostOperation operation returned by the service.
    * The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. @@ -179,7 +182,8 @@ default CompletableFuture aPostOperationWithOu *

    * * @param aPostOperationWithOutputRequest - * A {@link Consumer} that will call methods on {@link APostOperationWithOutputRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.APostOperationWithOutputRequest.Builder} to create a * request. * @return A Java Future containing the result of the APostOperationWithOutput operation returned by the service.
    * The CompletableFuture returned by this method can be completed exceptionally with the following @@ -236,8 +240,8 @@ default CompletableFuture bearerAuthOperation( *

    * * @param bearerAuthOperationRequest - * A {@link Consumer} that will call methods on {@link BearerAuthOperationRequest.Builder} to create a - * request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.BearerAuthOperationRequest.Builder} to create a request. * @return A Java Future containing the result of the BearerAuthOperation operation returned by the service.
    * The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. @@ -290,7 +294,8 @@ default CompletableFuture eventStreamOperation(EventStreamOperationRequest *

    * * @param eventStreamOperationRequest - * A {@link Consumer} that will call methods on {@link EventStreamOperationRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.EventStreamOperationRequest.Builder} to create a * request. * @return A Java Future containing the result of the EventStreamOperation operation returned by the service.
    * The CompletableFuture returned by this method can be completed exceptionally with the following @@ -348,7 +353,8 @@ default CompletableFuture eventStream *

    * * @param eventStreamOperationWithOnlyInputRequest - * A {@link Consumer} that will call methods on {@link EventStreamOperationWithOnlyInputRequest.Builder} to + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.EventStreamOperationWithOnlyInputRequest.Builder} to * create a request. * @return A Java Future containing the result of the EventStreamOperationWithOnlyInput operation returned by the * service.
    @@ -408,7 +414,8 @@ default CompletableFuture eventStreamOperationWithOnlyOutput( *

    * * @param eventStreamOperationWithOnlyOutputRequest - * A {@link Consumer} that will call methods on {@link EventStreamOperationWithOnlyOutputRequest.Builder} to + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.EventStreamOperationWithOnlyOutputRequest.Builder} to * create a request. * @return A Java Future containing the result of the EventStreamOperationWithOnlyOutput operation returned by the * service.
    @@ -466,7 +473,9 @@ default CompletableFuture getOperationWithChec *

    * * @param getOperationWithChecksumRequest - * A {@link Consumer} that will call methods on {@link ChecksumStructure.Builder} to create a request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.GetOperationWithChecksumRequest.Builder} to create a + * request. * @return A Java Future containing the result of the GetOperationWithChecksum operation returned by the service.
    * The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. @@ -527,7 +536,8 @@ default CompletableFuture getWithoutRequiredM *

    * * @param getWithoutRequiredMembersRequest - * A {@link Consumer} that will call methods on {@link GetWithoutRequiredMembersRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.GetWithoutRequiredMembersRequest.Builder} to create a * request. * @return A Java Future containing the result of the GetWithoutRequiredMembers operation returned by the service.
    * The CompletableFuture returned by this method can be completed exceptionally with the following @@ -585,8 +595,9 @@ default CompletableFuture operationWithCh *

    * * @param operationWithChecksumRequiredRequest - * A {@link Consumer} that will call methods on {@link OperationWithChecksumRequiredRequest.Builder} to - * create a request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest.Builder} to create + * a request. * @return A Java Future containing the result of the OperationWithChecksumRequired operation returned by the * service.
    * The CompletableFuture returned by this method can be completed exceptionally with the following @@ -609,6 +620,64 @@ default CompletableFuture operationWithCh .applyMutation(operationWithChecksumRequiredRequest).build()); } + /** + * Invokes the OperationWithRequestCompression operation asynchronously. + * + * @param operationWithRequestCompressionRequest + * @return A Java Future containing the result of the OperationWithRequestCompression operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following
+ * exceptions.
+ * <p>
+ * <ul>
+ * <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ * Can be used for catch all scenarios.</li>
+ * <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ * credentials, etc.</li>
+ * <li>JsonException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ * of this type.</li>
+ * </ul>
    + * @sample JsonAsyncClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + default CompletableFuture operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + throw new UnsupportedOperationException(); + } + + /** + * Invokes the OperationWithRequestCompression operation asynchronously.
+ * <p>
+ * This is a convenience which creates an instance of the {@link OperationWithRequestCompressionRequest.Builder}
+ * avoiding the need to create one manually via {@link OperationWithRequestCompressionRequest#builder()}
+ * </p>
    + * + * @param operationWithRequestCompressionRequest + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest.Builder} to + * create a request. + * @return A Java Future containing the result of the OperationWithRequestCompression operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following
+ * exceptions.
+ * <p>
+ * <ul>
+ * <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ * Can be used for catch all scenarios.</li>
+ * <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ * credentials, etc.</li>
+ * <li>JsonException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ * of this type.</li>
+ * </ul>
    + * @sample JsonAsyncClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + default CompletableFuture operationWithRequestCompression( + Consumer operationWithRequestCompressionRequest) { + return operationWithRequestCompression(OperationWithRequestCompressionRequest.builder() + .applyMutation(operationWithRequestCompressionRequest).build()); + } + /** * Some paginated operation with result_key in paginators.json file * @@ -642,7 +711,8 @@ default CompletableFuture paginatedOper *

    * * @param paginatedOperationWithResultKeyRequest - * A {@link Consumer} that will call methods on {@link PaginatedOperationWithResultKeyRequest.Builder} to + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest.Builder} to * create a request. * @return A Java Future containing the result of the PaginatedOperationWithResultKey operation returned by the * service.
    @@ -902,7 +972,8 @@ default PaginatedOperationWithResultKeyPublisher paginatedOperationWithResultKey *

    * * @param paginatedOperationWithResultKeyRequest - * A {@link Consumer} that will call methods on {@link PaginatedOperationWithResultKeyRequest.Builder} to + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest.Builder} to * create a request. * @return A custom publisher that can be subscribed to request a stream of response pages.
    * The CompletableFuture returned by this method can be completed exceptionally with the following @@ -958,7 +1029,8 @@ default CompletableFuture paginatedO *

    * * @param paginatedOperationWithoutResultKeyRequest - * A {@link Consumer} that will call methods on {@link PaginatedOperationWithoutResultKeyRequest.Builder} to + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest.Builder} to * create a request. * @return A Java Future containing the result of the PaginatedOperationWithoutResultKey operation returned by the * service.
    @@ -1120,7 +1192,8 @@ default PaginatedOperationWithoutResultKeyPublisher paginatedOperationWithoutRes *

    * * @param paginatedOperationWithoutResultKeyRequest - * A {@link Consumer} that will call methods on {@link PaginatedOperationWithoutResultKeyRequest.Builder} to + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest.Builder} to * create a request. * @return A custom publisher that can be subscribed to request a stream of response pages.
    * The CompletableFuture returned by this method can be completed exceptionally with the following @@ -1194,7 +1267,8 @@ default CompletableFuture putOperationWithChecksum( *

    * * @param putOperationWithChecksumRequest - * A {@link Consumer} that will call methods on {@link ChecksumStructureWithStreaming.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PutOperationWithChecksumRequest.Builder} to create a * request. * @param requestBody * Functional interface that can be implemented to produce the request content in a non-blocking manner. The @@ -1286,7 +1360,8 @@ default CompletableFuture putOperationWithChec *

    * * @param putOperationWithChecksumRequest - * A {@link Consumer} that will call methods on {@link ChecksumStructureWithStreaming.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PutOperationWithChecksumRequest.Builder} to create a * request. * @param sourcePath * {@link Path} to file containing data to send to the service. File will be read entirely and may be read @@ -1364,7 +1439,8 @@ default CompletableFuture streamingInputOperati *

    * * @param streamingInputOperationRequest - * A {@link Consumer} that will call methods on {@link StructureWithStreamingMember.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingInputOperationRequest.Builder} to create a * request. * @param requestBody * Functional interface that can be implemented to produce the request content in a non-blocking manner. The @@ -1429,7 +1505,8 @@ default CompletableFuture streamingInputOperati *

    * * @param streamingInputOperationRequest - * A {@link Consumer} that will call methods on {@link StructureWithStreamingMember.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingInputOperationRequest.Builder} to create a * request. * @param sourcePath * {@link Path} to file containing data to send to the service. File will be read entirely and may be read @@ -1500,8 +1577,9 @@ default CompletableFuture streamingInputOutputOperation( *

    * * @param streamingInputOutputOperationRequest - * A {@link Consumer} that will call methods on {@link StructureWithStreamingMember.Builder} to create a - * request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingInputOutputOperationRequest.Builder} to create + * a request. * @param requestBody * Functional interface that can be implemented to produce the request content in a non-blocking manner. The * size of the content is expected to be known up front. See {@link AsyncRequestBody} for specific details on @@ -1578,8 +1656,9 @@ default CompletableFuture streamingInputO *

    * * @param streamingInputOutputOperationRequest - * A {@link Consumer} that will call methods on {@link StructureWithStreamingMember.Builder} to create a - * request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingInputOutputOperationRequest.Builder} to create + * a request. * @param sourcePath * {@link Path} to file containing data to send to the service. File will be read entirely and may be read * multiple times in the event of a retry. If the file does not exist or the current user does not have @@ -1650,7 +1729,8 @@ default CompletableFuture streamingOutputOperation( *

    * * @param streamingOutputOperationRequest - * A {@link Consumer} that will call methods on {@link StreamingOutputOperationRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingOutputOperationRequest.Builder} to create a * request. * @param asyncResponseTransformer * The response transformer for processing the streaming response in a non-blocking manner. See @@ -1715,7 +1795,8 @@ default CompletableFuture streamingOutputOpera *

    * * @param streamingOutputOperationRequest - * A {@link Consumer} that will call methods on {@link StreamingOutputOperationRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingOutputOperationRequest.Builder} to create a * request. * @param destinationPath * {@link Path} to file that response contents will be written to. The file must not exist or this method diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java index 54019ade037a..a2a8905fe12a 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java @@ -21,6 +21,7 @@ import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.StreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; @@ -49,6 +50,8 @@ import software.amazon.awssdk.services.json.model.JsonRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -67,6 +70,7 @@ import software.amazon.awssdk.services.json.transform.GetOperationWithChecksumRequestMarshaller; import software.amazon.awssdk.services.json.transform.GetWithoutRequiredMembersRequestMarshaller; import software.amazon.awssdk.services.json.transform.OperationWithChecksumRequiredRequestMarshaller; +import software.amazon.awssdk.services.json.transform.OperationWithRequestCompressionRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithoutResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.PutOperationWithChecksumRequestMarshaller; @@ -408,6 +412,57 @@ public OperationWithChecksumRequiredResponse operationWithChecksumRequired( } } + /** + * Invokes the OperationWithRequestCompression operation. + * + * @param operationWithRequestCompressionRequest + * @return Result of the OperationWithRequestCompression operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. 
Unknown exceptions will be thrown as an instance of this type. + * @sample JsonClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public OperationWithRequestCompressionResponse operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, JsonException { + JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) + .isPayloadJson(true).build(); + + HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( + operationMetadata, OperationWithRequestCompressionResponse::builder); + + HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, + operationMetadata); + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); + + return clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withInput(operationWithRequestCompressionRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); + } finally { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } + } + /** * Some paginated operation with result_key in paginators.json file * diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java index 47d774dcfadd..f2586f4fd1d4 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java @@ -27,6 +27,8 @@ import software.amazon.awssdk.services.json.model.JsonException; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -98,7 +100,8 @@ default APostOperationResponse aPostOperation(APostOperationRequest aPostOperati *

    * * @param aPostOperationRequest - * A {@link Consumer} that will call methods on {@link APostOperationRequest.Builder} to create a request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.APostOperationRequest.Builder} to create a request. * @return Result of the APostOperation operation returned by the service. * @throws InvalidInputException * The request was rejected because an invalid or out-of-range value was supplied for an input parameter. @@ -158,7 +161,8 @@ default APostOperationWithOutputResponse aPostOperationWithOutput( *

    * * @param aPostOperationWithOutputRequest - * A {@link Consumer} that will call methods on {@link APostOperationWithOutputRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.APostOperationWithOutputRequest.Builder} to create a * request. * @return Result of the APostOperationWithOutput operation returned by the service. * @throws InvalidInputException @@ -210,8 +214,8 @@ default BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationReque *

    * * @param bearerAuthOperationRequest - * A {@link Consumer} that will call methods on {@link BearerAuthOperationRequest.Builder} to create a - * request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.BearerAuthOperationRequest.Builder} to create a request. * @return Result of the BearerAuthOperation operation returned by the service. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for @@ -260,7 +264,9 @@ default GetOperationWithChecksumResponse getOperationWithChecksum( *

    * * @param getOperationWithChecksumRequest - * A {@link Consumer} that will call methods on {@link ChecksumStructure.Builder} to create a request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.GetOperationWithChecksumRequest.Builder} to create a + * request. * @return Result of the GetOperationWithChecksum operation returned by the service. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for @@ -317,7 +323,8 @@ default GetWithoutRequiredMembersResponse getWithoutRequiredMembers( *

    * * @param getWithoutRequiredMembersRequest - * A {@link Consumer} that will call methods on {@link GetWithoutRequiredMembersRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.GetWithoutRequiredMembersRequest.Builder} to create a * request. * @return Result of the GetWithoutRequiredMembers operation returned by the service. * @throws InvalidInputException @@ -370,8 +377,9 @@ default OperationWithChecksumRequiredResponse operationWithChecksumRequired( *

    * * @param operationWithChecksumRequiredRequest - * A {@link Consumer} that will call methods on {@link OperationWithChecksumRequiredRequest.Builder} to - * create a request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest.Builder} to create + * a request. * @return Result of the OperationWithChecksumRequired operation returned by the service. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for @@ -391,6 +399,58 @@ default OperationWithChecksumRequiredResponse operationWithChecksumRequired( .applyMutation(operationWithChecksumRequiredRequest).build()); } + /** + * Invokes the OperationWithRequestCompression operation. + * + * @param operationWithRequestCompressionRequest + * @return Result of the OperationWithRequestCompression operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample JsonClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + default OperationWithRequestCompressionResponse operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, JsonException { + throw new UnsupportedOperationException(); + } + + /** + * Invokes the OperationWithRequestCompression operation.
    + *

    + * This is a convenience which creates an instance of the {@link OperationWithRequestCompressionRequest.Builder} + * avoiding the need to create one manually via {@link OperationWithRequestCompressionRequest#builder()} + *

    + * + * @param operationWithRequestCompressionRequest + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest.Builder} to + * create a request. + * @return Result of the OperationWithRequestCompression operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample JsonClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + default OperationWithRequestCompressionResponse operationWithRequestCompression( + Consumer operationWithRequestCompressionRequest) + throws AwsServiceException, SdkClientException, JsonException { + return operationWithRequestCompression(OperationWithRequestCompressionRequest.builder() + .applyMutation(operationWithRequestCompressionRequest).build()); + } + /** * Some paginated operation with result_key in paginators.json file * @@ -421,7 +481,8 @@ default PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( *
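[Editor's note, illustrative only: the synchronous interface above exposes the same pair of overloads. This sketch builds the request explicitly rather than through the Consumer overload, again assuming the generated JsonClient builder.]

    import software.amazon.awssdk.services.json.JsonClient;
    import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest;
    import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse;

    public class RequestCompressionSyncSketch {
        public static void main(String[] args) {
            try (JsonClient client = JsonClient.builder().build()) {
                OperationWithRequestCompressionRequest request =
                        OperationWithRequestCompressionRequest.builder().build();

                // Throws SdkClientException / JsonException on failure, as documented above.
                OperationWithRequestCompressionResponse response =
                        client.operationWithRequestCompression(request);
                System.out.println(response);
            }
        }
    }
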

    * * @param paginatedOperationWithResultKeyRequest - * A {@link Consumer} that will call methods on {@link PaginatedOperationWithResultKeyRequest.Builder} to + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest.Builder} to * create a request. * @return Result of the PaginatedOperationWithResultKey operation returned by the service. * @throws SdkException @@ -679,7 +740,8 @@ default PaginatedOperationWithResultKeyIterable paginatedOperationWithResultKeyP *

    * * @param paginatedOperationWithResultKeyRequest - * A {@link Consumer} that will call methods on {@link PaginatedOperationWithResultKeyRequest.Builder} to + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest.Builder} to * create a request. * @return A custom iterable that can be used to iterate through all the response pages. * @throws SdkException @@ -730,7 +792,8 @@ default PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResu *

    * * @param paginatedOperationWithoutResultKeyRequest - * A {@link Consumer} that will call methods on {@link PaginatedOperationWithoutResultKeyRequest.Builder} to + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest.Builder} to * create a request. * @return Result of the PaginatedOperationWithoutResultKey operation returned by the service. * @throws SdkException @@ -891,7 +954,8 @@ default PaginatedOperationWithoutResultKeyIterable paginatedOperationWithoutResu *

    * * @param paginatedOperationWithoutResultKeyRequest - * A {@link Consumer} that will call methods on {@link PaginatedOperationWithoutResultKeyRequest.Builder} to + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest.Builder} to * create a request. * @return A custom iterable that can be used to iterate through all the response pages. * @throws SdkException @@ -968,7 +1032,8 @@ default ReturnT putOperationWithChecksum(PutOperationWithChecksumReque *

    * * @param putOperationWithChecksumRequest - * A {@link Consumer} that will call methods on {@link ChecksumStructureWithStreaming.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PutOperationWithChecksumRequest.Builder} to create a * request. * @param requestBody * The content to send to the service. A {@link RequestBody} can be created using one of several factory @@ -1066,7 +1131,8 @@ default PutOperationWithChecksumResponse putOperationWithChecksum( *

    * * @param putOperationWithChecksumRequest - * A {@link Consumer} that will call methods on {@link ChecksumStructureWithStreaming.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PutOperationWithChecksumRequest.Builder} to create a * request. * @param sourcePath * {@link Path} to file containing data to send to the service. File will be read entirely and may be read @@ -1147,7 +1213,8 @@ default StreamingInputOperationResponse streamingInputOperation( *

    * * @param streamingInputOperationRequest - * A {@link Consumer} that will call methods on {@link StructureWithStreamingMember.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingInputOperationRequest.Builder} to create a * request. * @param requestBody * The content to send to the service. A {@link RequestBody} can be created using one of several factory @@ -1215,7 +1282,8 @@ default StreamingInputOperationResponse streamingInputOperation( *

    * * @param streamingInputOperationRequest - * A {@link Consumer} that will call methods on {@link StructureWithStreamingMember.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingInputOperationRequest.Builder} to create a * request. * @param sourcePath * {@link Path} to file containing data to send to the service. File will be read entirely and may be read @@ -1291,8 +1359,9 @@ default ReturnT streamingInputOutputOperation( *

    * * @param streamingInputOutputOperationRequest - * A {@link Consumer} that will call methods on {@link StructureWithStreamingMember.Builder} to create a - * request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingInputOutputOperationRequest.Builder} to create + * a request. * @param requestBody * The content to send to the service. A {@link RequestBody} can be created using one of several factory * methods for various sources of data. For example, to create a request body from a file you can do the @@ -1374,8 +1443,9 @@ default StreamingInputOutputOperationResponse streamingInputOutputOperation( *

    * * @param streamingInputOutputOperationRequest - * A {@link Consumer} that will call methods on {@link StructureWithStreamingMember.Builder} to create a - * request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingInputOutputOperationRequest.Builder} to create + * a request. * @param sourcePath * {@link Path} to file containing data to send to the service. File will be read entirely and may be read * multiple times in the event of a retry. If the file does not exist or the current user does not have @@ -1444,7 +1514,8 @@ default ReturnT streamingOutputOperation(StreamingOutputOperationReque *

    * * @param streamingOutputOperationRequest - * A {@link Consumer} that will call methods on {@link StreamingOutputOperationRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingOutputOperationRequest.Builder} to create a * request. * @param responseTransformer * Functional interface for processing the streamed response content. The unmarshalled @@ -1508,7 +1579,8 @@ default StreamingOutputOperationResponse streamingOutputOperation( *

    * * @param streamingOutputOperationRequest - * A {@link Consumer} that will call methods on {@link StreamingOutputOperationRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingOutputOperationRequest.Builder} to create a * request. * @param destinationPath * {@link Path} to file that response contents will be written to. The file must not exist or this method @@ -1570,7 +1642,8 @@ default ResponseInputStream streamingOutputOpe *

    * * @param streamingOutputOperationRequest - * A {@link Consumer} that will call methods on {@link StreamingOutputOperationRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingOutputOperationRequest.Builder} to create a * request. * @return A {@link ResponseInputStream} containing data streamed from service. Note that this is an unmanaged * reference to the underlying HTTP connection so great care must be taken to ensure all data if fully read @@ -1631,7 +1704,8 @@ default ResponseBytes streamingOutputOperation *

    * * @param streamingOutputOperationRequest - * A {@link Consumer} that will call methods on {@link StreamingOutputOperationRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingOutputOperationRequest.Builder} to create a * request. * @return A {@link ResponseBytes} that loads the data streamed from the service into memory and exposes it in * convenient in-memory representations like a byte buffer or string. The unmarshalled response object can diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java index a5e04e6abc24..b0ca9683c0c5 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java @@ -29,6 +29,7 @@ import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.AsyncStreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; @@ -52,6 +53,8 @@ import software.amazon.awssdk.services.query.model.OperationWithContextParamResponse; import software.amazon.awssdk.services.query.model.OperationWithNoneAuthTypeRequest; import software.amazon.awssdk.services.query.model.OperationWithNoneAuthTypeResponse; +import software.amazon.awssdk.services.query.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.query.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.query.model.OperationWithStaticContextParamsRequest; import software.amazon.awssdk.services.query.model.OperationWithStaticContextParamsResponse; import software.amazon.awssdk.services.query.model.PutOperationWithChecksumRequest; @@ -69,6 +72,7 @@ import software.amazon.awssdk.services.query.transform.OperationWithChecksumRequiredRequestMarshaller; import software.amazon.awssdk.services.query.transform.OperationWithContextParamRequestMarshaller; import software.amazon.awssdk.services.query.transform.OperationWithNoneAuthTypeRequestMarshaller; +import software.amazon.awssdk.services.query.transform.OperationWithRequestCompressionRequestMarshaller; import software.amazon.awssdk.services.query.transform.OperationWithStaticContextParamsRequestMarshaller; import software.amazon.awssdk.services.query.transform.PutOperationWithChecksumRequestMarshaller; import software.amazon.awssdk.services.query.transform.StreamingInputOperationRequestMarshaller; @@ -494,6 +498,63 @@ public CompletableFuture operationWithNoneAut } } + /** + * Invokes the OperationWithRequestCompression operation asynchronously. + * + * @param operationWithRequestCompressionRequest + * @return A Java Future containing the result of the OperationWithRequestCompression operation returned by the + * service.
    + * The CompletableFuture returned by this method can be completed exceptionally with the following + * exceptions. + *
      + *
+     *         <ul>
+     *         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+     *         Can be used for catch all scenarios.</li>
+     *         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+     *         credentials, etc.</li>
+     *         <li>QueryException Base class for all service exceptions. Unknown exceptions will be thrown as an
+     *         instance of this type.</li>
+     *         </ul>
    + * @sample QueryAsyncClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public CompletableFuture operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); + + HttpResponseHandler responseHandler = protocolFactory + .createResponseHandler(OperationWithRequestCompressionResponse::builder); + + HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); + + CompletableFuture executeFuture = clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withInput(operationWithRequestCompressionRequest)); + CompletableFuture whenCompleteFuture = null; + whenCompleteFuture = executeFuture.whenComplete((r, e) -> { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + return CompletableFutureUtils.forwardExceptionTo(whenCompleteFuture, executeFuture); + } catch (Throwable t) { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + return CompletableFutureUtils.failedFuture(t); + } + } + /** * Invokes the OperationWithStaticContextParams operation asynchronously. 
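[Editor's note, illustrative only: every generated OperationWithRequestCompression implementation in this patch attaches the same internal trait through SdkInternalExecutionAttribute.REQUEST_COMPRESSION. The sketch below constructs that trait object in isolation simply to make the wiring explicit; RequestCompression is an SDK-internal API, so application code would not normally build it directly.]

    import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression;

    public class RequestCompressionTraitSketch {
        public static void main(String[] args) {
            // Mirrors the execution attribute set by the generated clients above:
            // gzip is the only requested encoding and the payload is modeled as non-streaming.
            RequestCompression trait = RequestCompression.builder()
                                                         .encodings("gzip")
                                                         .isStreaming(false)
                                                         .build();
            System.out.println(trait);
        }
    }
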
* diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java index d9fdd08fef61..0ca5d7837899 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java @@ -20,6 +20,7 @@ import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.StreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; @@ -45,6 +46,8 @@ import software.amazon.awssdk.services.query.model.OperationWithContextParamResponse; import software.amazon.awssdk.services.query.model.OperationWithNoneAuthTypeRequest; import software.amazon.awssdk.services.query.model.OperationWithNoneAuthTypeResponse; +import software.amazon.awssdk.services.query.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.query.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.query.model.OperationWithStaticContextParamsRequest; import software.amazon.awssdk.services.query.model.OperationWithStaticContextParamsResponse; import software.amazon.awssdk.services.query.model.PutOperationWithChecksumRequest; @@ -62,6 +65,7 @@ import software.amazon.awssdk.services.query.transform.OperationWithChecksumRequiredRequestMarshaller; import software.amazon.awssdk.services.query.transform.OperationWithContextParamRequestMarshaller; import software.amazon.awssdk.services.query.transform.OperationWithNoneAuthTypeRequestMarshaller; +import software.amazon.awssdk.services.query.transform.OperationWithRequestCompressionRequestMarshaller; import software.amazon.awssdk.services.query.transform.OperationWithStaticContextParamsRequestMarshaller; import software.amazon.awssdk.services.query.transform.PutOperationWithChecksumRequestMarshaller; import software.amazon.awssdk.services.query.transform.StreamingInputOperationRequestMarshaller; @@ -422,6 +426,54 @@ public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( } } + /** + * Invokes the OperationWithRequestCompression operation. + * + * @param operationWithRequestCompressionRequest + * @return Result of the OperationWithRequestCompression operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws QueryException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. 
+ * @sample QueryClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public OperationWithRequestCompressionResponse operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, QueryException { + + HttpResponseHandler responseHandler = protocolFactory + .createResponseHandler(OperationWithRequestCompressionResponse::builder); + + HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); + + return clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withInput(operationWithRequestCompressionRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); + } finally { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } + } + /** * Invokes the OperationWithStaticContextParams operation. * diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-simple-methods-integ-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-simple-methods-integ-class.java deleted file mode 100644 index 45f038006a2d..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-simple-methods-integ-class.java +++ /dev/null @@ -1,29 +0,0 @@ -package software.amazon.awssdk.services.json; - -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.annotations.Generated; -import software.amazon.awssdk.regions.Region; - -@Generated("software.amazon.awssdk:codegen") -public class SimpleMethodsIntegrationTest { - private static JsonClient client; - - @BeforeClass - public static void setup() { - if (JsonClient.serviceMetadata().regions().isEmpty()) { - client = JsonClient.builder().region(Region.US_EAST_1).build(); - } else if (JsonClient.serviceMetadata().regions().contains(Region.AWS_GLOBAL)) { - client = JsonClient.builder().region(Region.AWS_GLOBAL).build(); - } else if (JsonClient.serviceMetadata().regions().contains(Region.US_EAST_1)) { - client = JsonClient.builder().region(Region.US_EAST_1).build(); - } else { - client = JsonClient.builder().region(JsonClient.serviceMetadata().regions().get(0)).build(); - } - } - - @Test - public void paginatedOperationWithResultKey_SimpleMethod_Succeeds() throws Exception { - client.paginatedOperationWithResultKey(); - } -} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java index c1dc8837dbbb..959bfd8618bf 100644 --- 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java @@ -35,6 +35,7 @@ import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.AsyncStreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; @@ -61,6 +62,8 @@ import software.amazon.awssdk.services.xml.model.OperationWithChecksumRequiredResponse; import software.amazon.awssdk.services.xml.model.OperationWithNoneAuthTypeRequest; import software.amazon.awssdk.services.xml.model.OperationWithNoneAuthTypeResponse; +import software.amazon.awssdk.services.xml.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.xml.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.xml.model.PutOperationWithChecksumRequest; import software.amazon.awssdk.services.xml.model.PutOperationWithChecksumResponse; import software.amazon.awssdk.services.xml.model.StreamingInputOperationRequest; @@ -76,6 +79,7 @@ import software.amazon.awssdk.services.xml.transform.GetOperationWithChecksumRequestMarshaller; import software.amazon.awssdk.services.xml.transform.OperationWithChecksumRequiredRequestMarshaller; import software.amazon.awssdk.services.xml.transform.OperationWithNoneAuthTypeRequestMarshaller; +import software.amazon.awssdk.services.xml.transform.OperationWithRequestCompressionRequestMarshaller; import software.amazon.awssdk.services.xml.transform.PutOperationWithChecksumRequestMarshaller; import software.amazon.awssdk.services.xml.transform.StreamingInputOperationRequestMarshaller; import software.amazon.awssdk.services.xml.transform.StreamingOutputOperationRequestMarshaller; @@ -519,6 +523,62 @@ public CompletableFuture operationWithNoneAut } } + /** + * Invokes the OperationWithRequestCompression operation asynchronously. + * + * @param operationWithRequestCompressionRequest + * @return A Java Future containing the result of the OperationWithRequestCompression operation returned by the + * service.
    + * The CompletableFuture returned by this method can be completed exceptionally with the following + * exceptions. + *
      + *
+     *         <ul>
+     *         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+     *         Can be used for catch all scenarios.</li>
+     *         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+     *         credentials, etc.</li>
+     *         <li>XmlException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+     *         of this type.</li>
+     *         </ul>
    + * @sample XmlAsyncClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public CompletableFuture operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); + + HttpResponseHandler> responseHandler = protocolFactory + .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + + CompletableFuture executeFuture = clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withInput(operationWithRequestCompressionRequest)); + CompletableFuture whenCompleteFuture = null; + whenCompleteFuture = executeFuture.whenComplete((r, e) -> { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + CompletableFutureUtils.forwardExceptionTo(whenCompleteFuture, executeFuture); + return whenCompleteFuture; + } catch (Throwable t) { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + return CompletableFutureUtils.failedFuture(t); + } + } + /** * Invokes the PutOperationWithChecksum operation asynchronously. 
* diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java index 43e33d67c4dc..d52550654b17 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java @@ -21,6 +21,7 @@ import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.StreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; @@ -45,6 +46,8 @@ import software.amazon.awssdk.services.xml.model.OperationWithChecksumRequiredResponse; import software.amazon.awssdk.services.xml.model.OperationWithNoneAuthTypeRequest; import software.amazon.awssdk.services.xml.model.OperationWithNoneAuthTypeResponse; +import software.amazon.awssdk.services.xml.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.xml.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.xml.model.PutOperationWithChecksumRequest; import software.amazon.awssdk.services.xml.model.PutOperationWithChecksumResponse; import software.amazon.awssdk.services.xml.model.StreamingInputOperationRequest; @@ -59,6 +62,7 @@ import software.amazon.awssdk.services.xml.transform.GetOperationWithChecksumRequestMarshaller; import software.amazon.awssdk.services.xml.transform.OperationWithChecksumRequiredRequestMarshaller; import software.amazon.awssdk.services.xml.transform.OperationWithNoneAuthTypeRequestMarshaller; +import software.amazon.awssdk.services.xml.transform.OperationWithRequestCompressionRequestMarshaller; import software.amazon.awssdk.services.xml.transform.PutOperationWithChecksumRequestMarshaller; import software.amazon.awssdk.services.xml.transform.StreamingInputOperationRequestMarshaller; import software.amazon.awssdk.services.xml.transform.StreamingOutputOperationRequestMarshaller; @@ -361,6 +365,52 @@ public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( } } + /** + * Invokes the OperationWithRequestCompression operation. + * + * @param operationWithRequestCompressionRequest + * @return Result of the OperationWithRequestCompression operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws XmlException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. 
+ * @sample XmlClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public OperationWithRequestCompressionResponse operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, XmlException { + + HttpResponseHandler> responseHandler = protocolFactory + .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); + + return clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .withInput(operationWithRequestCompressionRequest) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); + } finally { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } + } + /** * Invokes the PutOperationWithChecksum operation. * diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customization.config index e1206b3fd913..213183ce5a87 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "allTypes", "nestedContainers", "operationWithNoInputOrOutput", diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customresponsemetadata/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customresponsemetadata/customization.config index d3b7f5d49d46..73d3ee1594f4 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customresponsemetadata/customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customresponsemetadata/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "allTypes", "nestedContainers", "operationWithNoInputOrOutput", diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/customization.config index 5e44eaa81f83..ced0f694b186 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "allTypes", "nestedContainers", "operationWithNoInputOrOutput", diff 
--git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/customization.config index 78f3be1bf76c..2f34e03ed047 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "allTypes", "nestedContainers", "operationWithNoInputOrOutput" diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/transform/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/transform/customization.config index b57099f2d797..e66eb09003af 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/transform/customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/transform/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "allTypes", "nestedContainers", "operationWithNoInputOrOutput", diff --git a/core/annotations/pom.xml b/core/annotations/pom.xml index 38e32d102325..c1604d066fd4 100644 --- a/core/annotations/pom.xml +++ b/core/annotations/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 diff --git a/core/arns/pom.xml b/core/arns/pom.xml index 802774018d6e..08dd6931e1b7 100644 --- a/core/arns/pom.xml +++ b/core/arns/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 diff --git a/core/auth-crt/pom.xml b/core/auth-crt/pom.xml index 91dc602039b7..4d8e81954ab9 100644 --- a/core/auth-crt/pom.xml +++ b/core/auth-crt/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT auth-crt diff --git a/core/auth/pom.xml b/core/auth/pom.xml index 2e8581589b6d..e018b20a5085 100644 --- a/core/auth/pom.xml +++ b/core/auth/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT auth diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/chunkedencoding/AwsSignedChunkedEncodingInputStream.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/chunkedencoding/AwsSignedChunkedEncodingInputStream.java index 636fad74f9fc..3174eb7c6caa 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/chunkedencoding/AwsSignedChunkedEncodingInputStream.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/chunkedencoding/AwsSignedChunkedEncodingInputStream.java @@ -40,7 +40,6 @@ @SdkInternalApi public final class AwsSignedChunkedEncodingInputStream extends AwsChunkedEncodingInputStream { - private static final String CRLF = "\r\n"; private static final String CHUNK_SIGNATURE_HEADER = ";chunk-signature="; private static final String CHECKSUM_SIGNATURE_HEADER = "x-amz-trailer-signature:"; private String previousChunkSignature; diff --git a/core/aws-core/pom.xml b/core/aws-core/pom.xml index ffead75c75ba..687ecf0735ab 100644 --- a/core/aws-core/pom.xml +++ b/core/aws-core/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT aws-core diff --git a/core/crt-core/pom.xml b/core/crt-core/pom.xml index 626813252a3e..1efdb86d4a5a 100644 --- a/core/crt-core/pom.xml +++ b/core/crt-core/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk core - 2.20.126-SNAPSHOT + 
2.20.144-SNAPSHOT crt-core diff --git a/core/endpoints-spi/pom.xml b/core/endpoints-spi/pom.xml index d45d7b525098..8451671c9857 100644 --- a/core/endpoints-spi/pom.xml +++ b/core/endpoints-spi/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 diff --git a/core/imds/pom.xml b/core/imds/pom.xml index 3ec164ba79d9..c615db933708 100644 --- a/core/imds/pom.xml +++ b/core/imds/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 imds diff --git a/core/json-utils/pom.xml b/core/json-utils/pom.xml index e262f9de0a9b..ebf19e29fbc1 100644 --- a/core/json-utils/pom.xml +++ b/core/json-utils/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 diff --git a/core/metrics-spi/pom.xml b/core/metrics-spi/pom.xml index 2378f2e38ee9..6c98ed9b18cf 100644 --- a/core/metrics-spi/pom.xml +++ b/core/metrics-spi/pom.xml @@ -5,7 +5,7 @@ core software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 diff --git a/core/pom.xml b/core/pom.xml index bdc7e5fd789c..d478f2d2e002 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT core diff --git a/core/profiles/pom.xml b/core/profiles/pom.xml index 71fed9dd446b..9c8847ee845f 100644 --- a/core/profiles/pom.xml +++ b/core/profiles/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT profiles diff --git a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java index 32804fbd44ea..3551f50c52f8 100644 --- a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java +++ b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java @@ -141,6 +141,18 @@ public final class ProfileProperty { public static final String EC2_METADATA_SERVICE_ENDPOINT = "ec2_metadata_service_endpoint"; + /** + * Whether request compression is disabled for operations marked with the RequestCompression trait. The default value is + * false, i.e., request compression is enabled. + */ + public static final String DISABLE_REQUEST_COMPRESSION = "disable_request_compression"; + + /** + * The minimum compression size in bytes, inclusive, for a request to be compressed. The default value is 10_240. + * The value must be non-negative and no greater than 10_485_760. 
+ */ + public static final String REQUEST_MIN_COMPRESSION_SIZE_BYTES = "request_min_compression_size_bytes"; + private ProfileProperty() { } } diff --git a/core/protocols/aws-cbor-protocol/pom.xml b/core/protocols/aws-cbor-protocol/pom.xml index 5b36c168871b..b1b09bbb283f 100644 --- a/core/protocols/aws-cbor-protocol/pom.xml +++ b/core/protocols/aws-cbor-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-json-protocol/pom.xml b/core/protocols/aws-json-protocol/pom.xml index e934c54da81b..4ea8fa524a56 100644 --- a/core/protocols/aws-json-protocol/pom.xml +++ b/core/protocols/aws-json-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-query-protocol/pom.xml b/core/protocols/aws-query-protocol/pom.xml index 84195df02010..0b89c5a59699 100644 --- a/core/protocols/aws-query-protocol/pom.xml +++ b/core/protocols/aws-query-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-xml-protocol/pom.xml b/core/protocols/aws-xml-protocol/pom.xml index c1b5b3ef5820..62f308d73584 100644 --- a/core/protocols/aws-xml-protocol/pom.xml +++ b/core/protocols/aws-xml-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 diff --git a/core/protocols/pom.xml b/core/protocols/pom.xml index f535b05c9b5c..b8d0130328c9 100644 --- a/core/protocols/pom.xml +++ b/core/protocols/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 diff --git a/core/protocols/protocol-core/pom.xml b/core/protocols/protocol-core/pom.xml index e29093bd76fb..b951eeb71649 100644 --- a/core/protocols/protocol-core/pom.xml +++ b/core/protocols/protocol-core/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 diff --git a/core/regions/pom.xml b/core/regions/pom.xml index a87ad3caabac..2a28da7c8917 100644 --- a/core/regions/pom.xml +++ b/core/regions/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT regions diff --git a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json index a686c926e997..50d1601ec167 100644 --- a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json +++ b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json @@ -375,6 +375,7 @@ "deprecated" : true, "hostname" : "acm-pca-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -521,6 +522,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -1066,6 +1068,7 @@ }, "endpoints" : { "ap-south-1" : { }, + "eu-central-1" : { }, "us-east-1" : { } } }, @@ -1610,6 +1613,12 @@ "tags" : [ "dualstack" ] } ] }, + "il-central-1" : { + "variants" : [ { + "hostname" : "appmesh.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "me-south-1" : { "variants" : [ { "hostname" : "appmesh.me-south-1.api.aws", @@ -2035,6 +2044,12 @@ "deprecated" : true, "hostname" : "athena-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { + "variants" : [ { + 
"hostname" : "athena.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "me-central-1" : { "variants" : [ { "hostname" : "athena.me-central-1.api.aws", @@ -2205,6 +2220,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -2326,6 +2342,7 @@ "deprecated" : true, "hostname" : "fips.batch.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -3032,6 +3049,7 @@ "deprecated" : true, "hostname" : "codecommit-fips.ca-central-1.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -3332,6 +3350,7 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -3370,6 +3389,7 @@ "deprecated" : true, "hostname" : "cognito-identity-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -3402,6 +3422,7 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -3440,6 +3461,7 @@ "deprecated" : true, "hostname" : "cognito-idp-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -3837,6 +3859,7 @@ "endpoints" : { "ap-southeast-2" : { }, "ca-central-1" : { }, + "eu-central-1" : { }, "eu-west-2" : { }, "fips-us-east-1" : { "credentialScope" : { @@ -3910,6 +3933,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -4400,6 +4424,7 @@ "deprecated" : true, "hostname" : "datasync-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -6209,6 +6234,7 @@ "deprecated" : true, "hostname" : "email-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -6405,26 +6431,126 @@ }, "es" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ca-central-1" : { }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "aos.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "aos.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "aos.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "aos.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "aos.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "aos.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "aos.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : 
"aos.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "aos.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "aos.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "aos.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "aos.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "aos.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "aos.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "aos.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "aos.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "aos.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "aos.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "aos.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "aos.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "fips" : { "credentialScope" : { "region" : "us-west-1" @@ -6432,12 +6558,35 @@ "deprecated" : true, "hostname" : "es-fips.us-west-1.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "aos.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "aos.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "aos.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "aos.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { + "hostname" : "aos.us-east-1.api.aws", + "tags" : [ "dualstack" ] + }, { "hostname" : "es-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] } ] @@ -6451,6 +6600,9 @@ }, "us-east-2" : { "variants" : [ { + "hostname" : "aos.us-east-2.api.aws", + "tags" : [ "dualstack" ] + }, { "hostname" : "es-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] } ] @@ -6464,6 +6616,9 @@ }, "us-west-1" : { "variants" : [ { + "hostname" : "aos.us-west-1.api.aws", + "tags" : [ "dualstack" ] + }, { "hostname" : "es-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] } ] @@ -6477,6 +6632,9 @@ }, "us-west-2" : { "variants" : [ { + "hostname" : "aos.us-west-2.api.aws", + "tags" : [ "dualstack" ] + }, { "hostname" : "es-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] } ] @@ -6922,6 +7080,7 @@ "deprecated" : true, "hostname" : "fms-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { "variants" : [ { @@ -7168,6 +7327,7 @@ "deprecated" : true, "hostname" : "fsx-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "prod-ca-central-1" : { @@ -7673,6 +7833,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -7851,11 +8012,13 @@ 
"ap-southeast-3" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -8019,12 +8182,60 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "inspector2-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "inspector2-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "inspector2-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "inspector2-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "us-east-1" : { + "variants" : [ { + "hostname" : "inspector2-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "inspector2-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "inspector2-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "inspector2-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } } }, "internetmonitor" : { @@ -8073,7 +8284,7 @@ "ca-central-1" : { "hostname" : "internetmonitor.ca-central-1.api.aws", "variants" : [ { - "hostname" : "internetmonitor-fips.ca-central-1.api.aws", + "hostname" : "internetmonitor-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] } ] }, @@ -8116,28 +8327,28 @@ "us-east-1" : { "hostname" : "internetmonitor.us-east-1.api.aws", "variants" : [ { - "hostname" : "internetmonitor-fips.us-east-1.api.aws", + "hostname" : "internetmonitor-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] } ] }, "us-east-2" : { "hostname" : "internetmonitor.us-east-2.api.aws", "variants" : [ { - "hostname" : "internetmonitor-fips.us-east-2.api.aws", + "hostname" : "internetmonitor-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] } ] }, "us-west-1" : { "hostname" : "internetmonitor.us-west-1.api.aws", "variants" : [ { - "hostname" : "internetmonitor-fips.us-west-1.api.aws", + "hostname" : "internetmonitor-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] } ] }, "us-west-2" : { "hostname" : "internetmonitor.us-west-2.api.aws", "variants" : [ { - "hostname" : "internetmonitor-fips.us-west-2.api.aws", + "hostname" : "internetmonitor-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] } ] } @@ -8844,6 +9055,7 @@ "deprecated" : true, "hostname" : "kafka-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -9894,6 +10106,7 @@ "deprecated" : true, "hostname" : "license-manager-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -10012,6 +10225,7 @@ "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ap-southeast-3" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, @@ -10047,6 +10261,7 @@ "deprecated" : true, "hostname" : "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com" }, + 
"il-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -10339,6 +10554,7 @@ "deprecated" : true, "hostname" : "macie2-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -10442,9 +10658,11 @@ "af-south-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "mediaconvert-fips.ca-central-1.amazonaws.com", @@ -10649,6 +10867,7 @@ "endpoints" : { "ap-southeast-1" : { }, "eu-central-1" : { }, + "il-central-1" : { }, "us-east-1" : { "variants" : [ { "hostname" : "meetings-chime-fips.us-east-1.amazonaws.com", @@ -10853,6 +11072,7 @@ "deprecated" : true, "hostname" : "mgn-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -11349,7 +11569,18 @@ "credentialScope" : { "region" : "us-west-2" }, - "hostname" : "networkmanager.us-west-2.amazonaws.com" + "hostname" : "networkmanager.us-west-2.amazonaws.com", + "variants" : [ { + "hostname" : "networkmanager-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "fips-aws-global" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "networkmanager-fips.us-west-2.amazonaws.com" } }, "isRegionalized" : false, @@ -11500,6 +11731,12 @@ }, "hostname" : "oidc.eu-west-3.amazonaws.com" }, + "il-central-1" : { + "credentialScope" : { + "region" : "il-central-1" + }, + "hostname" : "oidc.il-central-1.amazonaws.com" + }, "me-south-1" : { "credentialScope" : { "region" : "me-south-1" @@ -11727,6 +11964,8 @@ "deprecated" : true, "hostname" : "outposts-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, + "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -12122,6 +12361,12 @@ }, "hostname" : "portal.sso.eu-west-3.amazonaws.com" }, + "il-central-1" : { + "credentialScope" : { + "region" : "il-central-1" + }, + "hostname" : "portal.sso.il-central-1.amazonaws.com" + }, "me-south-1" : { "credentialScope" : { "region" : "me-south-1" @@ -12880,6 +13125,7 @@ "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "il-central-1" : { }, "rekognition-fips.ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -14061,6 +14307,7 @@ "fips-us-west-2" : { "deprecated" : true }, + "il-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -14135,18 +14382,26 @@ }, "schemas" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ap-southeast-3" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -14855,6 +15110,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -15823,6 +16079,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -15945,6 +16202,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, 
"me-south-1" : { }, "sa-east-1" : { }, @@ -16668,6 +16926,7 @@ "deprecated" : true, "hostname" : "transfer-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -16890,7 +17149,10 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -18646,8 +18908,18 @@ }, "es" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "aos.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "aos.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "events" : { @@ -19142,6 +19414,12 @@ }, "isRegionalized" : true }, + "schemas" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "secretsmanager" : { "endpoints" : { "cn-north-1" : { }, @@ -21100,6 +21378,9 @@ }, "us-gov-east-1" : { "variants" : [ { + "hostname" : "aos.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] + }, { "hostname" : "es-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] } ] @@ -21113,6 +21394,9 @@ }, "us-gov-west-1" : { "variants" : [ { + "hostname" : "aos.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] + }, { "hostname" : "es-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] } ] @@ -21283,6 +21567,23 @@ } } }, + "geo" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "geo-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "geo-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, "glacier" : { "endpoints" : { "fips-us-gov-east-1" : { @@ -21574,8 +21875,32 @@ }, "inspector2" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "inspector2-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "inspector2-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "inspector2-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "inspector2-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } } }, "internetmonitor" : { @@ -22281,6 +22606,17 @@ "credentialScope" : { "region" : "us-gov-west-1" }, + "hostname" : "networkmanager.us-gov-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "networkmanager.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "fips-aws-us-gov-global" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, "hostname" : "networkmanager.us-gov-west-1.amazonaws.com" } }, @@ -23065,13 +23401,13 @@ }, "us-gov-east-1" : { "variants" : [ { - "hostname" : "servicediscovery-fips.us-gov-east-1.amazonaws.com", - "tags" : [ "dualstack", "fips" ] - }, { "hostname" : "servicediscovery-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] }, { - "hostname" : "servicediscovery.us-gov-east-1.amazonaws.com", + "hostname" : "servicediscovery-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : 
"servicediscovery.us-gov-east-1.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -23084,13 +23420,13 @@ }, "us-gov-west-1" : { "variants" : [ { - "hostname" : "servicediscovery-fips.us-gov-west-1.amazonaws.com", - "tags" : [ "dualstack", "fips" ] - }, { "hostname" : "servicediscovery-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] }, { - "hostname" : "servicediscovery.us-gov-west-1.amazonaws.com", + "hostname" : "servicediscovery-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "servicediscovery.us-gov-west-1.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -24055,7 +24391,8 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { - "us-iso-east-1" : { } + "us-iso-east-1" : { }, + "us-iso-west-1" : { } } }, "elasticache" : { @@ -24293,6 +24630,12 @@ "us-iso-west-1" : { } } }, + "resource-groups" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, "route53" : { "endpoints" : { "aws-iso-global" : { @@ -24737,6 +25080,11 @@ "us-isob-east-1" : { } } }, + "outposts" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "ram" : { "endpoints" : { "us-isob-east-1" : { } diff --git a/core/retries-api/pom.xml b/core/retries-api/pom.xml index ca32a968f478..c5c161568615 100644 --- a/core/retries-api/pom.xml +++ b/core/retries-api/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 diff --git a/core/retries/pom.xml b/core/retries/pom.xml index ba275e088f9a..4240d73ea6a7 100644 --- a/core/retries/pom.xml +++ b/core/retries/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 diff --git a/core/sdk-core/pom.xml b/core/sdk-core/pom.xml index 98fdcf2b128f..a9754348bc2f 100644 --- a/core/sdk-core/pom.xml +++ b/core/sdk-core/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk core - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT sdk-core AWS Java SDK :: SDK Core diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/CompressionConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/CompressionConfiguration.java new file mode 100644 index 000000000000..60ea1b94472f --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/CompressionConfiguration.java @@ -0,0 +1,141 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core; + +import java.util.Objects; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * Configuration options for operations with the RequestCompression trait to disable request configuration and set the minimum + * compression threshold in bytes. 
+ */ +@SdkPublicApi +public final class CompressionConfiguration implements ToCopyableBuilder { + + private final Boolean requestCompressionEnabled; + private final Integer minimumCompressionThresholdInBytes; + + private CompressionConfiguration(DefaultBuilder builder) { + this.requestCompressionEnabled = builder.requestCompressionEnabled; + this.minimumCompressionThresholdInBytes = builder.minimumCompressionThresholdInBytes; + } + + /** + * If set, returns true if request compression is enabled, else false if request compression is disabled. + */ + public Boolean requestCompressionEnabled() { + return requestCompressionEnabled; + } + + /** + * If set, returns the minimum compression threshold in bytes, inclusive, in order to trigger request compression. + */ + public Integer minimumCompressionThresholdInBytes() { + return minimumCompressionThresholdInBytes; + } + + /** + * Create a {@link CompressionConfiguration.Builder}, used to create a {@link CompressionConfiguration}. + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CompressionConfiguration that = (CompressionConfiguration) o; + + if (!requestCompressionEnabled.equals(that.requestCompressionEnabled)) { + return false; + } + return Objects.equals(minimumCompressionThresholdInBytes, that.minimumCompressionThresholdInBytes); + } + + @Override + public int hashCode() { + int result = requestCompressionEnabled != null ? requestCompressionEnabled.hashCode() : 0; + result = 31 * result + (minimumCompressionThresholdInBytes != null ? minimumCompressionThresholdInBytes.hashCode() : 0); + return result; + } + + + public interface Builder extends CopyableBuilder { + + /** + * Configures whether request compression is enabled or not, for operations that the service has designated as + * supporting compression. The default value is true. + * + * @param requestCompressionEnabled + * @return This object for method chaining. + */ + Builder requestCompressionEnabled(Boolean requestCompressionEnabled); + + /** + * Configures the minimum compression threshold, inclusive, in bytes. A request whose size is less than the threshold + * will not be compressed, even if the compression trait is present. The default value is 10_240. The value must be + * non-negative and no greater than 10_485_760. + * + * @param minimumCompressionThresholdInBytes + * @return This object for method chaining. 
+ */ + Builder minimumCompressionThresholdInBytes(Integer minimumCompressionThresholdInBytes); + } + + private static final class DefaultBuilder implements Builder { + private Boolean requestCompressionEnabled; + private Integer minimumCompressionThresholdInBytes; + + private DefaultBuilder() { + } + + private DefaultBuilder(CompressionConfiguration compressionConfiguration) { + this.requestCompressionEnabled = compressionConfiguration.requestCompressionEnabled; + this.minimumCompressionThresholdInBytes = compressionConfiguration.minimumCompressionThresholdInBytes; + } + + @Override + public Builder requestCompressionEnabled(Boolean requestCompressionEnabled) { + this.requestCompressionEnabled = requestCompressionEnabled; + return this; + } + + @Override + public Builder minimumCompressionThresholdInBytes(Integer minimumCompressionThresholdInBytes) { + this.minimumCompressionThresholdInBytes = minimumCompressionThresholdInBytes; + return this; + } + + @Override + public CompressionConfiguration build() { + return new CompressionConfiguration(this); + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/FileRequestBodyConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/FileRequestBodyConfiguration.java new file mode 100644 index 000000000000..07e7a98e424e --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/FileRequestBodyConfiguration.java @@ -0,0 +1,209 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core; + +import java.nio.file.Path; +import java.util.Objects; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * Configuration options for {@link AsyncRequestBody#fromFile(FileRequestBodyConfiguration)} to configure how the SDK + * should read the file. + * + * @see #builder() + */ +@SdkPublicApi +public final class FileRequestBodyConfiguration implements ToCopyableBuilder { + private final Integer chunkSizeInBytes; + private final Long position; + private final Long numBytesToRead; + private final Path path; + + private FileRequestBodyConfiguration(DefaultBuilder builder) { + this.path = Validate.notNull(builder.path, "path"); + this.chunkSizeInBytes = Validate.isPositiveOrNull(builder.chunkSizeInBytes, "chunkSizeInBytes"); + this.position = Validate.isNotNegativeOrNull(builder.position, "position"); + this.numBytesToRead = Validate.isNotNegativeOrNull(builder.numBytesToRead, "numBytesToRead"); + } + + /** + * Create a {@link Builder}, used to create a {@link FileRequestBodyConfiguration}. 
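For illustration only, a minimal sketch of how a caller might assemble the CompressionConfiguration introduced above; the 16 KiB threshold is an arbitrary example value, not something this patch prescribes.

    CompressionConfiguration compressionConfig =
        CompressionConfiguration.builder()
                                .requestCompressionEnabled(true)            // compress operations that support the trait
                                .minimumCompressionThresholdInBytes(16_384) // skip bodies smaller than 16 KiB
                                .build();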
+ */ + public static Builder builder() { + return new DefaultBuilder(); + } + + /** + * @return the size of each chunk to read from the file + */ + public Integer chunkSizeInBytes() { + return chunkSizeInBytes; + } + + /** + * @return the file position at which the request body begins. + */ + public Long position() { + return position; + } + + /** + * @return the number of bytes to read from this file. + */ + public Long numBytesToRead() { + return numBytesToRead; + } + + /** + * @return the file path + */ + public Path path() { + return path; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + FileRequestBodyConfiguration that = (FileRequestBodyConfiguration) o; + + if (!Objects.equals(chunkSizeInBytes, that.chunkSizeInBytes)) { + return false; + } + if (!Objects.equals(position, that.position)) { + return false; + } + if (!Objects.equals(numBytesToRead, that.numBytesToRead)) { + return false; + } + return Objects.equals(path, that.path); + } + + @Override + public int hashCode() { + int result = chunkSizeInBytes != null ? chunkSizeInBytes.hashCode() : 0; + result = 31 * result + (position != null ? position.hashCode() : 0); + result = 31 * result + (numBytesToRead != null ? numBytesToRead.hashCode() : 0); + result = 31 * result + (path != null ? path.hashCode() : 0); + return result; + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + public interface Builder extends CopyableBuilder { + + /** + * Sets the {@link Path} to the file containing data to send to the service + * + * @param path Path to file to read. + * @return This builder for method chaining. + */ + Builder path(Path path); + + /** + * Sets the size of chunks read from the file. Increasing this will cause more data to be buffered into memory but + * may yield better latencies. Decreasing this will reduce memory usage but may cause reduced latency. Setting this value + * is very dependent on upload speed and requires some performance testing to tune. + * + *
+ *     The default chunk size is 16 KiB
    + * + * @param chunkSize New chunk size in bytes. + * @return This builder for method chaining. + */ + Builder chunkSizeInBytes(Integer chunkSize); + + /** + * Sets the file position at which the request body begins. + * + *
    By default, it's 0, i.e., reading from the beginning. + * + * @param position the position of the file + * @return The builder for method chaining. + */ + Builder position(Long position); + + /** + * Sets the number of bytes to read from this file. + * + *
    By default, it's same as the file length. + * + * @param numBytesToRead number of bytes to read + * @return The builder for method chaining. + */ + Builder numBytesToRead(Long numBytesToRead); + } + + private static final class DefaultBuilder implements Builder { + private Long position; + private Path path; + private Integer chunkSizeInBytes; + private Long numBytesToRead; + + private DefaultBuilder(FileRequestBodyConfiguration configuration) { + this.position = configuration.position; + this.path = configuration.path; + this.chunkSizeInBytes = configuration.chunkSizeInBytes; + this.numBytesToRead = configuration.numBytesToRead; + } + + private DefaultBuilder() { + + } + + @Override + public Builder path(Path path) { + this.path = path; + return this; + } + + @Override + public Builder chunkSizeInBytes(Integer chunkSizeInBytes) { + this.chunkSizeInBytes = chunkSizeInBytes; + return this; + } + + @Override + public Builder position(Long position) { + this.position = position; + return this; + } + + @Override + public Builder numBytesToRead(Long numBytesToRead) { + this.numBytesToRead = numBytesToRead; + return this; + } + + @Override + public FileRequestBodyConfiguration build() { + return new FileRequestBodyConfiguration(this); + } + } + +} \ No newline at end of file diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestOverrideConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestOverrideConfiguration.java index cb4daf65922a..9dc55c2ee910 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestOverrideConfiguration.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestOverrideConfiguration.java @@ -51,8 +51,8 @@ public abstract class RequestOverrideConfiguration { private final Signer signer; private final List metricPublishers; private final ExecutionAttributes executionAttributes; - private final EndpointProvider endpointProvider; + private final CompressionConfiguration compressionConfiguration; protected RequestOverrideConfiguration(Builder builder) { this.headers = CollectionUtils.deepUnmodifiableMap(builder.headers(), () -> new TreeMap<>(String.CASE_INSENSITIVE_ORDER)); @@ -64,6 +64,7 @@ protected RequestOverrideConfiguration(Builder builder) { this.metricPublishers = Collections.unmodifiableList(new ArrayList<>(builder.metricPublishers())); this.executionAttributes = ExecutionAttributes.unmodifiableExecutionAttributes(builder.executionAttributes()); this.endpointProvider = builder.endpointProvider(); + this.compressionConfiguration = builder.compressionConfiguration(); } /** @@ -165,6 +166,15 @@ public Optional endpointProvider() { return Optional.ofNullable(endpointProvider); } + /** + * Returns the compression configuration object, if present, which includes options to enable/disable compression and set + * the minimum compression threshold. This compression config object supersedes the compression config object set on the + * client. 
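As a usage sketch (not part of the patch), the FileRequestBodyConfiguration defined above could be built as follows; the file name and byte counts are placeholders, and java.nio.file.Paths is assumed to be imported.

    FileRequestBodyConfiguration fileConfig =
        FileRequestBodyConfiguration.builder()
                                    .path(Paths.get("large-upload.dat")) // placeholder file name
                                    .position(0L)                        // read from the start of the file
                                    .numBytesToRead(1_048_576L)          // read only the first 1 MiB
                                    .chunkSizeInBytes(16 * 1024)         // 16 KiB chunks, the documented default
                                    .build();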
+ */ + public Optional compressionConfiguration() { + return Optional.ofNullable(compressionConfiguration); + } + @Override public boolean equals(Object o) { if (this == o) { @@ -182,7 +192,8 @@ public boolean equals(Object o) { Objects.equals(signer, that.signer) && Objects.equals(metricPublishers, that.metricPublishers) && Objects.equals(executionAttributes, that.executionAttributes) && - Objects.equals(endpointProvider, that.endpointProvider); + Objects.equals(endpointProvider, that.endpointProvider) && + Objects.equals(compressionConfiguration, that.compressionConfiguration); } @Override @@ -197,6 +208,7 @@ public int hashCode() { hashCode = 31 * hashCode + Objects.hashCode(metricPublishers); hashCode = 31 * hashCode + Objects.hashCode(executionAttributes); hashCode = 31 * hashCode + Objects.hashCode(endpointProvider); + hashCode = 31 * hashCode + Objects.hashCode(compressionConfiguration); return hashCode; } @@ -438,6 +450,26 @@ default B putRawQueryParameter(String name, String value) { EndpointProvider endpointProvider(); + /** + * Sets the {@link CompressionConfiguration} for this request. The order of precedence, from highest to lowest, + * for this setting is: 1) Per request configuration 2) Client configuration 3) Environment variables 4) Profile setting. + * + * @param compressionConfiguration Request compression configuration object for this request. + */ + B compressionConfiguration(CompressionConfiguration compressionConfiguration); + + /** + * Sets the {@link CompressionConfiguration} for this request. The order of precedence, from highest to lowest, + * for this setting is: 1) Per request configuration 2) Client configuration 3) Environment variables 4) Profile setting. + * + * @param compressionConfigurationConsumer A {@link Consumer} that accepts a {@link CompressionConfiguration.Builder} + * + * @return This object for method chaining + */ + B compressionConfiguration(Consumer compressionConfigurationConsumer); + + CompressionConfiguration compressionConfiguration(); + /** * Create a new {@code SdkRequestOverrideConfiguration} with the properties set on this builder. 
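A hedged per-request sketch: assuming the concrete SdkRequestOverrideConfiguration referenced above inherits these new builder methods, a single call could opt out of compression like this (attaching the override to a request is not shown).

    SdkRequestOverrideConfiguration overrideConfig =
        SdkRequestOverrideConfiguration.builder()
                                       .compressionConfiguration(c -> c.requestCompressionEnabled(false))
                                       .build();
    // Per the documented precedence, this per-request setting takes priority over the
    // client configuration, environment variables, and profile settings.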
* @@ -455,9 +487,8 @@ protected abstract static class BuilderImpl implements Builde private Signer signer; private List metricPublishers = new ArrayList<>(); private ExecutionAttributes.Builder executionAttributesBuilder = ExecutionAttributes.builder(); - private EndpointProvider endpointProvider; - + private CompressionConfiguration compressionConfiguration; protected BuilderImpl() { } @@ -472,6 +503,7 @@ protected BuilderImpl(RequestOverrideConfiguration sdkRequestOverrideConfig) { metricPublishers(sdkRequestOverrideConfig.metricPublishers()); executionAttributes(sdkRequestOverrideConfig.executionAttributes()); endpointProvider(sdkRequestOverrideConfig.endpointProvider); + compressionConfiguration(sdkRequestOverrideConfig.compressionConfiguration); } @Override @@ -626,7 +658,6 @@ public void setExecutionAttributes(ExecutionAttributes executionAttributes) { executionAttributes(executionAttributes); } - @Override public B endpointProvider(EndpointProvider endpointProvider) { this.endpointProvider = endpointProvider; @@ -641,5 +672,24 @@ public void setEndpointProvider(EndpointProvider endpointProvider) { public EndpointProvider endpointProvider() { return endpointProvider; } + + @Override + public B compressionConfiguration(CompressionConfiguration compressionConfiguration) { + this.compressionConfiguration = compressionConfiguration; + return (B) this; + } + + @Override + public B compressionConfiguration(Consumer compressionConfigurationConsumer) { + CompressionConfiguration.Builder b = CompressionConfiguration.builder(); + compressionConfigurationConsumer.accept(b); + compressionConfiguration(b.build()); + return (B) this; + } + + @Override + public CompressionConfiguration compressionConfiguration() { + return compressionConfiguration; + } } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java index 1e5c400ca617..f04029a3b0fe 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java @@ -184,6 +184,18 @@ public enum SdkSystemSetting implements SystemSetting { */ AWS_USE_FIPS_ENDPOINT("aws.useFipsEndpoint", null), + /** + * Whether request compression is disabled for operations marked with the RequestCompression trait. The default value is + * false, i.e., request compression is enabled. + */ + AWS_DISABLE_REQUEST_COMPRESSION("aws.disableRequestCompression", null), + + /** + * Defines the minimum compression size in bytes, inclusive, for a request to be compressed. The default value is 10_240. + * The value must be non-negative and no greater than 10_485_760. 
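The same pair of toggles can also be exercised as plain JVM system properties; the keys below come from the enum entries added here, while the threshold value is only an example. The equivalent profile properties, disable_request_compression and request_min_compression_size_bytes, were added earlier in this patch.

    System.setProperty("aws.disableRequestCompression", "true");       // turn request compression off globally
    System.setProperty("aws.requestMinCompressionSizeBytes", "20480"); // 20 KiB minimum, an example value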
+ */ + AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES("aws.requestMinCompressionSizeBytes", null), + ; private final String systemProperty; diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java index 4c7d70ab7553..8fd0fb6d6659 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java @@ -29,6 +29,7 @@ import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.FileRequestBodyConfiguration; import software.amazon.awssdk.core.internal.async.ByteBuffersAsyncRequestBody; import software.amazon.awssdk.core.internal.async.FileAsyncRequestBody; import software.amazon.awssdk.core.internal.async.InputStreamWithExecutorAsyncRequestBody; @@ -112,16 +113,46 @@ static AsyncRequestBody fromFile(Path path) { /** * Creates an {@link AsyncRequestBody} that produces data from the contents of a file. See - * {@link FileAsyncRequestBody#builder} to create a customized body implementation. + * {@link #fromFile(FileRequestBodyConfiguration)} to create a customized body implementation. * * @param file The file to read from. * @return Implementation of {@link AsyncRequestBody} that reads data from the specified file. - * @see FileAsyncRequestBody */ static AsyncRequestBody fromFile(File file) { return FileAsyncRequestBody.builder().path(file.toPath()).build(); } + /** + * Creates an {@link AsyncRequestBody} that produces data from the contents of a file. + * + * @param configuration configuration for how the SDK should read the file + * @return Implementation of {@link AsyncRequestBody} that reads data from the specified file. + */ + static AsyncRequestBody fromFile(FileRequestBodyConfiguration configuration) { + Validate.notNull(configuration, "configuration"); + return FileAsyncRequestBody.builder() + .path(configuration.path()) + .position(configuration.position()) + .chunkSizeInBytes(configuration.chunkSizeInBytes()) + .numBytesToRead(configuration.numBytesToRead()) + .build(); + } + + /** + * Creates an {@link AsyncRequestBody} that produces data from the contents of a file. + * + *
    + * This is a convenience method that creates an instance of the {@link FileRequestBodyConfiguration} builder, + * avoiding the need to create one manually via {@link FileRequestBodyConfiguration#builder()}. + * + * @param configuration configuration for how the SDK should read the file + * @return Implementation of {@link AsyncRequestBody} that reads data from the specified file. + */ + static AsyncRequestBody fromFile(Consumer configuration) { + Validate.notNull(configuration, "configuration"); + return fromFile(FileRequestBodyConfiguration.builder().applyMutation(configuration).build()); + } + /** * Creates an {@link AsyncRequestBody} that uses a single string as data. * @@ -410,22 +441,18 @@ static AsyncRequestBody empty() { * is 2MB and the default buffer size is 8MB. * *
    - * If content length of this {@link AsyncRequestBody} is present, each divided {@link AsyncRequestBody} is delivered to the - * subscriber right after it's initialized. - *
    - * If content length is null, it is sent after the entire content for that chunk is buffered. - * In this case, the configured {@code maxMemoryUsageInBytes} must be larger than or equal to {@code chunkSizeInBytes}. + * By default, if content length of this {@link AsyncRequestBody} is present, each divided {@link AsyncRequestBody} is + * delivered to the subscriber right after it's initialized. On the other hand, if content length is null, it is sent after + * the entire content for that chunk is buffered. In this case, the configured {@code maxMemoryUsageInBytes} must be larger + * than or equal to {@code chunkSizeInBytes}. Note that this behavior may be different if a specific implementation of this + * interface overrides this method. * * @see AsyncRequestBodySplitConfiguration */ default SdkPublisher split(AsyncRequestBodySplitConfiguration splitConfiguration) { Validate.notNull(splitConfiguration, "splitConfiguration"); - return SplittingPublisher.builder() - .asyncRequestBody(this) - .chunkSizeInBytes(splitConfiguration.chunkSizeInBytes()) - .bufferSizeInBytes(splitConfiguration.bufferSizeInBytes()) - .build(); + return new SplittingPublisher(this, splitConfiguration); } /** diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBodySplitConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBodySplitConfiguration.java index fe51f33b4ff3..45596ab03eaa 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBodySplitConfiguration.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBodySplitConfiguration.java @@ -28,6 +28,12 @@ @SdkPublicApi public final class AsyncRequestBodySplitConfiguration implements ToCopyableBuilder { + private static final long DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024L; + private static final long DEFAULT_BUFFER_SIZE = DEFAULT_CHUNK_SIZE * 4; + private static final AsyncRequestBodySplitConfiguration DEFAULT_CONFIG = builder() + .bufferSizeInBytes(DEFAULT_BUFFER_SIZE) + .chunkSizeInBytes(DEFAULT_CHUNK_SIZE) + .build(); private final Long chunkSizeInBytes; private final Long bufferSizeInBytes; @@ -36,6 +42,10 @@ private AsyncRequestBodySplitConfiguration(DefaultBuilder builder) { this.bufferSizeInBytes = Validate.isPositiveOrNull(builder.bufferSizeInBytes, "bufferSizeInBytes"); } + public static AsyncRequestBodySplitConfiguration defaultConfiguration() { + return DEFAULT_CONFIG; + } + /** * The configured chunk size for each divided {@link AsyncRequestBody}. 
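To make the split behaviour concrete, a short sketch that uses only methods visible in this patch; the chunk and buffer sizes are arbitrary, and body stands for any existing AsyncRequestBody.

    AsyncRequestBodySplitConfiguration splitConfig =
        AsyncRequestBodySplitConfiguration.builder()
                                          .chunkSizeInBytes(8L * 1024 * 1024)   // 8 MiB per divided body
                                          .bufferSizeInBytes(32L * 1024 * 1024) // keep at most 32 MiB buffered
                                          .build();
    SdkPublisher<AsyncRequestBody> parts = body.split(splitConfig);
    // AsyncRequestBodySplitConfiguration.defaultConfiguration() returns the documented
    // defaults of a 2 MiB chunk size and an 8 MiB buffer size.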
*/ diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/SdkPublisher.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/SdkPublisher.java index 58c5dec433c6..5563b716dae6 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/SdkPublisher.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/SdkPublisher.java @@ -20,10 +20,12 @@ import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Predicate; +import java.util.function.Supplier; import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.utils.async.AddingTrailingDataSubscriber; import software.amazon.awssdk.utils.async.BufferingSubscriber; import software.amazon.awssdk.utils.async.EventListeningSubscriber; import software.amazon.awssdk.utils.async.FilteringSubscriber; @@ -118,6 +120,18 @@ default SdkPublisher limit(int limit) { return subscriber -> subscribe(new LimitingSubscriber<>(subscriber, limit)); } + + /** + * Creates a new publisher that emits trailing events provided by {@code trailingDataSupplier} in addition to the + * published events. + * + * @param trailingDataSupplier supplier to provide the trailing data + * @return New publisher that will publish additional events + */ + default SdkPublisher addTrailingData(Supplier> trailingDataSupplier) { + return subscriber -> subscribe(new AddingTrailingDataSubscriber(subscriber, trailingDataSupplier)); + } + /** * Add a callback that will be invoked after this publisher invokes {@link Subscriber#onComplete()}. * diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java index f9e43f242855..1d166aca4426 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java @@ -29,6 +29,7 @@ import static software.amazon.awssdk.core.client.config.SdkClientOption.ASYNC_HTTP_CLIENT; import static software.amazon.awssdk.core.client.config.SdkClientOption.CLIENT_TYPE; import static software.amazon.awssdk.core.client.config.SdkClientOption.CLIENT_USER_AGENT; +import static software.amazon.awssdk.core.client.config.SdkClientOption.COMPRESSION_CONFIGURATION; import static software.amazon.awssdk.core.client.config.SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED; import static software.amazon.awssdk.core.client.config.SdkClientOption.ENDPOINT_OVERRIDDEN; import static software.amazon.awssdk.core.client.config.SdkClientOption.EXECUTION_ATTRIBUTES; @@ -64,6 +65,8 @@ import java.util.function.Supplier; import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.annotations.SdkTestInternalApi; +import software.amazon.awssdk.core.CompressionConfiguration; +import software.amazon.awssdk.core.SdkSystemSetting; import software.amazon.awssdk.core.client.config.ClientAsyncConfiguration; import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; @@ -84,9 +87,11 @@ import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; import 
software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.profiles.Profile; import software.amazon.awssdk.profiles.ProfileFile; import software.amazon.awssdk.profiles.ProfileFileSupplier; import software.amazon.awssdk.profiles.ProfileFileSystemSetting; +import software.amazon.awssdk.profiles.ProfileProperty; import software.amazon.awssdk.retries.AdaptiveRetryStrategy; import software.amazon.awssdk.retries.LegacyRetryStrategy; import software.amazon.awssdk.retries.StandardRetryStrategy; @@ -244,6 +249,7 @@ private SdkClientConfiguration setOverrides(SdkClientConfiguration configuration builder.option(METRIC_PUBLISHERS, clientOverrideConfiguration.metricPublishers()); builder.option(EXECUTION_ATTRIBUTES, clientOverrideConfiguration.executionAttributes()); builder.option(TOKEN_SIGNER, clientOverrideConfiguration.advancedOption(TOKEN_SIGNER).orElse(null)); + builder.option(COMPRESSION_CONFIGURATION, clientOverrideConfiguration.compressionConfiguration().orElse(null)); clientOverrideConfiguration.advancedOption(ENDPOINT_OVERRIDDEN_OVERRIDE).ifPresent(value -> { builder.option(ENDPOINT_OVERRIDDEN, value); @@ -273,14 +279,83 @@ private SdkClientConfiguration mergeGlobalDefaults(SdkClientConfiguration config Optional.ofNullable(configuration.option(PROFILE_FILE_SUPPLIER)) .orElseGet(() -> ProfileFileSupplier.fixedProfileFile(ProfileFile.defaultProfileFile())); - return configuration.merge(c -> c.option(EXECUTION_INTERCEPTORS, new ArrayList<>()) - .option(ADDITIONAL_HTTP_HEADERS, new LinkedHashMap<>()) - .option(PROFILE_FILE, profileFileSupplier.get()) - .option(PROFILE_FILE_SUPPLIER, profileFileSupplier) - .option(PROFILE_NAME, ProfileFileSystemSetting.AWS_PROFILE.getStringValueOrThrow()) - .option(USER_AGENT_PREFIX, SdkUserAgent.create().userAgent()) - .option(USER_AGENT_SUFFIX, "") - .option(CRC32_FROM_COMPRESSED_DATA_ENABLED, false)); + configuration = configuration.merge(c -> c.option(EXECUTION_INTERCEPTORS, new ArrayList<>()) + .option(ADDITIONAL_HTTP_HEADERS, new LinkedHashMap<>()) + .option(PROFILE_FILE, profileFileSupplier.get()) + .option(PROFILE_FILE_SUPPLIER, profileFileSupplier) + .option(PROFILE_NAME, + ProfileFileSystemSetting.AWS_PROFILE.getStringValueOrThrow()) + .option(USER_AGENT_PREFIX, SdkUserAgent.create().userAgent()) + .option(USER_AGENT_SUFFIX, "") + .option(CRC32_FROM_COMPRESSED_DATA_ENABLED, false)); + + return addCompressionConfigGlobalDefaults(configuration); + } + + private SdkClientConfiguration addCompressionConfigGlobalDefaults(SdkClientConfiguration configuration) { + Optional requestCompressionEnabled = getCompressionEnabled(configuration); + Optional minCompressionThreshold = getCompressionThreshold(configuration); + + if (requestCompressionEnabled.isPresent() && minCompressionThreshold.isPresent()) { + return configuration; + } + + Boolean compressionEnabled = requestCompressionEnabled.orElse(null); + Integer compressionThreshold = minCompressionThreshold.orElse(null); + + if (compressionEnabled == null) { + Optional systemSetting = SdkSystemSetting.AWS_DISABLE_REQUEST_COMPRESSION.getBooleanValue(); + if (systemSetting.isPresent()) { + compressionEnabled = !systemSetting.get(); + } else { + Profile profile = configuration.option(PROFILE_FILE_SUPPLIER).get() + .profile(configuration.option(PROFILE_NAME)).orElse(null); + if (profile != null) { + Optional profileSetting = profile.booleanProperty(ProfileProperty.DISABLE_REQUEST_COMPRESSION); + if (profileSetting.isPresent()) { + compressionEnabled = !profileSetting.get(); + } + } + } + } + + if 
(compressionThreshold == null) { + Optional systemSetting = SdkSystemSetting.AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES.getIntegerValue(); + if (systemSetting.isPresent()) { + compressionThreshold = systemSetting.get(); + } else { + Profile profile = configuration.option(PROFILE_FILE_SUPPLIER).get() + .profile(configuration.option(PROFILE_NAME)).orElse(null); + if (profile != null) { + Optional profileSetting = profile.property(ProfileProperty.REQUEST_MIN_COMPRESSION_SIZE_BYTES); + if (profileSetting.isPresent()) { + compressionThreshold = Integer.parseInt(profileSetting.get()); + } + } + } + } + + CompressionConfiguration compressionConfig = + CompressionConfiguration.builder() + .requestCompressionEnabled(compressionEnabled) + .minimumCompressionThresholdInBytes(compressionThreshold) + .build(); + + return configuration.toBuilder().option(COMPRESSION_CONFIGURATION, compressionConfig).build(); + } + + private Optional getCompressionEnabled(SdkClientConfiguration configuration) { + if (configuration.option(COMPRESSION_CONFIGURATION) == null) { + return Optional.empty(); + } + return Optional.ofNullable(configuration.option(COMPRESSION_CONFIGURATION).requestCompressionEnabled()); + } + + private Optional getCompressionThreshold(SdkClientConfiguration configuration) { + if (configuration.option(COMPRESSION_CONFIGURATION) == null) { + return Optional.empty(); + } + return Optional.ofNullable(configuration.option(COMPRESSION_CONFIGURATION).minimumCompressionThresholdInBytes()); } /** @@ -607,6 +682,4 @@ public void close() { // Do nothing, this client is managed by the customer. } } - - } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java index 9f3f7dfbb4a5..e218310d149d 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java @@ -27,6 +27,7 @@ import java.util.function.Consumer; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.ToBuilderIgnoreField; +import software.amazon.awssdk.core.CompressionConfiguration; import software.amazon.awssdk.core.RequestOverrideConfiguration; import software.amazon.awssdk.core.interceptor.ExecutionAttribute; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; @@ -67,6 +68,7 @@ public final class ClientOverrideConfiguration private final List metricPublishers; private final ExecutionAttributes executionAttributes; private final ScheduledExecutorService scheduledExecutorService; + private final CompressionConfiguration compressionConfiguration; /** * Initialize this configuration. Private to require use of {@link #builder()}. 
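Stepping back to the SdkPublisher#addTrailingData method introduced earlier in this patch: its generic signature is elided in this excerpt, so the sketch below assumes the supplier returns the trailing items to append once the source publisher finishes (publisher is a placeholder SdkPublisher<String>, and java.util.Collections is assumed to be imported).

    SdkPublisher<String> withTrailer =
        publisher.addTrailingData(() -> Collections.singletonList("final-record")); // appended after the source's own events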
@@ -84,6 +86,7 @@ private ClientOverrideConfiguration(Builder builder) { this.metricPublishers = Collections.unmodifiableList(new ArrayList<>(builder.metricPublishers())); this.executionAttributes = ExecutionAttributes.unmodifiableExecutionAttributes(builder.executionAttributes()); this.scheduledExecutorService = builder.scheduledExecutorService(); + this.compressionConfiguration = builder.compressionConfiguration(); } @Override @@ -101,7 +104,8 @@ public Builder toBuilder() { .defaultProfileName(defaultProfileName) .executionAttributes(executionAttributes) .metricPublishers(metricPublishers) - .scheduledExecutorService(scheduledExecutorService); + .scheduledExecutorService(scheduledExecutorService) + .compressionConfiguration(compressionConfiguration); } /** @@ -244,20 +248,31 @@ public ExecutionAttributes executionAttributes() { return executionAttributes; } + /** + * The compression configuration object, which includes options to enable/disable compression and set the minimum + * compression threshold. + * + * @see Builder#compressionConfiguration(CompressionConfiguration) + */ + public Optional compressionConfiguration() { + return Optional.ofNullable(compressionConfiguration); + } + @Override public String toString() { return ToString.builder("ClientOverrideConfiguration") - .add("headers", headers) - .add("retryPolicy", retryPolicy) - .add("retryStrategy", retryStrategy) - .add("apiCallTimeout", apiCallTimeout) - .add("apiCallAttemptTimeout", apiCallAttemptTimeout) - .add("executionInterceptors", executionInterceptors) - .add("advancedOptions", advancedOptions) - .add("profileFile", defaultProfileFile) - .add("profileName", defaultProfileName) - .add("scheduledExecutorService", scheduledExecutorService) - .build(); + .add("headers", headers) + .add("retryPolicy", retryPolicy) + .add("retryStrategy", retryStrategy) + .add("apiCallTimeout", apiCallTimeout) + .add("apiCallAttemptTimeout", apiCallAttemptTimeout) + .add("executionInterceptors", executionInterceptors) + .add("advancedOptions", advancedOptions) + .add("profileFile", defaultProfileFile) + .add("profileName", defaultProfileName) + .add("scheduledExecutorService", scheduledExecutorService) + .add("compressionConfiguration", compressionConfiguration) + .build(); } /** @@ -556,6 +571,22 @@ default Builder retryStrategy(Consumer> mutator) { Builder putExecutionAttribute(ExecutionAttribute attribute, T value); ExecutionAttributes executionAttributes(); + + /** + * Sets the {@link CompressionConfiguration} for this client. + */ + Builder compressionConfiguration(CompressionConfiguration compressionConfiguration); + + /** + * Sets the {@link CompressionConfiguration} for this client. 
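For reference, a short usage sketch of the new override-configuration surface: it combines the Consumer-based overload added here with the CompressionConfiguration builder setters used earlier in this patch. The wrapper class name and the threshold value are arbitrary examples:

import software.amazon.awssdk.core.CompressionConfiguration;
import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;

// Example of opting a client into request compression with a custom minimum payload size.
public final class CompressionOverrideExample {
    public static ClientOverrideConfiguration withCompression() {
        return ClientOverrideConfiguration.builder()
                                          .compressionConfiguration(c -> c.requestCompressionEnabled(true)
                                                                          .minimumCompressionThresholdInBytes(16_384))
                                          .build();
    }
}

The same CompressionConfiguration shape can also be supplied per request through RequestOverrideConfiguration, which CompressRequestStage consults before the client-level settings when resolving the effective values.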
+ */ + default Builder compressionConfiguration(Consumer compressionConfiguration) { + return compressionConfiguration(CompressionConfiguration.builder() + .applyMutation(compressionConfiguration) + .build()); + } + + CompressionConfiguration compressionConfiguration(); } /** @@ -574,6 +605,7 @@ private static final class DefaultClientOverrideConfigurationBuilder implements private List metricPublishers = new ArrayList<>(); private ExecutionAttributes.Builder executionAttributes = ExecutionAttributes.builder(); private ScheduledExecutorService scheduledExecutorService; + private CompressionConfiguration compressionConfiguration; @Override public Builder headers(Map> headers) { @@ -779,9 +811,24 @@ public ExecutionAttributes executionAttributes() { return executionAttributes.build(); } + @Override + public Builder compressionConfiguration(CompressionConfiguration compressionConfiguration) { + this.compressionConfiguration = compressionConfiguration; + return this; + } + + public void setRequestCompressionEnabled(CompressionConfiguration compressionConfiguration) { + compressionConfiguration(compressionConfiguration); + } + + @Override + public CompressionConfiguration compressionConfiguration() { + return compressionConfiguration; + } + @Override public ClientOverrideConfiguration build() { return new ClientOverrideConfiguration(this); } } -} \ No newline at end of file +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java index 2f528beaecb0..4d301cab057f 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java @@ -23,6 +23,7 @@ import java.util.function.Supplier; import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.core.ClientType; +import software.amazon.awssdk.core.CompressionConfiguration; import software.amazon.awssdk.core.ServiceConfiguration; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; @@ -197,6 +198,12 @@ public final class SdkClientOption extends ClientOption { public static final SdkClientOption CLIENT_CONTEXT_PARAMS = new SdkClientOption<>(AttributeMap.class); + /** + * Option to specify the compression configuration settings. 
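A brief sketch of how the new option is read back off the resolved client configuration; CompressRequestStage, later in this patch, performs this same lookup through its HttpClientDependencies. The wrapper class name is illustrative only:

import java.util.Optional;
import software.amazon.awssdk.core.CompressionConfiguration;
import software.amazon.awssdk.core.client.config.SdkClientConfiguration;
import software.amazon.awssdk.core.client.config.SdkClientOption;

// Illustrative lookup of the compression settings stored under the new client option.
public final class CompressionOptionLookupSketch {
    public static Optional<CompressionConfiguration> compressionConfig(SdkClientConfiguration configuration) {
        return Optional.ofNullable(configuration.option(SdkClientOption.COMPRESSION_CONFIGURATION));
    }
}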
+ */ + public static final SdkClientOption COMPRESSION_CONFIGURATION = + new SdkClientOption<>(CompressionConfiguration.class); + private SdkClientOption(Class valueClass) { super(valueClass); } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java index 6e71448dc98f..4abbb390a60f 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java @@ -109,7 +109,6 @@ public class SdkExecutionAttribute { public static final ExecutionAttribute HTTP_RESPONSE_CHECKSUM_VALIDATION = new ExecutionAttribute<>( "HttpResponseChecksumValidation"); - protected SdkExecutionAttribute() { } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java index 3080d0fd47b3..75e999bc1020 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java @@ -18,6 +18,7 @@ import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.endpoints.Endpoint; import software.amazon.awssdk.endpoints.EndpointProvider; import software.amazon.awssdk.http.SdkHttpExecutionAttributes; @@ -92,6 +93,12 @@ public final class SdkInternalExecutionAttribute extends SdkExecutionAttribute { public static final ExecutionAttribute IS_DISCOVERED_ENDPOINT = new ExecutionAttribute<>("IsDiscoveredEndpoint"); + /** + * The supported compression algorithms for an operation, and whether the operation is streaming or not. 
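A small sketch of how this attribute is consumed; CompressRequestStage, later in this patch, performs the same null check and reads the same two properties before deciding whether and how to compress. The helper class and method names are illustrative:

import software.amazon.awssdk.core.interceptor.ExecutionAttributes;
import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute;
import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression;

// Illustrative reads of the new attribute: a missing attribute means the operation has no
// requestCompression trait and is never compressed.
public final class RequestCompressionAttributeSketch {
    public static boolean declaresGzip(ExecutionAttributes attributes) {
        RequestCompression trait = attributes.getAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION);
        return trait != null && trait.getEncodings().contains("gzip");
    }

    public static boolean isStreamingOperation(ExecutionAttributes attributes) {
        RequestCompression trait = attributes.getAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION);
        return trait != null && trait.isStreaming();
    }
}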
+ */ + public static final ExecutionAttribute REQUEST_COMPRESSION = + new ExecutionAttribute<>("RequestCompression"); + private SdkInternalExecutionAttribute() { } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBody.java index 146007927c63..3f7dc927a95b 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBody.java @@ -16,7 +16,8 @@ package software.amazon.awssdk.core.internal.async; import static software.amazon.awssdk.core.HttpChecksumConstant.DEFAULT_ASYNC_CHUNK_SIZE; -import static software.amazon.awssdk.core.internal.util.ChunkContentUtils.calculateChecksumContentLength; +import static software.amazon.awssdk.core.internal.util.ChunkContentUtils.LAST_CHUNK_LEN; +import static software.amazon.awssdk.core.internal.util.ChunkContentUtils.calculateChecksumTrailerLength; import static software.amazon.awssdk.core.internal.util.ChunkContentUtils.calculateChunkLength; import static software.amazon.awssdk.core.internal.util.ChunkContentUtils.createChecksumTrailer; import static software.amazon.awssdk.core.internal.util.ChunkContentUtils.createChunk; @@ -28,11 +29,13 @@ import org.reactivestreams.Subscription; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.SdkPublisher; import software.amazon.awssdk.core.checksums.Algorithm; import software.amazon.awssdk.core.checksums.SdkChecksum; import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.utils.BinaryUtils; import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.async.DelegatingSubscriber; import software.amazon.awssdk.utils.builder.SdkBuilder; /** @@ -129,13 +132,12 @@ public ChecksumCalculatingAsyncRequestBody.Builder trailerHeader(String trailerH @Override public Optional contentLength() { - if (wrapped.contentLength().isPresent() && algorithm != null) { return Optional.of(calculateChunkLength(wrapped.contentLength().get()) - + calculateChecksumContentLength(algorithm, trailerHeader)); - } else { - return wrapped.contentLength(); + + LAST_CHUNK_LEN + + calculateChecksumTrailerLength(algorithm, trailerHeader)); } + return wrapped.contentLength(); } @Override @@ -149,12 +151,15 @@ public void subscribe(Subscriber s) { if (sdkChecksum != null) { sdkChecksum.reset(); } - SynchronousChunkBuffer synchronousChunkBuffer = new SynchronousChunkBuffer(totalBytes); - wrapped.flatMapIterable(synchronousChunkBuffer::buffer) + alwaysInvokeOnNext(wrapped).flatMapIterable(synchronousChunkBuffer::buffer) .subscribe(new ChecksumCalculatingSubscriber(s, sdkChecksum, trailerHeader, totalBytes)); } + private SdkPublisher alwaysInvokeOnNext(SdkPublisher source) { + return subscriber -> source.subscribe(new OnNextGuaranteedSubscriber(subscriber)); + } + private static final class ChecksumCalculatingSubscriber implements Subscriber { private final Subscriber wrapped; @@ -243,4 +248,30 @@ private Iterable buffer(ByteBuffer bytes) { } } + public static class OnNextGuaranteedSubscriber extends DelegatingSubscriber { + + private volatile boolean onNextInvoked; + + public OnNextGuaranteedSubscriber(Subscriber subscriber) { + super(subscriber); + } 
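+            // The overrides below guarantee that the delegate receives at least one onNext signal:
+            // if the stream completes without ever emitting data, an empty ByteBuffer is pushed first,
+            // so the downstream checksum-calculating pipeline still produces its trailer for
+            // zero-length bodies.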
+ + @Override + public void onNext(ByteBuffer t) { + if (!onNextInvoked) { + onNextInvoked = true; + } + + subscriber.onNext(t); + } + + @Override + public void onComplete() { + if (!onNextInvoked) { + subscriber.onNext(ByteBuffer.wrap(new byte[0])); + } + super.onComplete(); + } + } + } \ No newline at end of file diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java index c171b0787678..ee0f20b64969 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java @@ -21,14 +21,16 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.concurrent.atomic.AtomicLong; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.utils.Logger; -import software.amazon.awssdk.utils.Validate; import software.amazon.awssdk.utils.builder.SdkBuilder; /** - * Class that will buffer incoming BufferBytes of totalBytes length to chunks of bufferSize* + * Class that will buffer incoming BufferBytes to chunks of bufferSize. + * If totalBytes is not provided, i.e. content-length is unknown, {@link #getBufferedData()} should be used in the Subscriber's + * {@code onComplete()} to check for a final chunk that is smaller than the chunk size, and send if present. */ @SdkInternalApi public final class ChunkBuffer { @@ -36,11 +38,9 @@ public final class ChunkBuffer { private final AtomicLong transferredBytes; private final ByteBuffer currentBuffer; private final int chunkSize; - private final long totalBytes; + private final Long totalBytes; private ChunkBuffer(Long totalBytes, Integer bufferSize) { - Validate.notNull(totalBytes, "The totalBytes must not be null"); - int chunkSize = bufferSize != null ? bufferSize : DEFAULT_ASYNC_CHUNK_SIZE; this.chunkSize = chunkSize; this.currentBuffer = ByteBuffer.allocate(chunkSize); @@ -52,14 +52,12 @@ public static Builder builder() { return new DefaultBuilder(); } - /** * Split the input {@link ByteBuffer} into multiple smaller {@link ByteBuffer}s, each of which contains {@link #chunkSize} * worth of bytes. If the last chunk of the input ByteBuffer contains less than {@link #chunkSize} data, the last chunk will * be buffered. 
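Because totalBytes is now optional, a caller that does not know the content length must drain the buffer itself once the upstream completes, as the class Javadoc above describes. A minimal usage sketch (ChunkBuffer is an internal class, used here purely for illustration; the wrapper class name is made up):

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import software.amazon.awssdk.core.internal.async.ChunkBuffer;

// Chunks an in-memory sequence of buffers without knowing the total length up front.
public final class ChunkBufferSketch {
    public static List<ByteBuffer> chunk(List<ByteBuffer> upstream, int chunkSize) {
        ChunkBuffer buffer = ChunkBuffer.builder()
                                        .bufferSize(chunkSize)   // no totalBytes: length unknown
                                        .build();
        List<ByteBuffer> chunks = new ArrayList<>();
        for (ByteBuffer b : upstream) {
            buffer.split(b).forEach(chunks::add);                // full chunks are emitted eagerly
        }
        buffer.getBufferedData().ifPresent(chunks::add);         // trailing partial chunk, if any
        return chunks;
    }
}

CompressionAsyncRequestBody, added later in this patch, relies on exactly this contract by appending getBufferedData() as trailing data after the upstream completes.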
*/ public synchronized Iterable split(ByteBuffer inputByteBuffer) { - if (!inputByteBuffer.hasRemaining()) { return Collections.singletonList(inputByteBuffer); } @@ -71,7 +69,7 @@ public synchronized Iterable split(ByteBuffer inputByteBuffer) { fillCurrentBuffer(inputByteBuffer); if (isCurrentBufferFull()) { - addCurrentBufferToIterable(byteBuffers, chunkSize); + addCurrentBufferToIterable(byteBuffers); } } @@ -82,8 +80,7 @@ public synchronized Iterable split(ByteBuffer inputByteBuffer) { // If this is the last chunk, add data buffered to the iterable if (isLastChunk()) { - int remainingBytesInBuffer = currentBuffer.position(); - addCurrentBufferToIterable(byteBuffers, remainingBytesInBuffer); + addCurrentBufferToIterable(byteBuffers); } return byteBuffers; } @@ -111,19 +108,38 @@ private void splitRemainingInputByteBuffer(ByteBuffer inputByteBuffer, List getBufferedData() { + int remainingBytesInBuffer = currentBuffer.position(); + + if (remainingBytesInBuffer == 0) { + return Optional.empty(); + } + + ByteBuffer bufferedChunk = ByteBuffer.allocate(remainingBytesInBuffer); + currentBuffer.flip(); + bufferedChunk.put(currentBuffer); + bufferedChunk.flip(); + return Optional.of(bufferedChunk); + } + private boolean isLastChunk() { + if (totalBytes == null) { + return false; + } long remainingBytes = totalBytes - transferredBytes.get(); return remainingBytes != 0 && remainingBytes == currentBuffer.position(); } - private void addCurrentBufferToIterable(List byteBuffers, int capacity) { - ByteBuffer bufferedChunk = ByteBuffer.allocate(capacity); - currentBuffer.flip(); - bufferedChunk.put(currentBuffer); - bufferedChunk.flip(); - byteBuffers.add(bufferedChunk); - transferredBytes.addAndGet(bufferedChunk.remaining()); - currentBuffer.clear(); + private void addCurrentBufferToIterable(List byteBuffers) { + Optional bufferedChunk = getBufferedData(); + if (bufferedChunk.isPresent()) { + byteBuffers.add(bufferedChunk.get()); + transferredBytes.addAndGet(bufferedChunk.get().remaining()); + currentBuffer.clear(); + } } private void fillCurrentBuffer(ByteBuffer inputByteBuffer) { @@ -151,8 +167,6 @@ public interface Builder extends SdkBuilder { Builder bufferSize(int bufferSize); Builder totalBytes(long totalBytes); - - } private static final class DefaultBuilder implements Builder { diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBody.java new file mode 100644 index 000000000000..246bcb10578f --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBody.java @@ -0,0 +1,160 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.async; + +import static software.amazon.awssdk.core.internal.io.AwsChunkedInputStream.DEFAULT_CHUNK_SIZE; + +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.Optional; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.async.DelegatingSubscriber; +import software.amazon.awssdk.utils.async.FlatteningSubscriber; +import software.amazon.awssdk.utils.builder.SdkBuilder; + +/** + * Wrapper class to wrap an AsyncRequestBody. + * This will chunk and compress the payload with the provided {@link Compressor}. + */ +@SdkInternalApi +public class CompressionAsyncRequestBody implements AsyncRequestBody { + + private final AsyncRequestBody wrapped; + private final Compressor compressor; + private final ChunkBuffer chunkBuffer; + + private CompressionAsyncRequestBody(DefaultBuilder builder) { + this.wrapped = Validate.paramNotNull(builder.asyncRequestBody, "asyncRequestBody"); + this.compressor = Validate.paramNotNull(builder.compressor, "compressor"); + int chunkSize = builder.chunkSize != null ? builder.chunkSize : DEFAULT_CHUNK_SIZE; + this.chunkBuffer = ChunkBuffer.builder() + .bufferSize(chunkSize) + .build(); + } + + @Override + public void subscribe(Subscriber s) { + Validate.notNull(s, "Subscription MUST NOT be null."); + + SdkPublisher> split = + split(wrapped).addTrailingData(() -> Collections.singleton(getBufferedDataIfPresent())); + SdkPublisher flattening = flattening(split); + flattening.map(compressor::compress).subscribe(s); + } + + @Override + public Optional contentLength() { + return wrapped.contentLength(); + } + + @Override + public String contentType() { + return wrapped.contentType(); + } + + private SdkPublisher> split(SdkPublisher source) { + return subscriber -> source.subscribe(new SplittingSubscriber(subscriber)); + } + + private Iterable getBufferedDataIfPresent() { + return chunkBuffer.getBufferedData() + .map(Collections::singletonList) + .orElse(Collections.emptyList()); + } + + private SdkPublisher flattening(SdkPublisher> source) { + return subscriber -> source.subscribe(new FlatteningSubscriber<>(subscriber)); + } + + /** + * @return Builder instance to construct a {@link CompressionAsyncRequestBody}. + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + public interface Builder extends SdkBuilder { + + /** + * Sets the AsyncRequestBody that will be wrapped. + * @param asyncRequestBody + * @return This builder for method chaining. + */ + Builder asyncRequestBody(AsyncRequestBody asyncRequestBody); + + /** + * Sets the compressor to compress the request. + * @param compressor + * @return This builder for method chaining. + */ + Builder compressor(Compressor compressor); + + /** + * Sets the chunk size. Default size is 128 * 1024. + * @param chunkSize + * @return This builder for method chaining. 
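A short sketch of the wrapping this class provides, mirroring how CompressRequestStage (later in this patch) builds it for streaming async payloads; the internal classes are used directly here and the helper name is illustrative:

import software.amazon.awssdk.core.async.AsyncRequestBody;
import software.amazon.awssdk.core.internal.async.CompressionAsyncRequestBody;
import software.amazon.awssdk.core.internal.compression.CompressorType;

// Wraps an async request body so each buffered chunk is gzip-compressed as it flows downstream.
public final class CompressionAsyncRequestBodySketch {
    public static AsyncRequestBody gzipWrapped(AsyncRequestBody original) {
        return CompressionAsyncRequestBody.builder()
                                          .asyncRequestBody(original)
                                          .compressor(CompressorType.of("gzip").newCompressor())
                                          .build();
    }
}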
+ */ + Builder chunkSize(Integer chunkSize); + } + + private static final class DefaultBuilder implements Builder { + + private AsyncRequestBody asyncRequestBody; + private Compressor compressor; + private Integer chunkSize; + + @Override + public CompressionAsyncRequestBody build() { + return new CompressionAsyncRequestBody(this); + } + + @Override + public Builder asyncRequestBody(AsyncRequestBody asyncRequestBody) { + this.asyncRequestBody = asyncRequestBody; + return this; + } + + @Override + public Builder compressor(Compressor compressor) { + this.compressor = compressor; + return this; + } + + @Override + public Builder chunkSize(Integer chunkSize) { + this.chunkSize = chunkSize; + return this; + } + } + + private final class SplittingSubscriber extends DelegatingSubscriber> { + + protected SplittingSubscriber(Subscriber> subscriber) { + super(subscriber); + } + + @Override + public void onNext(ByteBuffer byteBuffer) { + Iterable buffers = chunkBuffer.split(byteBuffer); + subscriber.onNext(buffers); + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBody.java index 8f7b2a483607..f8bbdd552088 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBody.java @@ -33,9 +33,12 @@ import org.reactivestreams.Subscription; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncRequestBodySplitConfiguration; +import software.amazon.awssdk.core.async.SdkPublisher; import software.amazon.awssdk.core.internal.util.Mimetype; import software.amazon.awssdk.core.internal.util.NoopSubscription; import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.NumericUtils; import software.amazon.awssdk.utils.Validate; import software.amazon.awssdk.utils.builder.SdkBuilder; @@ -65,16 +68,47 @@ public final class FileAsyncRequestBody implements AsyncRequestBody { * Size (in bytes) of ByteBuffer chunks read from the file and delivered to the subscriber. */ private final int chunkSizeInBytes; + private final long position; + private final long numBytesToRead; private FileAsyncRequestBody(DefaultBuilder builder) { this.path = builder.path; this.chunkSizeInBytes = builder.chunkSizeInBytes == null ? DEFAULT_CHUNK_SIZE : builder.chunkSizeInBytes; this.fileLength = invokeSafely(() -> Files.size(path)); + this.position = builder.position == null ? 0 : Validate.isNotNegative(builder.position, "position"); + this.numBytesToRead = builder.numBytesToRead == null ? 
fileLength - this.position : + Validate.isNotNegative(builder.numBytesToRead, "numBytesToRead"); + } + + @Override + public SdkPublisher split(AsyncRequestBodySplitConfiguration splitConfiguration) { + Validate.notNull(splitConfiguration, "splitConfiguration"); + return new FileAsyncRequestBodySplitHelper(this, splitConfiguration).split(); + } + + public Path path() { + return path; + } + + public long fileLength() { + return fileLength; + } + + public int chunkSizeInBytes() { + return chunkSizeInBytes; + } + + public long position() { + return position; + } + + public long numBytesToRead() { + return numBytesToRead; } @Override public Optional contentLength() { - return Optional.of(fileLength); + return Optional.of(numBytesToRead); } @Override @@ -91,7 +125,7 @@ public void subscribe(Subscriber s) { // We need to synchronize here because the subscriber could call // request() from within onSubscribe which would potentially // trigger onNext before onSubscribe is finished. - Subscription subscription = new FileSubscription(path, channel, s, chunkSizeInBytes); + Subscription subscription = new FileSubscription(channel, s); synchronized (subscription) { s.onSubscribe(subscription); @@ -128,7 +162,7 @@ public interface Builder extends SdkBuilder { Builder path(Path path); /** - * Sets the size of chunks read from the file. Increasing this will cause more data to be buffered into memory but + * Sets the size of chunks to read from the file. Increasing this will cause more data to be buffered into memory but * may yield better latencies. Decreasing this will reduce memory usage but may cause reduced latency. Setting this value * is very dependent on upload speed and requires some performance testing to tune. * @@ -139,12 +173,33 @@ public interface Builder extends SdkBuilder { */ Builder chunkSizeInBytes(Integer chunkSize); + /** + * Sets the file position at which the request body begins. + * + *
<p>
    By default, it's 0, i.e., reading from the beginning. + * + * @param position the position of the file + * @return The builder for method chaining. + */ + Builder position(Long position); + + /** + * Sets the number of bytes to read from this file. + * + *
<p>
    By default, it's same as the file length. + * + * @param numBytesToRead number of bytes to read + * @return The builder for method chaining. + */ + Builder numBytesToRead(Long numBytesToRead); } private static final class DefaultBuilder implements Builder { + private Long position; private Path path; private Integer chunkSizeInBytes; + private Long numBytesToRead; @Override public Builder path(Path path) { @@ -162,6 +217,18 @@ public Builder chunkSizeInBytes(Integer chunkSizeInBytes) { return this; } + @Override + public Builder position(Long position) { + this.position = position; + return this; + } + + @Override + public Builder numBytesToRead(Long numBytesToRead) { + this.numBytesToRead = numBytesToRead; + return this; + } + public void setChunkSizeInBytes(Integer chunkSizeInBytes) { chunkSizeInBytes(chunkSizeInBytes); } @@ -175,14 +242,12 @@ public FileAsyncRequestBody build() { /** * Reads the file for one subscriber. */ - private static final class FileSubscription implements Subscription { - private final Path path; + private final class FileSubscription implements Subscription { private final AsynchronousFileChannel inputChannel; private final Subscriber subscriber; - private final int chunkSize; - private final AtomicLong position = new AtomicLong(0); - private final AtomicLong remainingBytes = new AtomicLong(0); + private final AtomicLong currentPosition; + private final AtomicLong remainingBytes; private final long sizeAtStart; private final FileTime modifiedTimeAtStart; private long outstandingDemand = 0; @@ -190,17 +255,14 @@ private static final class FileSubscription implements Subscription { private volatile boolean done = false; private final Object lock = new Object(); - private FileSubscription(Path path, - AsynchronousFileChannel inputChannel, - Subscriber subscriber, - int chunkSize) throws IOException { - this.path = path; + private FileSubscription(AsynchronousFileChannel inputChannel, + Subscriber subscriber) throws IOException { this.inputChannel = inputChannel; this.subscriber = subscriber; - this.chunkSize = chunkSize; this.sizeAtStart = inputChannel.size(); this.modifiedTimeAtStart = Files.getLastModifiedTime(path); - this.remainingBytes.set(Validate.isNotNegative(sizeAtStart, "size")); + this.remainingBytes = new AtomicLong(numBytesToRead); + this.currentPosition = new AtomicLong(position); } @Override @@ -255,8 +317,8 @@ private void readData() { return; } - ByteBuffer buffer = ByteBuffer.allocate(chunkSize); - inputChannel.read(buffer, position.get(), buffer, new CompletionHandler() { + ByteBuffer buffer = ByteBuffer.allocate(Math.min(chunkSizeInBytes, NumericUtils.saturatedCast(remainingBytes.get()))); + inputChannel.read(buffer, currentPosition.get(), buffer, new CompletionHandler() { @Override public void completed(Integer result, ByteBuffer attachment) { try { @@ -264,7 +326,7 @@ public void completed(Integer result, ByteBuffer attachment) { attachment.flip(); int readBytes = attachment.remaining(); - position.addAndGet(readBytes); + currentPosition.addAndGet(readBytes); remainingBytes.addAndGet(-readBytes); signalOnNext(attachment); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodySplitHelper.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodySplitHelper.java new file mode 100644 index 000000000000..4b0acfbd81f2 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodySplitHelper.java @@ -0,0 
+1,185 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.async; + +import java.nio.ByteBuffer; +import java.nio.file.Path; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkTestInternalApi; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncRequestBodySplitConfiguration; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.async.SimplePublisher; + +/** + * A helper class to split a {@link FileAsyncRequestBody} to multiple smaller async request bodies. It ensures the buffer used to + * be under the configured size via {@link AsyncRequestBodySplitConfiguration#bufferSizeInBytes()} by tracking the number of + * concurrent ongoing {@link AsyncRequestBody}s. + */ +@SdkInternalApi +public final class FileAsyncRequestBodySplitHelper { + private static final Logger log = Logger.loggerFor(FileAsyncRequestBodySplitHelper.class); + + private final AtomicBoolean isSendingRequestBody = new AtomicBoolean(false); + private final AtomicLong remainingBytes; + + private final long totalContentLength; + private final Path path; + private final int bufferPerAsyncRequestBody; + private final long totalBufferSize; + private final long chunkSize; + + private volatile boolean isDone = false; + + private AtomicInteger numAsyncRequestBodiesInFlight = new AtomicInteger(0); + private AtomicInteger chunkIndex = new AtomicInteger(0); + + public FileAsyncRequestBodySplitHelper(FileAsyncRequestBody asyncRequestBody, + AsyncRequestBodySplitConfiguration splitConfiguration) { + Validate.notNull(asyncRequestBody, "asyncRequestBody"); + Validate.notNull(splitConfiguration, "splitConfiguration"); + Validate.isTrue(asyncRequestBody.contentLength().isPresent(), "Content length must be present", asyncRequestBody); + this.totalContentLength = asyncRequestBody.contentLength().get(); + this.remainingBytes = new AtomicLong(totalContentLength); + this.path = asyncRequestBody.path(); + this.chunkSize = splitConfiguration.chunkSizeInBytes() == null ? + AsyncRequestBodySplitConfiguration.defaultConfiguration().chunkSizeInBytes() : + splitConfiguration.chunkSizeInBytes(); + this.totalBufferSize = splitConfiguration.bufferSizeInBytes() == null ? 
+ AsyncRequestBodySplitConfiguration.defaultConfiguration().bufferSizeInBytes() : + splitConfiguration.bufferSizeInBytes(); + this.bufferPerAsyncRequestBody = asyncRequestBody.chunkSizeInBytes(); + } + + public SdkPublisher split() { + + SimplePublisher simplePublisher = new SimplePublisher<>(); + + try { + sendAsyncRequestBody(simplePublisher); + } catch (Throwable throwable) { + simplePublisher.error(throwable); + } + + return SdkPublisher.adapt(simplePublisher); + } + + private void sendAsyncRequestBody(SimplePublisher simplePublisher) { + do { + if (!isSendingRequestBody.compareAndSet(false, true)) { + return; + } + + try { + doSendAsyncRequestBody(simplePublisher); + } finally { + isSendingRequestBody.set(false); + } + } while (shouldSendMore()); + } + + private void doSendAsyncRequestBody(SimplePublisher simplePublisher) { + while (shouldSendMore()) { + AsyncRequestBody currentAsyncRequestBody = newFileAsyncRequestBody(simplePublisher); + simplePublisher.send(currentAsyncRequestBody); + numAsyncRequestBodiesInFlight.incrementAndGet(); + checkCompletion(simplePublisher, currentAsyncRequestBody); + } + } + + private void checkCompletion(SimplePublisher simplePublisher, AsyncRequestBody currentAsyncRequestBody) { + long remaining = remainingBytes.addAndGet(-currentAsyncRequestBody.contentLength().get()); + + if (remaining == 0) { + isDone = true; + simplePublisher.complete(); + } else if (remaining < 0) { + isDone = true; + simplePublisher.error(SdkClientException.create( + "Unexpected error occurred. Remaining data is negative: " + remaining)); + } + } + + private void startNextRequestBody(SimplePublisher simplePublisher) { + numAsyncRequestBodiesInFlight.decrementAndGet(); + sendAsyncRequestBody(simplePublisher); + } + + private AsyncRequestBody newFileAsyncRequestBody(SimplePublisher simplePublisher) { + long position = chunkSize * chunkIndex.getAndIncrement(); + long numBytesToReadForThisChunk = Math.min(totalContentLength - position, chunkSize); + FileAsyncRequestBody fileAsyncRequestBody = FileAsyncRequestBody.builder() + .path(path) + .position(position) + .numBytesToRead(numBytesToReadForThisChunk) + .build(); + return new FileAsyncRequestBodyWrapper(fileAsyncRequestBody, simplePublisher); + } + + /** + * Should not send more if it's done OR sending next request body would exceed the total buffer size + */ + private boolean shouldSendMore() { + if (isDone) { + return false; + } + + long currentUsedBuffer = (long) numAsyncRequestBodiesInFlight.get() * bufferPerAsyncRequestBody; + return currentUsedBuffer + bufferPerAsyncRequestBody <= totalBufferSize; + } + + @SdkTestInternalApi + AtomicInteger numAsyncRequestBodiesInFlight() { + return numAsyncRequestBodiesInFlight; + } + + private final class FileAsyncRequestBodyWrapper implements AsyncRequestBody { + + private final FileAsyncRequestBody fileAsyncRequestBody; + private final SimplePublisher simplePublisher; + + FileAsyncRequestBodyWrapper(FileAsyncRequestBody fileAsyncRequestBody, + SimplePublisher simplePublisher) { + this.fileAsyncRequestBody = fileAsyncRequestBody; + this.simplePublisher = simplePublisher; + } + + @Override + public void subscribe(Subscriber s) { + fileAsyncRequestBody.doAfterOnComplete(() -> startNextRequestBody(simplePublisher)) + // The reason we still need to call startNextRequestBody when the subscription is + // cancelled is that upstream could cancel the subscription even though the stream has + // finished successfully before onComplete. 
If this happens, doAfterOnComplete callback + // will never be invoked, and if the current buffer is full, the publisher will stop + // sending new FileAsyncRequestBody, leading to uncompleted future. + .doAfterOnCancel(() -> startNextRequestBody(simplePublisher)) + .subscribe(s); + } + + @Override + public Optional contentLength() { + return fileAsyncRequestBody.contentLength(); + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/SplittingPublisher.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/SplittingPublisher.java index c56d1b6437d9..6d8d18a14754 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/SplittingPublisher.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/SplittingPublisher.java @@ -24,6 +24,7 @@ import org.reactivestreams.Subscription; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncRequestBodySplitConfiguration; import software.amazon.awssdk.core.async.SdkPublisher; import software.amazon.awssdk.core.exception.NonRetryableException; import software.amazon.awssdk.core.internal.util.NoopSubscription; @@ -41,18 +42,24 @@ @SdkInternalApi public class SplittingPublisher implements SdkPublisher { private static final Logger log = Logger.loggerFor(SplittingPublisher.class); - private static final long DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024L; - private static final long DEFAULT_BUFFER_SIZE = DEFAULT_CHUNK_SIZE * 4; private final AsyncRequestBody upstreamPublisher; private final SplittingSubscriber splittingSubscriber; private final SimplePublisher downstreamPublisher = new SimplePublisher<>(); private final long chunkSizeInBytes; private final long bufferSizeInBytes; - private SplittingPublisher(Builder builder) { - this.upstreamPublisher = Validate.paramNotNull(builder.asyncRequestBody, "asyncRequestBody"); - this.chunkSizeInBytes = builder.chunkSizeInBytes == null ? DEFAULT_CHUNK_SIZE : builder.chunkSizeInBytes; - this.bufferSizeInBytes = builder.bufferSizeInBytes == null ? DEFAULT_BUFFER_SIZE : builder.bufferSizeInBytes; + public SplittingPublisher(AsyncRequestBody asyncRequestBody, + AsyncRequestBodySplitConfiguration splitConfiguration) { + this.upstreamPublisher = Validate.paramNotNull(asyncRequestBody, "asyncRequestBody"); + Validate.notNull(splitConfiguration, "splitConfiguration"); + this.chunkSizeInBytes = splitConfiguration.chunkSizeInBytes() == null ? + AsyncRequestBodySplitConfiguration.defaultConfiguration().chunkSizeInBytes() : + splitConfiguration.chunkSizeInBytes(); + + this.bufferSizeInBytes = splitConfiguration.bufferSizeInBytes() == null ? 
+ AsyncRequestBodySplitConfiguration.defaultConfiguration().bufferSizeInBytes() : + splitConfiguration.bufferSizeInBytes(); + this.splittingSubscriber = new SplittingSubscriber(upstreamPublisher.contentLength().orElse(null)); if (!upstreamPublisher.contentLength().isPresent()) { @@ -62,10 +69,6 @@ private SplittingPublisher(Builder builder) { } } - public static Builder builder() { - return new Builder(); - } - @Override public void subscribe(Subscriber downstreamSubscriber) { downstreamPublisher.subscribe(downstreamSubscriber); @@ -303,29 +306,4 @@ private void addDataBuffered(int length) { } } } - - public static final class Builder { - private AsyncRequestBody asyncRequestBody; - private Long chunkSizeInBytes; - private Long bufferSizeInBytes; - - public Builder asyncRequestBody(AsyncRequestBody asyncRequestBody) { - this.asyncRequestBody = asyncRequestBody; - return this; - } - - public Builder chunkSizeInBytes(Long chunkSizeInBytes) { - this.chunkSizeInBytes = chunkSizeInBytes; - return this; - } - - public Builder bufferSizeInBytes(Long bufferSizeInBytes) { - this.bufferSizeInBytes = bufferSizeInBytes; - return this; - } - - public SplittingPublisher build() { - return new SplittingPublisher(this); - } - } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/Compressor.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/Compressor.java new file mode 100644 index 000000000000..503752c26dab --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/Compressor.java @@ -0,0 +1,74 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.compression; + +import java.io.InputStream; +import java.nio.ByteBuffer; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.internal.http.pipeline.stages.CompressRequestStage; + +/** + * Interface for compressors used by {@link CompressRequestStage} to compress requests. + */ +@SdkInternalApi +public interface Compressor { + + /** + * The compression algorithm type. + * + * @return The {@link String} compression algorithm type. + */ + String compressorType(); + + /** + * Compress a {@link SdkBytes} payload. + * + * @param content + * @return The compressed {@link SdkBytes}. + */ + SdkBytes compress(SdkBytes content); + + /** + * Compress a byte[] payload. + * + * @param content + * @return The compressed byte array. + */ + default byte[] compress(byte[] content) { + return compress(SdkBytes.fromByteArray(content)).asByteArray(); + } + + /** + * Compress an {@link InputStream} payload. + * + * @param content + * @return The compressed {@link InputStream}. + */ + default InputStream compress(InputStream content) { + return compress(SdkBytes.fromInputStream(content)).asInputStream(); + } + + /** + * Compress an {@link ByteBuffer} payload. 
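A minimal round-trip sketch: the default overloads in this interface all funnel into compress(SdkBytes), so an implementation only has to supply that one method. CompressorType and GzipCompressor are introduced just below; the wrapper class name here is illustrative:

import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.core.internal.compression.Compressor;
import software.amazon.awssdk.core.internal.compression.CompressorType;

// Resolves the only algorithm registered by this patch ("gzip") and compresses a byte payload.
public final class CompressorSketch {
    public static byte[] gzip(byte[] payload) {
        Compressor gzip = CompressorType.of("gzip").newCompressor();
        return gzip.compress(SdkBytes.fromByteArray(payload)).asByteArray();
    }
}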
+ * + * @param content + * @return The compressed {@link ByteBuffer}. + */ + default ByteBuffer compress(ByteBuffer content) { + return compress(SdkBytes.fromByteBuffer(content)).asByteBuffer(); + } +} \ No newline at end of file diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/CompressorType.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/CompressorType.java new file mode 100644 index 000000000000..6b9b1ae11085 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/CompressorType.java @@ -0,0 +1,115 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.compression; + +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Validate; + +/** + * The supported compression algorithms for operations with the requestCompression trait. Each supported algorithm will have an + * {@link Compressor} implementation. + */ +@SdkInternalApi +public final class CompressorType { + + public static final CompressorType GZIP = CompressorType.of("gzip"); + + private static Map compressorMap = new HashMap() {{ + put("gzip", new GzipCompressor()); + }}; + + private final String id; + + private CompressorType(String id) { + this.id = id; + } + + /** + * Creates a new {@link CompressorType} of the given value. + */ + public static CompressorType of(String value) { + Validate.paramNotBlank(value, "compressionType"); + return CompressorTypeCache.put(value); + } + + /** + * Returns the {@link Set} of {@link String}s of compressor types supported by the SDK. + */ + public static Set compressorTypes() { + return compressorMap.keySet(); + } + + /** + * Whether or not the compressor type is supported by the SDK. + */ + public static boolean isSupported(String compressionType) { + return compressorTypes().contains(compressionType); + } + + /** + * Maps the {@link CompressorType} to its corresponding {@link Compressor}. + */ + public Compressor newCompressor() { + Compressor compressor = compressorMap.getOrDefault(this.id, null); + if (compressor == null) { + throw new UnsupportedOperationException("The compression type " + id + " does not have an implementation of " + + "Compressor"); + } + return compressor; + } + + @Override + public String toString() { + return id; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CompressorType that = (CompressorType) o; + return Objects.equals(id, that.id) + && Objects.equals(compressorMap, that.compressorMap); + } + + @Override + public int hashCode() { + int result = id != null ? id.hashCode() : 0; + result = 31 * result + (compressorMap != null ? 
compressorMap.hashCode() : 0); + return result; + } + + private static class CompressorTypeCache { + private static final ConcurrentHashMap VALUES = new ConcurrentHashMap<>(); + + private CompressorTypeCache() { + } + + private static CompressorType put(String value) { + return VALUES.computeIfAbsent(value, v -> new CompressorType(value)); + } + } +} \ No newline at end of file diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/GzipCompressor.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/GzipCompressor.java new file mode 100644 index 000000000000..b849b81fe0ca --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/GzipCompressor.java @@ -0,0 +1,55 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.compression; + +import static software.amazon.awssdk.utils.IoUtils.closeQuietly; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.zip.GZIPOutputStream; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkBytes; + +@SdkInternalApi +public final class GzipCompressor implements Compressor { + + private static final String COMPRESSOR_TYPE = "gzip"; + private static final Logger log = LoggerFactory.getLogger(GzipCompressor.class); + + @Override + public String compressorType() { + return COMPRESSOR_TYPE; + } + + @Override + public SdkBytes compress(SdkBytes content) { + GZIPOutputStream gzipOutputStream = null; + try { + ByteArrayOutputStream compressedOutputStream = new ByteArrayOutputStream(); + gzipOutputStream = new GZIPOutputStream(compressedOutputStream); + gzipOutputStream.write(content.asByteArray()); + gzipOutputStream.close(); + return SdkBytes.fromByteArray(compressedOutputStream.toByteArray()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } finally { + closeQuietly(gzipOutputStream, log); + } + } +} \ No newline at end of file diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java index ee14838dea08..58b7971800cc 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java @@ -38,6 +38,7 @@ import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncExecutionFailureExceptionReportingStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncRetryableStage2; import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncSigningStage; +import software.amazon.awssdk.core.internal.http.pipeline.stages.CompressRequestStage; import 
software.amazon.awssdk.core.internal.http.pipeline.stages.HttpChecksumStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.MakeAsyncHttpRequestStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.MakeRequestImmutableStage; @@ -171,6 +172,7 @@ public CompletableFuture execute( .then(ApplyUserAgentStage::new) .then(MergeCustomHeadersStage::new) .then(MergeCustomQueryParamsStage::new) + .then(() -> new CompressRequestStage(httpClientDependencies)) .then(() -> new HttpChecksumStage(ClientType.ASYNC)) .then(MakeRequestImmutableStage::new) .then(RequestPipelineBuilder diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java index 14cc98f39126..f56dd61e7f78 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java @@ -36,6 +36,7 @@ import software.amazon.awssdk.core.internal.http.pipeline.stages.ApplyUserAgentStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.BeforeTransmissionExecutionInterceptorsStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.BeforeUnmarshallingExecutionInterceptorsStage; +import software.amazon.awssdk.core.internal.http.pipeline.stages.CompressRequestStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.ExecutionFailureExceptionReportingStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.HandleResponseStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.HttpChecksumStage; @@ -172,6 +173,7 @@ public OutputT execute(HttpResponseHandler> response .then(ApplyUserAgentStage::new) .then(MergeCustomHeadersStage::new) .then(MergeCustomQueryParamsStage::new) + .then(() -> new CompressRequestStage(httpClientDependencies)) .then(() -> new HttpChecksumStage(ClientType.SYNC)) .then(MakeRequestImmutableStage::new) // End of mutating request diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/async/CombinedResponseAsyncHttpResponseHandler.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/async/CombinedResponseAsyncHttpResponseHandler.java index 257bd063373c..2820037ba082 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/async/CombinedResponseAsyncHttpResponseHandler.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/async/CombinedResponseAsyncHttpResponseHandler.java @@ -68,6 +68,7 @@ public void onError(Throwable error) { } successResponseHandler.onError(error); + errorResponseHandler.onError(error); } @Override diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApiCallMetricCollectionStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApiCallMetricCollectionStage.java index a54fd9678376..3b78dedaf2ad 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApiCallMetricCollectionStage.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApiCallMetricCollectionStage.java @@ -21,6 +21,7 @@ import software.amazon.awssdk.core.internal.http.RequestExecutionContext; import software.amazon.awssdk.core.internal.http.pipeline.RequestPipeline; import 
software.amazon.awssdk.core.internal.http.pipeline.RequestToResponsePipeline; +import software.amazon.awssdk.core.internal.util.MetricUtils; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.metrics.MetricCollector; @@ -39,6 +40,7 @@ public ApiCallMetricCollectionStage(RequestPipeline execute(SdkHttpFullRequest input, RequestExecutionContext context) throws Exception { MetricCollector metricCollector = context.executionContext().metricCollector(); + MetricUtils.collectServiceEndpointMetrics(metricCollector, input); // Note: at this point, any exception, even a service exception, will // be thrown from the wrapped pipeline so we can't use diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallMetricCollectionStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallMetricCollectionStage.java index 3d57cedea52d..09016026be1c 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallMetricCollectionStage.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallMetricCollectionStage.java @@ -20,6 +20,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.internal.http.RequestExecutionContext; import software.amazon.awssdk.core.internal.http.pipeline.RequestPipeline; +import software.amazon.awssdk.core.internal.util.MetricUtils; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.metrics.MetricCollector; @@ -40,6 +41,7 @@ public AsyncApiCallMetricCollectionStage(RequestPipeline execute(SdkHttpFullRequest input, RequestExecutionContext context) throws Exception { MetricCollector metricCollector = context.executionContext().metricCollector(); + MetricUtils.collectServiceEndpointMetrics(metricCollector, input); CompletableFuture future = new CompletableFuture<>(); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java new file mode 100644 index 000000000000..89920d916004 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java @@ -0,0 +1,208 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.http.pipeline.stages; + +import static software.amazon.awssdk.core.client.config.SdkClientOption.COMPRESSION_CONFIGURATION; + +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.util.List; +import java.util.Locale; +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.CompressionConfiguration; +import software.amazon.awssdk.core.RequestOverrideConfiguration; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.internal.async.CompressionAsyncRequestBody; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.compression.CompressorType; +import software.amazon.awssdk.core.internal.http.HttpClientDependencies; +import software.amazon.awssdk.core.internal.http.RequestExecutionContext; +import software.amazon.awssdk.core.internal.http.pipeline.MutableRequestToRequestPipeline; +import software.amazon.awssdk.core.internal.sync.CompressionContentStreamProvider; +import software.amazon.awssdk.http.ContentStreamProvider; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.utils.IoUtils; + +/** + * Compress requests whose operations are marked with the "requestCompression" C2J trait. + */ +@SdkInternalApi +public class CompressRequestStage implements MutableRequestToRequestPipeline { + private static final int DEFAULT_MIN_COMPRESSION_SIZE = 10_240; + private static final int MIN_COMPRESSION_SIZE_LIMIT = 10_485_760; + private final CompressionConfiguration compressionConfig; + + public CompressRequestStage(HttpClientDependencies dependencies) { + compressionConfig = dependencies.clientConfiguration().option(COMPRESSION_CONFIGURATION); + } + + @Override + public SdkHttpFullRequest.Builder execute(SdkHttpFullRequest.Builder input, RequestExecutionContext context) + throws Exception { + + if (!shouldCompress(input, context)) { + return input; + } + + Compressor compressor = resolveCompressorType(context.executionAttributes()); + + if (!isStreaming(context)) { + compressEntirePayload(input, compressor); + updateContentEncodingHeader(input, compressor); + updateContentLengthHeader(input); + return input; + } + + if (!isTransferEncodingChunked(input)) { + return input; + } + + if (context.requestProvider() == null) { + input.contentStreamProvider(new CompressionContentStreamProvider(input.contentStreamProvider(), compressor)); + } else { + context.requestProvider(CompressionAsyncRequestBody.builder() + .asyncRequestBody(context.requestProvider()) + .compressor(compressor) + .build()); + } + + updateContentEncodingHeader(input, compressor); + return input; + } + + private boolean shouldCompress(SdkHttpFullRequest.Builder input, RequestExecutionContext context) { + if (context.executionAttributes().getAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION) == null) { + return false; + } + if (resolveCompressorType(context.executionAttributes()) == null) { + return false; + } + if (!resolveRequestCompressionEnabled(context)) { + return false; + } + if (isStreaming(context)) { + return true; + } + if (input.contentStreamProvider() == null) { + return false; + } + return 
isRequestSizeWithinThreshold(input, context); + } + + private boolean isStreaming(RequestExecutionContext context) { + return context.executionAttributes().getAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION).isStreaming(); + } + + private void compressEntirePayload(SdkHttpFullRequest.Builder input, Compressor compressor) { + ContentStreamProvider wrappedProvider = input.contentStreamProvider(); + ContentStreamProvider compressedStreamProvider = () -> compressor.compress(wrappedProvider.newStream()); + input.contentStreamProvider(compressedStreamProvider); + } + + private void updateContentEncodingHeader(SdkHttpFullRequest.Builder input, + Compressor compressor) { + if (input.firstMatchingHeader("Content-encoding").isPresent()) { + input.appendHeader("Content-encoding", compressor.compressorType()); + } else { + input.putHeader("Content-encoding", compressor.compressorType()); + } + } + + private void updateContentLengthHeader(SdkHttpFullRequest.Builder input) { + InputStream inputStream = input.contentStreamProvider().newStream(); + try { + byte[] bytes = IoUtils.toByteArray(inputStream); + String length = String.valueOf(bytes.length); + input.putHeader("Content-Length", length); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private boolean isTransferEncodingChunked(SdkHttpFullRequest.Builder input) { + return input.firstMatchingHeader("Transfer-Encoding") + .map(headerValue -> headerValue.equals("chunked")) + .orElse(false); + } + + private Compressor resolveCompressorType(ExecutionAttributes executionAttributes) { + List encodings = + executionAttributes.getAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION).getEncodings(); + + for (String encoding: encodings) { + encoding = encoding.toLowerCase(Locale.ROOT); + if (CompressorType.isSupported(encoding)) { + return CompressorType.of(encoding).newCompressor(); + } + } + return null; + } + + private boolean resolveRequestCompressionEnabled(RequestExecutionContext context) { + + Optional requestCompressionEnabledRequestLevel = + context.originalRequest().overrideConfiguration() + .flatMap(RequestOverrideConfiguration::compressionConfiguration) + .map(CompressionConfiguration::requestCompressionEnabled); + if (requestCompressionEnabledRequestLevel.isPresent()) { + return requestCompressionEnabledRequestLevel.get(); + } + + Boolean isEnabled = compressionConfig.requestCompressionEnabled(); + if (isEnabled != null) { + return isEnabled; + } + + return true; + } + + private boolean isRequestSizeWithinThreshold(SdkHttpFullRequest.Builder input, RequestExecutionContext context) { + int minimumCompressionThreshold = resolveMinCompressionSize(context); + validateMinCompressionSizeInput(minimumCompressionThreshold); + int requestSize = SdkBytes.fromInputStream(input.contentStreamProvider().newStream()).asByteArray().length; + return requestSize >= minimumCompressionThreshold; + } + + private int resolveMinCompressionSize(RequestExecutionContext context) { + + Optional minimumCompressionSizeRequestLevel = + context.originalRequest().overrideConfiguration() + .flatMap(RequestOverrideConfiguration::compressionConfiguration) + .map(CompressionConfiguration::minimumCompressionThresholdInBytes); + if (minimumCompressionSizeRequestLevel.isPresent()) { + return minimumCompressionSizeRequestLevel.get(); + } + + Integer threshold = compressionConfig.minimumCompressionThresholdInBytes(); + if (threshold != null) { + return threshold; + } + + return DEFAULT_MIN_COMPRESSION_SIZE; + } + + private void 
validateMinCompressionSizeInput(int minCompressionSize) { + if (!(minCompressionSize >= 0 && minCompressionSize <= MIN_COMPRESSION_SIZE_LIMIT)) { + throw SdkClientException.create("The minimum compression size must be non-negative with a maximum value of " + + "10485760.", new IllegalArgumentException()); + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/HttpChecksumStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/HttpChecksumStage.java index 84dcd981b22f..aaf1c27428d9 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/HttpChecksumStage.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/HttpChecksumStage.java @@ -20,8 +20,8 @@ import static software.amazon.awssdk.core.HttpChecksumConstant.DEFAULT_ASYNC_CHUNK_SIZE; import static software.amazon.awssdk.core.HttpChecksumConstant.SIGNING_METHOD; import static software.amazon.awssdk.core.internal.io.AwsChunkedEncodingInputStream.DEFAULT_CHUNK_SIZE; -import static software.amazon.awssdk.core.internal.io.AwsUnsignedChunkedEncodingInputStream.calculateStreamContentLength; -import static software.amazon.awssdk.core.internal.util.ChunkContentUtils.calculateChecksumContentLength; +import static software.amazon.awssdk.core.internal.util.ChunkContentUtils.calculateChecksumTrailerLength; +import static software.amazon.awssdk.core.internal.util.ChunkContentUtils.calculateStreamContentLength; import static software.amazon.awssdk.core.internal.util.HttpChecksumResolver.getResolvedChecksumSpecs; import static software.amazon.awssdk.http.Header.CONTENT_LENGTH; @@ -179,7 +179,7 @@ private void addFlexibleChecksumInTrailer(SdkHttpFullRequest.Builder request, Re } } - long checksumContentLength = calculateChecksumContentLength(checksumSpecs.algorithm(), checksumSpecs.headerName()); + long checksumContentLength = calculateChecksumTrailerLength(checksumSpecs.algorithm(), checksumSpecs.headerName()); long contentLen = checksumContentLength + calculateStreamContentLength(originalContentLength, chunkSize); request.putHeader(HttpChecksumConstant.HEADER_FOR_TRAILER_REFERENCE, checksumSpecs.headerName()) diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/trait/RequestCompression.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/trait/RequestCompression.java new file mode 100644 index 000000000000..5be35f0ae46f --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/trait/RequestCompression.java @@ -0,0 +1,93 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.interceptor.trait; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import software.amazon.awssdk.annotations.SdkInternalApi; + +@SdkInternalApi +public final class RequestCompression { + + private List encodings; + private boolean isStreaming; + + private RequestCompression(Builder builder) { + this.encodings = builder.encodings; + this.isStreaming = builder.isStreaming; + } + + public List getEncodings() { + return encodings; + } + + public boolean isStreaming() { + return isStreaming; + } + + public static Builder builder() { + return new Builder(); + } + + public static final class Builder { + + private List encodings; + private boolean isStreaming; + + public Builder encodings(List encodings) { + this.encodings = encodings; + return this; + } + + public Builder encodings(String... encodings) { + if (encodings != null) { + this.encodings = Arrays.asList(encodings); + } + return this; + } + + public Builder isStreaming(boolean isStreaming) { + this.isStreaming = isStreaming; + return this; + } + + public RequestCompression build() { + return new RequestCompression(this); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + RequestCompression that = (RequestCompression) o; + return isStreaming == that.isStreaming() + && Objects.equals(encodings, that.getEncodings()); + } + + @Override + public int hashCode() { + int hashCode = 1; + hashCode = 31 * hashCode + (isStreaming ? 1 : 0); + hashCode = 31 * hashCode + Objects.hashCode(encodings); + return hashCode; + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedEncodingInputStream.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedEncodingInputStream.java index f382bd5ced40..ec4870f5e686 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedEncodingInputStream.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedEncodingInputStream.java @@ -22,8 +22,6 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.checksums.SdkChecksum; import software.amazon.awssdk.core.internal.chunked.AwsChunkedEncodingConfig; -import software.amazon.awssdk.core.io.SdkInputStream; -import software.amazon.awssdk.utils.Logger; import software.amazon.awssdk.utils.Validate; /** @@ -37,37 +35,18 @@ * the wrapped stream. */ @SdkInternalApi -public abstract class AwsChunkedEncodingInputStream extends SdkInputStream { +public abstract class AwsChunkedEncodingInputStream extends AwsChunkedInputStream { - public static final int DEFAULT_CHUNK_SIZE = 128 * 1024; - protected static final int SKIP_BUFFER_SIZE = 256 * 1024; protected static final String CRLF = "\r\n"; protected static final byte[] FINAL_CHUNK = new byte[0]; protected static final String HEADER_COLON_SEPARATOR = ":"; - private static final Logger log = Logger.loggerFor(AwsChunkedEncodingInputStream.class); protected byte[] calculatedChecksum = null; protected final String checksumHeaderForTrailer; protected boolean isTrailingTerminated = true; - private InputStream is = null; private final int chunkSize; private final int maxBufferSize; private final SdkChecksum sdkChecksum; private boolean isLastTrailingCrlf; - /** - * Iterator on the current chunk. 
- */ - private ChunkContentIterator currentChunkIterator; - - /** - * Iterator on the buffer of the decoded stream, - * Null if the wrapped stream is marksupported, - * otherwise it will be initialized when this wrapper is marked. - */ - private DecodedStreamBuffer decodedStreamBuffer; - - private boolean isAtStart = true; - private boolean isTerminating = false; - /** * Creates a chunked encoding input stream initialized with the originating stream. The configuration allows @@ -89,10 +68,10 @@ protected AwsChunkedEncodingInputStream(InputStream in, AwsChunkedEncodingInputStream originalChunkedStream = (AwsChunkedEncodingInputStream) in; providedMaxBufferSize = Math.max(originalChunkedStream.maxBufferSize, providedMaxBufferSize); is = originalChunkedStream.is; - decodedStreamBuffer = originalChunkedStream.decodedStreamBuffer; + underlyingStreamBuffer = originalChunkedStream.underlyingStreamBuffer; } else { is = in; - decodedStreamBuffer = null; + underlyingStreamBuffer = null; } this.chunkSize = awsChunkedEncodingConfig.chunkSize(); this.maxBufferSize = providedMaxBufferSize; @@ -153,19 +132,6 @@ public T checksumHeaderForTrailer(String checksumHeaderForTrailer) { } - @Override - public int read() throws IOException { - byte[] tmp = new byte[1]; - int count = read(tmp, 0, 1); - if (count > 0) { - log.debug(() -> "One byte read from the stream."); - int unsignedByte = (int) tmp[0] & 0xFF; - return unsignedByte; - } else { - return count; - } - } - @Override public int read(byte[] b, int off, int len) throws IOException { abortIfNeeded(); @@ -211,32 +177,6 @@ private boolean setUpTrailingChunks() { return true; } - @Override - public long skip(long n) throws IOException { - if (n <= 0) { - return 0; - } - long remaining = n; - int toskip = (int) Math.min(SKIP_BUFFER_SIZE, n); - byte[] temp = new byte[toskip]; - while (remaining > 0) { - int count = read(temp, 0, toskip); - if (count < 0) { - break; - } - remaining -= count; - } - return n - remaining; - } - - /** - * @see java.io.InputStream#markSupported() - */ - @Override - public boolean markSupported() { - return true; - } - /** * The readlimit parameter is ignored. 
*/ @@ -256,7 +196,7 @@ public void mark(int readlimit) { } else { log.debug(() -> "AwsChunkedEncodingInputStream marked at the start of the stream " + "(initializing the buffer since the wrapped stream is not mark-supported)."); - decodedStreamBuffer = new DecodedStreamBuffer(maxBufferSize); + underlyingStreamBuffer = new UnderlyingStreamBuffer(maxBufferSize); } } @@ -280,8 +220,8 @@ public void reset() throws IOException { is.reset(); } else { log.debug(() -> "AwsChunkedEncodingInputStream reset (will use the buffer of the decoded stream)."); - Validate.notNull(decodedStreamBuffer, "Cannot reset the stream because the mark is not set."); - decodedStreamBuffer.startReadBuffer(); + Validate.notNull(underlyingStreamBuffer, "Cannot reset the stream because the mark is not set."); + underlyingStreamBuffer.startReadBuffer(); } isAtStart = true; isTerminating = false; @@ -298,14 +238,14 @@ private boolean setUpNextChunk() throws IOException { int chunkSizeInBytes = 0; while (chunkSizeInBytes < chunkSize) { /** Read from the buffer of the decoded stream */ - if (null != decodedStreamBuffer && decodedStreamBuffer.hasNext()) { - chunkData[chunkSizeInBytes++] = decodedStreamBuffer.next(); + if (null != underlyingStreamBuffer && underlyingStreamBuffer.hasNext()) { + chunkData[chunkSizeInBytes++] = underlyingStreamBuffer.next(); } else { /** Read from the wrapped stream */ int bytesToRead = chunkSize - chunkSizeInBytes; int count = is.read(chunkData, chunkSizeInBytes, bytesToRead); if (count != -1) { - if (null != decodedStreamBuffer) { - decodedStreamBuffer.buffer(chunkData, chunkSizeInBytes, count); + if (null != underlyingStreamBuffer) { + underlyingStreamBuffer.buffer(chunkData, chunkSizeInBytes, count); } chunkSizeInBytes += count; } else { @@ -333,13 +273,6 @@ private boolean setUpNextChunk() throws IOException { } } - - @Override - protected InputStream getWrappedInputStream() { - return is; - } - - /** * The final chunk. * @@ -361,5 +294,4 @@ protected InputStream getWrappedInputStream() { * @return ChecksumChunkHeader in bytes based on the Header name field. */ protected abstract byte[] createChecksumChunkHeader(); - -} \ No newline at end of file +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedInputStream.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedInputStream.java new file mode 100644 index 000000000000..11beb216f16f --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedInputStream.java @@ -0,0 +1,90 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.io; + +import java.io.IOException; +import java.io.InputStream; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.io.SdkInputStream; +import software.amazon.awssdk.utils.Logger; + +/** + * A wrapper of InputStream that implements streaming in chunks. 
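+ *
+ * <p>Descriptive note (nothing here is new API): subclasses such as {@code AwsChunkedEncodingInputStream}
+ * and {@code AwsCompressionInputStream} pull data from the wrapped stream one chunk at a time
+ * ({@code DEFAULT_CHUNK_SIZE}, 128 KiB, by default), while this base class supplies the shared
+ * single-byte {@code read()}, {@code skip(long)}, mark support and access to the wrapped stream.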
+ */ +@SdkInternalApi +public abstract class AwsChunkedInputStream extends SdkInputStream { + public static final int DEFAULT_CHUNK_SIZE = 128 * 1024; + protected static final int SKIP_BUFFER_SIZE = 256 * 1024; + protected static final Logger log = Logger.loggerFor(AwsChunkedInputStream.class); + protected InputStream is; + /** + * Iterator on the current chunk. + */ + protected ChunkContentIterator currentChunkIterator; + + /** + * Iterator on the buffer of the underlying stream, + * Null if the wrapped stream is marksupported, + * otherwise it will be initialized when this wrapper is marked. + */ + protected UnderlyingStreamBuffer underlyingStreamBuffer; + protected boolean isAtStart = true; + protected boolean isTerminating = false; + + @Override + public int read() throws IOException { + byte[] tmp = new byte[1]; + int count = read(tmp, 0, 1); + if (count > 0) { + log.debug(() -> "One byte read from the stream."); + int unsignedByte = (int) tmp[0] & 0xFF; + return unsignedByte; + } else { + return count; + } + } + + @Override + public long skip(long n) throws IOException { + if (n <= 0) { + return 0; + } + long remaining = n; + int toskip = (int) Math.min(SKIP_BUFFER_SIZE, n); + byte[] temp = new byte[toskip]; + while (remaining > 0) { + int count = read(temp, 0, toskip); + if (count < 0) { + break; + } + remaining -= count; + } + return n - remaining; + } + + /** + * @see InputStream#markSupported() + */ + @Override + public boolean markSupported() { + return true; + } + + @Override + protected InputStream getWrappedInputStream() { + return is; + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStream.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStream.java new file mode 100644 index 000000000000..93642bad8c47 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStream.java @@ -0,0 +1,170 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.io; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.utils.Validate; + +/** + * A wrapper class of InputStream that implements compression in chunks. + */ +@SdkInternalApi +public final class AwsCompressionInputStream extends AwsChunkedInputStream { + private final Compressor compressor; + + private AwsCompressionInputStream(InputStream in, Compressor compressor) { + this.compressor = compressor; + if (in instanceof AwsCompressionInputStream) { + // This could happen when the request is retried. 
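+ // Reuse the wrapped stream and any bytes already buffered by the original wrapper so the
+ // retried attempt replays the same data rather than re-reading a partially consumed stream.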
+ AwsCompressionInputStream originalCompressionStream = (AwsCompressionInputStream) in; + this.is = originalCompressionStream.is; + this.underlyingStreamBuffer = originalCompressionStream.underlyingStreamBuffer; + } else { + this.is = in; + this.underlyingStreamBuffer = null; + } + } + + public static Builder builder() { + return new Builder(); + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + abortIfNeeded(); + Validate.notNull(b, "buff"); + if (off < 0 || len < 0 || len > b.length - off) { + throw new IndexOutOfBoundsException(); + } else if (len == 0) { + return 0; + } + + if (currentChunkIterator == null || !currentChunkIterator.hasNext()) { + if (isTerminating) { + return -1; + } + isTerminating = setUpNextChunk(); + } + + int count = currentChunkIterator.read(b, off, len); + if (count > 0) { + isAtStart = false; + log.trace(() -> count + " byte read from the stream."); + } + return count; + } + + private boolean setUpNextChunk() throws IOException { + byte[] chunkData = new byte[DEFAULT_CHUNK_SIZE]; + int chunkSizeInBytes = 0; + while (chunkSizeInBytes < DEFAULT_CHUNK_SIZE) { + /** Read from the buffer of the uncompressed stream */ + if (underlyingStreamBuffer != null && underlyingStreamBuffer.hasNext()) { + chunkData[chunkSizeInBytes++] = underlyingStreamBuffer.next(); + } else { /** Read from the wrapped stream */ + int bytesToRead = DEFAULT_CHUNK_SIZE - chunkSizeInBytes; + int count = is.read(chunkData, chunkSizeInBytes, bytesToRead); + if (count != -1) { + if (underlyingStreamBuffer != null) { + underlyingStreamBuffer.buffer(chunkData, chunkSizeInBytes, count); + } + chunkSizeInBytes += count; + } else { + break; + } + } + } + if (chunkSizeInBytes == 0) { + return true; + } + + if (chunkSizeInBytes < chunkData.length) { + chunkData = Arrays.copyOf(chunkData, chunkSizeInBytes); + } + // Compress the chunk + byte[] compressedChunkData = compressor.compress(chunkData); + currentChunkIterator = new ChunkContentIterator(compressedChunkData); + return false; + } + + /** + * The readlimit parameter is ignored. + */ + @Override + public void mark(int readlimit) { + abortIfNeeded(); + if (!isAtStart) { + throw new UnsupportedOperationException("Compression stream only supports mark() at the start of the stream."); + } + if (is.markSupported()) { + log.debug(() -> "AwsCompressionInputStream marked at the start of the stream " + + "(will directly mark the wrapped stream since it's mark-supported)."); + is.mark(readlimit); + } else { + log.debug(() -> "AwsCompressionInputStream marked at the start of the stream " + + "(initializing the buffer since the wrapped stream is not mark-supported)."); + underlyingStreamBuffer = new UnderlyingStreamBuffer(SKIP_BUFFER_SIZE); + } + } + + /** + * Reset the stream, either by resetting the wrapped stream or using the + * buffer created by this class. + */ + @Override + public void reset() throws IOException { + abortIfNeeded(); + // Clear up any encoded data + currentChunkIterator = null; + // Reset the wrapped stream if it is mark-supported, + // otherwise use our buffered data. 
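+ // The buffer below is only initialized by mark() when the wrapped stream is not mark-supported,
+ // so resetting before mark() fails the Validate check.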
+ if (is.markSupported()) { + log.debug(() -> "AwsCompressionInputStream reset " + + "(will reset the wrapped stream because it is mark-supported)."); + is.reset(); + } else { + log.debug(() -> "AwsCompressionInputStream reset (will use the buffer of the decoded stream)."); + Validate.notNull(underlyingStreamBuffer, "Cannot reset the stream because the mark is not set."); + underlyingStreamBuffer.startReadBuffer(); + } + isAtStart = true; + isTerminating = false; + } + + public static final class Builder { + InputStream inputStream; + Compressor compressor; + + public AwsCompressionInputStream build() { + return new AwsCompressionInputStream( + this.inputStream, this.compressor); + } + + public Builder inputStream(InputStream inputStream) { + this.inputStream = inputStream; + return this; + } + + public Builder compressor(Compressor compressor) { + this.compressor = compressor; + return this; + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsUnsignedChunkedEncodingInputStream.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsUnsignedChunkedEncodingInputStream.java index 186ca5d7d0d8..4c7f46a248cf 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsUnsignedChunkedEncodingInputStream.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsUnsignedChunkedEncodingInputStream.java @@ -18,7 +18,6 @@ import java.io.InputStream; import java.nio.charset.StandardCharsets; import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.core.checksums.Algorithm; import software.amazon.awssdk.core.checksums.SdkChecksum; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.internal.chunked.AwsChunkedEncodingConfig; @@ -40,48 +39,6 @@ public static Builder builder() { return new Builder(); } - /** - * Calculates the content length for a given Algorithm and header name. - * - * @param algorithm Algorithm used. - * @param headerName Header name. - * @return Content length of the trailer that will be appended at the end. - */ - public static long calculateChecksumContentLength(Algorithm algorithm, String headerName) { - return headerName.length() - + HEADER_COLON_SEPARATOR.length() - + algorithm.base64EncodedLength().longValue() - + CRLF.length() + CRLF.length(); - } - - /** - * - * @param originalContentLength Original Content length. - * @return Calculatec Chunk Length with the chunk encoding format. - */ - private static long calculateChunkLength(long originalContentLength) { - return Long.toHexString(originalContentLength).length() - + CRLF.length() - + originalContentLength - + CRLF.length(); - } - - public static long calculateStreamContentLength(long originalLength, long defaultChunkSize) { - if (originalLength < 0 || defaultChunkSize == 0) { - throw new IllegalArgumentException(originalLength + ", " + defaultChunkSize + "Args <= 0 not expected"); - } - - long maxSizeChunks = originalLength / defaultChunkSize; - long remainingBytes = originalLength % defaultChunkSize; - - long allChunks = maxSizeChunks * calculateChunkLength(defaultChunkSize); - long remainingInChunk = remainingBytes > 0 ? 
calculateChunkLength(remainingBytes) : 0; - // last byte is composed of a "0" and "\r\n" - long lastByteSize = 1 + (long) CRLF.length(); - - return allChunks + remainingInChunk + lastByteSize; - } - @Override protected byte[] createFinalChunk(byte[] finalChunk) { StringBuilder chunkHeader = new StringBuilder(); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/DecodedStreamBuffer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/UnderlyingStreamBuffer.java similarity index 93% rename from core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/DecodedStreamBuffer.java rename to core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/UnderlyingStreamBuffer.java index f6d3c47c0c1e..6fc086983fda 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/DecodedStreamBuffer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/UnderlyingStreamBuffer.java @@ -20,8 +20,8 @@ import software.amazon.awssdk.utils.Logger; @SdkInternalApi -class DecodedStreamBuffer { - private static final Logger log = Logger.loggerFor(DecodedStreamBuffer.class); +class UnderlyingStreamBuffer { + private static final Logger log = Logger.loggerFor(UnderlyingStreamBuffer.class); private byte[] bufferArray; private int maxBufferSize; @@ -29,7 +29,7 @@ class DecodedStreamBuffer { private int pos = -1; private boolean bufferSizeOverflow; - DecodedStreamBuffer(int maxBufferSize) { + UnderlyingStreamBuffer(int maxBufferSize) { bufferArray = new byte[maxBufferSize]; this.maxBufferSize = maxBufferSize; } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/CompressionContentStreamProvider.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/CompressionContentStreamProvider.java new file mode 100644 index 000000000000..52a222bc372c --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/CompressionContentStreamProvider.java @@ -0,0 +1,55 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.sync; + +import java.io.InputStream; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.io.AwsCompressionInputStream; +import software.amazon.awssdk.http.ContentStreamProvider; +import software.amazon.awssdk.utils.IoUtils; + +/** + * {@link ContentStreamProvider} implementation for compression. 
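+ *
+ * <p>A minimal usage sketch (hypothetical payload; for illustration only, not part of this change):
+ * <pre>{@code
+ * byte[] payload = "hello, compression".getBytes(StandardCharsets.UTF_8);
+ * ContentStreamProvider compressed =
+ *     new CompressionContentStreamProvider(() -> new ByteArrayInputStream(payload),
+ *                                          CompressorType.of("gzip").newCompressor());
+ * InputStream stream = compressed.newStream(); // a compressed view of the payload, produced chunk by chunk
+ * }</pre>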
+ */ +@SdkInternalApi +public class CompressionContentStreamProvider implements ContentStreamProvider { + private final ContentStreamProvider underlyingInputStreamProvider; + private InputStream currentStream; + private final Compressor compressor; + + public CompressionContentStreamProvider(ContentStreamProvider underlyingInputStreamProvider, Compressor compressor) { + this.underlyingInputStreamProvider = underlyingInputStreamProvider; + this.compressor = compressor; + } + + @Override + public InputStream newStream() { + closeCurrentStream(); + currentStream = AwsCompressionInputStream.builder() + .inputStream(underlyingInputStreamProvider.newStream()) + .compressor(compressor) + .build(); + return currentStream; + } + + private void closeCurrentStream() { + if (currentStream != null) { + IoUtils.closeQuietly(currentStream, null); + currentStream = null; + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/ChunkContentUtils.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/ChunkContentUtils.java index 54ad56781599..91d47c314494 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/ChunkContentUtils.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/ChunkContentUtils.java @@ -28,35 +28,64 @@ public final class ChunkContentUtils { public static final String ZERO_BYTE = "0"; public static final String CRLF = "\r\n"; + public static final String LAST_CHUNK = ZERO_BYTE + CRLF; + public static final long LAST_CHUNK_LEN = LAST_CHUNK.length(); + private ChunkContentUtils() { } /** + * The chunk format is: chunk-size CRLF chunk-data CRLF. + * * @param originalContentLength Original Content length. - * @return Calculates Chunk Length. + * @return the length of this chunk */ public static long calculateChunkLength(long originalContentLength) { + if (originalContentLength == 0) { + return 0; + } return Long.toHexString(originalContentLength).length() - + CRLF.length() - + originalContentLength - + CRLF.length() - + ZERO_BYTE.length() + CRLF.length(); + + CRLF.length() + + originalContentLength + + CRLF.length(); + } + + /** + * Calculates the content length for data that is divided into chunks. + * + * @param originalLength original content length. + * @param chunkSize chunk size + * @return Content length of the trailer that will be appended at the end. + */ + public static long calculateStreamContentLength(long originalLength, long chunkSize) { + if (originalLength < 0 || chunkSize == 0) { + throw new IllegalArgumentException(originalLength + ", " + chunkSize + "Args <= 0 not expected"); + } + + long maxSizeChunks = originalLength / chunkSize; + long remainingBytes = originalLength % chunkSize; + + long allChunks = maxSizeChunks * calculateChunkLength(chunkSize); + long remainingInChunk = remainingBytes > 0 ? calculateChunkLength(remainingBytes) : 0; + // last byte is composed of a "0" and "\r\n" + long lastByteSize = 1 + (long) CRLF.length(); + + return allChunks + remainingInChunk + lastByteSize; } /** - * Calculates the content length for a given Algorithm and header name. + * Calculates the content length for a given algorithm and header name. * * @param algorithm Algorithm used. * @param headerName Header name. * @return Content length of the trailer that will be appended at the end. 
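+ *
+ * <p>The returned value is {@code headerName.length() + ":".length() + algorithm.base64EncodedLength()
+ * + CRLF.length() + CRLF.length()}, i.e. the size of a trailer of the form
+ * {@code <headerName>:<base64-checksum>} followed by two CRLF sequences.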
*/ - public static long calculateChecksumContentLength(Algorithm algorithm, String headerName) { - int checksumLength = algorithm.base64EncodedLength(); - - return (headerName.length() - + HEADER_COLON_SEPARATOR.length() - + checksumLength - + CRLF.length() + CRLF.length()); + public static long calculateChecksumTrailerLength(Algorithm algorithm, String headerName) { + return headerName.length() + + HEADER_COLON_SEPARATOR.length() + + algorithm.base64EncodedLength().longValue() + + CRLF.length() + + CRLF.length(); } /** @@ -86,17 +115,13 @@ public static ByteBuffer createChunk(ByteBuffer chunkData, boolean isLastByte) { chunkHeader.append(CRLF); try { byte[] header = chunkHeader.toString().getBytes(StandardCharsets.UTF_8); - // Last byte does not need additional \r\n trailer byte[] trailer = !isLastByte ? CRLF.getBytes(StandardCharsets.UTF_8) : "".getBytes(StandardCharsets.UTF_8); ByteBuffer chunkFormattedBuffer = ByteBuffer.allocate(header.length + chunkLength + trailer.length); - chunkFormattedBuffer.put(header) - .put(chunkData) - .put(trailer); + chunkFormattedBuffer.put(header).put(chunkData).put(trailer); chunkFormattedBuffer.flip(); return chunkFormattedBuffer; } catch (Exception e) { - // This is to warp BufferOverflowException,ReadOnlyBufferException to SdkClientException. throw SdkClientException.builder() .message("Unable to create chunked data. " + e.getMessage()) .cause(e) diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/MetricUtils.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/MetricUtils.java index 8805653dd636..bee59eae51ff 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/MetricUtils.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/MetricUtils.java @@ -18,13 +18,17 @@ import static software.amazon.awssdk.core.http.HttpResponseHandler.X_AMZN_REQUEST_ID_HEADERS; import static software.amazon.awssdk.core.http.HttpResponseHandler.X_AMZ_ID_2_HEADER; +import java.net.URI; +import java.net.URISyntaxException; import java.time.Duration; import java.util.concurrent.Callable; import java.util.function.Supplier; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.internal.http.RequestExecutionContext; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpFullResponse; import software.amazon.awssdk.metrics.MetricCollector; import software.amazon.awssdk.metrics.NoOpMetricCollector; @@ -65,6 +69,23 @@ public static Pair measureDurationUnsafe(Callable c) throws return Pair.of(result, d); } + /** + * Collect the SERVICE_ENDPOINT metric for this request. 
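+ * <p>Only the scheme and authority of the request URI are reported; path, query and fragment are
+ * dropped. For example (hypothetical endpoint), a request to
+ * {@code https://dynamodb.us-east-1.amazonaws.com/some/path?x=1} is recorded as
+ * {@code https://dynamodb.us-east-1.amazonaws.com}.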
+ */ + public static void collectServiceEndpointMetrics(MetricCollector metricCollector, SdkHttpFullRequest httpRequest) { + if (metricCollector != null && !(metricCollector instanceof NoOpMetricCollector) && httpRequest != null) { + // Only interested in the service endpoint so don't include any path, query, or fragment component + URI requestUri = httpRequest.getUri(); + try { + URI serviceEndpoint = new URI(requestUri.getScheme(), requestUri.getAuthority(), null, null, null); + metricCollector.reportMetric(CoreMetric.SERVICE_ENDPOINT, serviceEndpoint); + } catch (URISyntaxException e) { + // This should not happen since getUri() should return a valid URI + throw SdkClientException.create("Unable to collect SERVICE_ENDPOINT metric", e); + } + } + } + public static void collectHttpMetrics(MetricCollector metricCollector, SdkHttpFullResponse httpResponse) { if (metricCollector != null && !(metricCollector instanceof NoOpMetricCollector) && httpResponse != null) { metricCollector.reportMetric(HttpMetric.HTTP_STATUS_CODE, httpResponse.statusCode()); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/metrics/CoreMetric.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/metrics/CoreMetric.java index f4529d32c1a0..df71deacc274 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/metrics/CoreMetric.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/metrics/CoreMetric.java @@ -15,6 +15,7 @@ package software.amazon.awssdk.core.metrics; +import java.net.URI; import java.time.Duration; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.core.retry.RetryPolicy; @@ -50,6 +51,12 @@ public final class CoreMetric { public static final SdkMetric RETRY_COUNT = metric("RetryCount", Integer.class, MetricLevel.ERROR); + /** + * The endpoint for the service. + */ + public static final SdkMetric SERVICE_ENDPOINT = + metric("ServiceEndpoint", URI.class, MetricLevel.ERROR); + /** * The duration of the API call. This includes all call attempts made. * diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/conditions/RetryOnExceptionsCondition.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/conditions/RetryOnExceptionsCondition.java index ab143731c931..5589f67ffbaa 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/conditions/RetryOnExceptionsCondition.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/conditions/RetryOnExceptionsCondition.java @@ -42,8 +42,8 @@ private RetryOnExceptionsCondition(Set> exceptionsToR /** * @param context Context about the state of the last request and information about the number of requests made. - * @return True if the exception class matches one of the whitelisted exceptions or if the cause of the exception matches the - * whitelisted exception. + * @return True if the exception class or the cause of the exception matches one of the exceptions supplied at + * initialization time. 
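+ * For example, a condition built with {@code IOException.class} in its retryable set retries an
+ * {@code IOException} thrown directly as well as any exception whose {@code getCause()} is an
+ * {@code IOException}.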
*/ @Override public boolean shouldRetry(RetryPolicyContext context) { @@ -56,10 +56,10 @@ public boolean shouldRetry(RetryPolicyContext context) { Predicate> isRetryableException = ex -> ex.isAssignableFrom(exception.getClass()); - Predicate> hasRetrableCause = + Predicate> hasRetryableCause = ex -> exception.getCause() != null && ex.isAssignableFrom(exception.getCause().getClass()); - return exceptionsToRetryOn.stream().anyMatch(isRetryableException.or(hasRetrableCause)); + return exceptionsToRetryOn.stream().anyMatch(isRetryableException.or(hasRetryableCause)); } /** diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/util/SdkUserAgent.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/util/SdkUserAgent.java index f87a70dd5505..bbaa7dd22531 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/util/SdkUserAgent.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/util/SdkUserAgent.java @@ -38,7 +38,7 @@ public final class SdkUserAgent { + "vendor/{java.vendor}"; /** Disallowed characters in the user agent token: @see RFC 7230 */ - private static final String UA_BLACKLIST_REGEX = "[() ,/:;<=>?@\\[\\]{}\\\\]"; + private static final String UA_DENYLIST_REGEX = "[() ,/:;<=>?@\\[\\]{}\\\\]"; /** Shared logger for any issues while loading version information. */ private static final Logger log = LoggerFactory.getLogger(SdkUserAgent.class); @@ -125,7 +125,7 @@ String getUserAgent() { * @return the input with spaces replaced by underscores */ private static String sanitizeInput(String input) { - return input == null ? UNKNOWN : input.replaceAll(UA_BLACKLIST_REGEX, "_"); + return input == null ? UNKNOWN : input.replaceAll(UA_DENYLIST_REGEX, "_"); } private static String getAdditionalJvmLanguages() { diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/CompressionConfigurationTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/CompressionConfigurationTest.java new file mode 100644 index 000000000000..dec9d8303f69 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/CompressionConfigurationTest.java @@ -0,0 +1,43 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class CompressionConfigurationTest { + + @Test + public void equalsHashcode() { + EqualsVerifier.forClass(CompressionConfiguration.class) + .withNonnullFields("requestCompressionEnabled", "minimumCompressionThresholdInBytes") + .verify(); + } + + @Test + public void toBuilder() { + CompressionConfiguration configuration = + CompressionConfiguration.builder() + .requestCompressionEnabled(true) + .minimumCompressionThresholdInBytes(99999) + .build(); + + CompressionConfiguration another = configuration.toBuilder().build(); + assertThat(configuration).isEqualTo(another); + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/FileRequestBodyConfigurationTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/FileRequestBodyConfigurationTest.java new file mode 100644 index 000000000000..535a7176856c --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/FileRequestBodyConfigurationTest.java @@ -0,0 +1,73 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.nio.file.Paths; +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class FileRequestBodyConfigurationTest { + + @Test + void equalsHashCode() { + EqualsVerifier.forClass(FileRequestBodyConfiguration.class) + .verify(); + } + + @Test + void invalidRequest_shouldThrowException() { + assertThatThrownBy(() -> FileRequestBodyConfiguration.builder() + .path(Paths.get(".")) + .position(-1L) + .build()) + .hasMessage("position must not be negative"); + + assertThatThrownBy(() -> FileRequestBodyConfiguration.builder() + .path(Paths.get(".")) + .numBytesToRead(-1L) + .build()) + .hasMessage("numBytesToRead must not be negative"); + + assertThatThrownBy(() -> FileRequestBodyConfiguration.builder() + .path(Paths.get(".")) + .chunkSizeInBytes(0) + .build()) + .hasMessage("chunkSizeInBytes must be positive"); + assertThatThrownBy(() -> FileRequestBodyConfiguration.builder() + .path(Paths.get(".")) + .chunkSizeInBytes(-5) + .build()) + .hasMessage("chunkSizeInBytes must be positive"); + assertThatThrownBy(() -> FileRequestBodyConfiguration.builder() + .build()) + .hasMessage("path"); + } + + @Test + void toBuilder_shouldCopyAllProperties() { + FileRequestBodyConfiguration config = FileRequestBodyConfiguration.builder() + .path(Paths.get(".")).numBytesToRead(100L) + .position(1L) + .chunkSizeInBytes(1024) + .build(); + + assertThat(config.toBuilder().build()).isEqualTo(config); + } + +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/ChunkBufferTest.java 
b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/ChunkBufferTest.java index a553a55a4536..136c28695511 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/ChunkBufferTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/ChunkBufferTest.java @@ -16,7 +16,6 @@ package software.amazon.awssdk.core.async; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -24,6 +23,7 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collection; +import java.util.Optional; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -41,19 +41,53 @@ class ChunkBufferTest { - @Test - void builderWithNoTotalSize() { - assertThatThrownBy(() -> ChunkBuffer.builder().build()).isInstanceOf(NullPointerException.class); + @ParameterizedTest + @ValueSource(ints = {1, 6, 10, 23, 25}) + void numberOfChunk_Not_MultipleOfTotalBytes_KnownLength(int totalBytes) { + int bufferSize = 5; + + String inputString = RandomStringUtils.randomAscii(totalBytes); + ChunkBuffer chunkBuffer = ChunkBuffer.builder() + .bufferSize(bufferSize) + .totalBytes(inputString.getBytes(StandardCharsets.UTF_8).length) + .build(); + Iterable byteBuffers = + chunkBuffer.split(ByteBuffer.wrap(inputString.getBytes(StandardCharsets.UTF_8))); + + AtomicInteger index = new AtomicInteger(0); + int count = (int) Math.ceil(totalBytes / (double) bufferSize); + int remainder = totalBytes % bufferSize; + + byteBuffers.forEach(r -> { + int i = index.get(); + + try (ByteArrayInputStream inputStream = new ByteArrayInputStream(inputString.getBytes(StandardCharsets.UTF_8))) { + byte[] expected; + if (i == count - 1 && remainder != 0) { + expected = new byte[remainder]; + } else { + expected = new byte[bufferSize]; + } + inputStream.skip(i * bufferSize); + inputStream.read(expected); + byte[] actualBytes = BinaryUtils.copyBytesFrom(r); + assertThat(actualBytes).isEqualTo(expected); + index.incrementAndGet(); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); } @ParameterizedTest @ValueSource(ints = {1, 6, 10, 23, 25}) - void numberOfChunk_Not_MultipleOfTotalBytes(int totalBytes) { + void numberOfChunk_Not_MultipleOfTotalBytes_UnknownLength(int totalBytes) { int bufferSize = 5; String inputString = RandomStringUtils.randomAscii(totalBytes); - ChunkBuffer chunkBuffer = - ChunkBuffer.builder().bufferSize(bufferSize).totalBytes(inputString.getBytes(StandardCharsets.UTF_8).length).build(); + ChunkBuffer chunkBuffer = ChunkBuffer.builder() + .bufferSize(bufferSize) + .build(); Iterable byteBuffers = chunkBuffer.split(ByteBuffer.wrap(inputString.getBytes(StandardCharsets.UTF_8))); @@ -83,10 +117,12 @@ void numberOfChunk_Not_MultipleOfTotalBytes(int totalBytes) { } @Test - void zeroTotalBytesAsInput_returnsZeroByte() { + void zeroTotalBytesAsInput_returnsZeroByte_KnownLength() { byte[] zeroByte = new byte[0]; - ChunkBuffer chunkBuffer = - ChunkBuffer.builder().bufferSize(5).totalBytes(zeroByte.length).build(); + ChunkBuffer chunkBuffer = ChunkBuffer.builder() + .bufferSize(5) + .totalBytes(zeroByte.length) + .build(); Iterable byteBuffers = chunkBuffer.split(ByteBuffer.wrap(zeroByte)); @@ -98,13 +134,30 @@ void zeroTotalBytesAsInput_returnsZeroByte() { } @Test - void emptyAllocatedBytes_returnSameNumberOfEmptyBytes() { + void 
zeroTotalBytesAsInput_returnsZeroByte_UnknownLength() { + byte[] zeroByte = new byte[0]; + ChunkBuffer chunkBuffer = ChunkBuffer.builder() + .bufferSize(5) + .build(); + Iterable byteBuffers = + chunkBuffer.split(ByteBuffer.wrap(zeroByte)); + + AtomicInteger iteratedCounts = new AtomicInteger(); + byteBuffers.forEach(r -> { + iteratedCounts.getAndIncrement(); + }); + assertThat(iteratedCounts.get()).isEqualTo(1); + } + @Test + void emptyAllocatedBytes_returnSameNumberOfEmptyBytes_knownLength() { int totalBytes = 17; int bufferSize = 5; ByteBuffer wrap = ByteBuffer.allocate(totalBytes); - ChunkBuffer chunkBuffer = - ChunkBuffer.builder().bufferSize(bufferSize).totalBytes(wrap.remaining()).build(); + ChunkBuffer chunkBuffer = ChunkBuffer.builder() + .bufferSize(bufferSize) + .totalBytes(wrap.remaining()) + .build(); Iterable byteBuffers = chunkBuffer.split(wrap); @@ -121,6 +174,34 @@ void emptyAllocatedBytes_returnSameNumberOfEmptyBytes() { assertThat(iteratedCounts.get()).isEqualTo(4); } + @Test + void emptyAllocatedBytes_returnSameNumberOfEmptyBytes_unknownLength() { + int totalBytes = 17; + int bufferSize = 5; + ByteBuffer wrap = ByteBuffer.allocate(totalBytes); + ChunkBuffer chunkBuffer = ChunkBuffer.builder() + .bufferSize(bufferSize) + .build(); + Iterable byteBuffers = + chunkBuffer.split(wrap); + + AtomicInteger iteratedCounts = new AtomicInteger(); + byteBuffers.forEach(r -> { + iteratedCounts.getAndIncrement(); + if (iteratedCounts.get() * bufferSize < totalBytes) { + // array of empty bytes + assertThat(BinaryUtils.copyBytesFrom(r)).isEqualTo(ByteBuffer.allocate(bufferSize).array()); + } else { + assertThat(BinaryUtils.copyBytesFrom(r)).isEqualTo(ByteBuffer.allocate(totalBytes % bufferSize).array()); + } + }); + assertThat(iteratedCounts.get()).isEqualTo(3); + + Optional lastBuffer = chunkBuffer.getBufferedData(); + assertThat(lastBuffer).isPresent(); + assertThat(lastBuffer.get().remaining()).isEqualTo(2); + } + /** * * Total bytes 11(ChunkSize) 3 (threads) @@ -152,14 +233,16 @@ void emptyAllocatedBytes_returnSameNumberOfEmptyBytes() { * 111 is given as output since we consumed all the total bytes* */ @Test - void concurrentTreads_calling_bufferAndCreateChunks() throws ExecutionException, InterruptedException { + void concurrentTreads_calling_bufferAndCreateChunks_knownLength() throws ExecutionException, InterruptedException { int totalBytes = 17; int bufferSize = 5; int threads = 8; ByteBuffer wrap = ByteBuffer.allocate(totalBytes); - ChunkBuffer chunkBuffer = - ChunkBuffer.builder().bufferSize(bufferSize).totalBytes(wrap.remaining() * threads).build(); + ChunkBuffer chunkBuffer = ChunkBuffer.builder() + .bufferSize(bufferSize) + .totalBytes(wrap.remaining() * threads) + .build(); ExecutorService service = Executors.newFixedThreadPool(threads); @@ -198,7 +281,4 @@ void concurrentTreads_calling_bufferAndCreateChunks() throws ExecutionException, assertThat(remainderBytesBuffers.get()).isOne(); assertThat(otherSizeBuffers.get()).isZero(); } - } - - diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/CompressionAsyncRequestBodyTckTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/CompressionAsyncRequestBodyTckTest.java new file mode 100644 index 000000000000..54c74e1e97e9 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/CompressionAsyncRequestBodyTckTest.java @@ -0,0 +1,111 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.async; + +import com.google.common.jimfs.Configuration; +import com.google.common.jimfs.Jimfs; +import io.reactivex.Flowable; +import java.io.IOException; +import java.io.OutputStream; +import java.io.UncheckedIOException; +import java.nio.ByteBuffer; +import java.nio.file.FileSystem; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.Optional; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import org.reactivestreams.tck.PublisherVerification; +import org.reactivestreams.tck.TestEnvironment; +import software.amazon.awssdk.core.internal.async.CompressionAsyncRequestBody; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.compression.GzipCompressor; + +public class CompressionAsyncRequestBodyTckTest extends PublisherVerification { + + private static final FileSystem fs = Jimfs.newFileSystem(Configuration.unix()); + private static final Path rootDir = fs.getRootDirectories().iterator().next(); + private static final int MAX_ELEMENTS = 1000; + private static final int CHUNK_SIZE = 128 * 1024; + private static final Compressor compressor = new GzipCompressor(); + + public CompressionAsyncRequestBodyTckTest() { + super(new TestEnvironment()); + } + + @Override + public long maxElementsFromPublisher() { + return MAX_ELEMENTS; + } + + @Override + public Publisher createPublisher(long n) { + return CompressionAsyncRequestBody.builder() + .asyncRequestBody(customAsyncRequestBodyFromFileWithoutContentLength(n)) + .compressor(compressor) + .build(); + } + + @Override + public Publisher createFailedPublisher() { + return null; + } + + private static AsyncRequestBody customAsyncRequestBodyFromFileWithoutContentLength(long nChunks) { + return new AsyncRequestBody() { + @Override + public Optional contentLength() { + return Optional.empty(); + } + + @Override + public void subscribe(Subscriber s) { + Flowable.fromPublisher(AsyncRequestBody.fromFile(fileOfNChunks(nChunks))).subscribe(s); + } + }; + } + + private static Path fileOfNChunks(long nChunks) { + String name = String.format("%d-chunks-file.dat", nChunks); + Path p = rootDir.resolve(name); + if (!Files.exists(p)) { + try (OutputStream os = Files.newOutputStream(p)) { + os.write(createCompressibleArrayOfNChunks(nChunks)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + return p; + } + + private static byte[] createCompressibleArrayOfNChunks(long nChunks) { + int size = Math.toIntExact(nChunks * CHUNK_SIZE); + ByteBuffer data = ByteBuffer.allocate(size); + + byte[] a = new byte[size / 4]; + byte[] b = new byte[size / 4]; + Arrays.fill(a, (byte) 'a'); + Arrays.fill(b, (byte) 'b'); + + data.put(a); + data.put(b); + data.put(a); + data.put(b); + + return data.array(); + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/SdkPublishersTest.java 
b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/SdkPublishersTest.java index 592873971934..c71816e1ff27 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/SdkPublishersTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/SdkPublishersTest.java @@ -21,6 +21,7 @@ import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; @@ -141,6 +142,23 @@ public void flatMapIterableHandlesError() { .hasCause(exception); } + @Test + public void addTrailingData_handlesCorrectly() { + FakeSdkPublisher fakePublisher = new FakeSdkPublisher<>(); + + FakeStringSubscriber fakeSubscriber = new FakeStringSubscriber(); + fakePublisher.addTrailingData(() -> Arrays.asList("two", "three")) + .subscribe(fakeSubscriber); + + fakePublisher.publish("one"); + fakePublisher.complete(); + + assertThat(fakeSubscriber.recordedEvents()).containsExactly("one", "two", "three"); + assertThat(fakeSubscriber.isComplete()).isTrue(); + assertThat(fakeSubscriber.isError()).isFalse(); + } + + private final static class FakeByteBufferSubscriber implements Subscriber { private final List recordedEvents = new ArrayList<>(); diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/checksum/AwsChunkedEncodingInputStreamTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/checksum/AwsChunkedEncodingInputStreamTest.java index 0fa862dd2acb..44ac097d16cf 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/checksum/AwsChunkedEncodingInputStreamTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/checksum/AwsChunkedEncodingInputStreamTest.java @@ -16,6 +16,7 @@ package software.amazon.awssdk.core.checksum; import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.core.internal.util.ChunkContentUtils.calculateChecksumTrailerLength; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -25,6 +26,7 @@ import software.amazon.awssdk.core.checksums.SdkChecksum; import software.amazon.awssdk.core.internal.io.AwsChunkedEncodingInputStream; import software.amazon.awssdk.core.internal.io.AwsUnsignedChunkedEncodingInputStream; +import software.amazon.awssdk.core.internal.util.ChunkContentUtils; public class AwsChunkedEncodingInputStreamTest { @@ -55,10 +57,9 @@ public void readAwsUnsignedChunkedEncodingInputStream() throws IOException { public void lengthsOfCalculateByChecksumCalculatingInputStream(){ String initialString = "Hello world"; - long calculateChunkLength = AwsUnsignedChunkedEncodingInputStream.calculateStreamContentLength(initialString.length(), - AwsChunkedEncodingInputStream.DEFAULT_CHUNK_SIZE); - long checksumContentLength = AwsUnsignedChunkedEncodingInputStream.calculateChecksumContentLength( - SHA256_ALGORITHM, SHA256_HEADER_NAME); + long calculateChunkLength = ChunkContentUtils.calculateStreamContentLength(initialString.length(), + AwsChunkedEncodingInputStream.DEFAULT_CHUNK_SIZE); + long checksumContentLength = calculateChecksumTrailerLength(SHA256_ALGORITHM, SHA256_HEADER_NAME); assertThat(calculateChunkLength).isEqualTo(19); assertThat(checksumContentLength).isEqualTo(71); } diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/compression/CompressorTypeTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/compression/CompressorTypeTest.java new file 
mode 100644 index 000000000000..f67315b8e5da --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/compression/CompressorTypeTest.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.compression; + +import static org.assertj.core.api.Assertions.assertThat; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.core.internal.compression.CompressorType; + +public class CompressorTypeTest { + + @Test + public void equalsHashcode() { + EqualsVerifier.forClass(CompressorType.class) + .withNonnullFields("id") + .verify(); + } + + @Test + public void compressorType_gzip() { + CompressorType gzip = CompressorType.GZIP; + CompressorType gzipFromString = CompressorType.of("gzip"); + assertThat(gzip).isSameAs(gzipFromString); + assertThat(gzip).isEqualTo(gzipFromString); + } + + @Test + public void compressorType_usesSameInstance_when_sameCompressorTypeOfSameValue() { + CompressorType brotliFromString = CompressorType.of("brotli"); + CompressorType brotliFromStringDuplicate = CompressorType.of("brotli"); + assertThat(brotliFromString).isSameAs(brotliFromStringDuplicate); + assertThat(brotliFromString).isEqualTo(brotliFromStringDuplicate); + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBodyTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBodyTest.java index 39abaffd8f71..4aaeaa3c0710 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBodyTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBodyTest.java @@ -28,11 +28,13 @@ import com.google.common.jimfs.Configuration; import com.google.common.jimfs.Jimfs; import io.reactivex.Flowable; +import java.util.stream.Stream; import org.apache.commons.lang3.RandomStringUtils; import org.assertj.core.util.Lists; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; import software.amazon.awssdk.core.async.AsyncRequestBody; @@ -44,78 +46,91 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatExceptionOfType; -@RunWith(Parameterized.class) public class ChecksumCalculatingAsyncRequestBodyTest { - private final static String testString = "Hello world"; - private final static String expectedTestString = "b\r\n" + + private static final String testString = "Hello world"; + private static final String expectedTestString = "b\r\n" + testString + "\r\n" + "0\r\n" 
+ "x-amz-checksum-crc32:i9aeUg==\r\n\r\n"; - private final static Path path; - - private final static ByteBuffer positionNonZeroBytebuffer; - - private final static ByteBuffer positionZeroBytebuffer; + private static final String emptyString = ""; + private static final String expectedEmptyString = "0\r\n" + + "x-amz-checksum-crc32:AAAAAA==\r\n\r\n"; + private static final Path path; + private static final Path pathToEmpty; static { - byte[] content = testString.getBytes(); - byte[] randomContent = RandomStringUtils.randomAscii(1024).getBytes(StandardCharsets.UTF_8); - positionNonZeroBytebuffer = ByteBuffer.allocate(content.length + randomContent.length); - positionNonZeroBytebuffer.put(randomContent) - .put(content); - positionNonZeroBytebuffer.position(randomContent.length); - - positionZeroBytebuffer = ByteBuffer.allocate(content.length); - positionZeroBytebuffer.put(content); - positionZeroBytebuffer.flip(); - FileSystem fs = Jimfs.newFileSystem(Configuration.unix()); path = fs.getPath("./test"); + pathToEmpty = fs.getPath("./testEmpty"); try { - Files.write(path, content); + Files.write(path, testString.getBytes()); + Files.write(pathToEmpty, emptyString.getBytes()); + } catch (IOException e) { e.printStackTrace(); } } - private final AsyncRequestBody provider; - - public ChecksumCalculatingAsyncRequestBodyTest(AsyncRequestBody provider) { - this.provider = provider; + private static Stream publishers() { + return Stream.of( + Arguments.of("RequestBody from string, test string", + checksumPublisher(AsyncRequestBody.fromString(testString)), + expectedTestString), + Arguments.of("RequestBody from file, test string", + checksumPublisher(AsyncRequestBody.fromFile(path)), + expectedTestString), + Arguments.of("RequestBody from buffer, 0 pos, test string", + checksumPublisher(AsyncRequestBody.fromRemainingByteBuffer(posZeroByteBuffer(testString))), + expectedTestString), + Arguments.of("RequestBody from buffer, random pos, test string", + checksumPublisher(AsyncRequestBody.fromRemainingByteBufferUnsafe(nonPosZeroByteBuffer(testString))), + expectedTestString), + Arguments.of("RequestBody from string, empty string", + checksumPublisher(AsyncRequestBody.fromString(emptyString)), + expectedEmptyString), + //Note: FileAsyncRequestBody with empty file does not call onNext, only onComplete() + Arguments.of("RequestBody from file, empty string", + checksumPublisher(AsyncRequestBody.fromFile(pathToEmpty)), + expectedEmptyString), + Arguments.of("RequestBody from buffer, 0 pos, empty string", + checksumPublisher(AsyncRequestBody.fromRemainingByteBuffer(posZeroByteBuffer(emptyString))), + expectedEmptyString), + Arguments.of("RequestBody from string, random pos, empty string", + checksumPublisher(AsyncRequestBody.fromRemainingByteBufferUnsafe(nonPosZeroByteBuffer(emptyString))), + expectedEmptyString)); } - @Parameterized.Parameters - public static AsyncRequestBody[] data() { - AsyncRequestBody[] asyncRequestBodies = { - ChecksumCalculatingAsyncRequestBody.builder() - .asyncRequestBody(AsyncRequestBody.fromString(testString)) - .algorithm(Algorithm.CRC32) - .trailerHeader("x-amz-checksum-crc32").build(), - - ChecksumCalculatingAsyncRequestBody.builder() - .asyncRequestBody(AsyncRequestBody.fromFile(path)) - .algorithm(Algorithm.CRC32) - .trailerHeader("x-amz-checksum-crc32").build(), + private static ChecksumCalculatingAsyncRequestBody checksumPublisher(AsyncRequestBody sourcePublisher) { + return ChecksumCalculatingAsyncRequestBody.builder() + .asyncRequestBody(sourcePublisher) + 
.algorithm(Algorithm.CRC32) + .trailerHeader("x-amz-checksum-crc32").build(); + } - ChecksumCalculatingAsyncRequestBody.builder() - .asyncRequestBody(AsyncRequestBody.fromRemainingByteBuffer(positionZeroBytebuffer)) - .algorithm(Algorithm.CRC32) - .trailerHeader("x-amz-checksum-crc32").build(), - ChecksumCalculatingAsyncRequestBody.builder() - .asyncRequestBody(AsyncRequestBody.fromRemainingByteBuffersUnsafe(positionNonZeroBytebuffer)) - .algorithm(Algorithm.CRC32) - .trailerHeader("x-amz-checksum-crc32").build(), - }; - return asyncRequestBodies; + private static ByteBuffer posZeroByteBuffer(String content) { + byte[] contentBytes = content.getBytes(StandardCharsets.UTF_8); + ByteBuffer bytes = ByteBuffer.allocate(contentBytes.length); + bytes.put(contentBytes); + bytes.flip(); + return bytes; } - @Test - public void hasCorrectLength() { - assertThat(provider.contentLength()).hasValue((long) expectedTestString.length()); + private static ByteBuffer nonPosZeroByteBuffer(String content) { + byte[] randomContent = RandomStringUtils.randomAscii(1024).getBytes(StandardCharsets.UTF_8); + byte[] contentBytes = content.getBytes(StandardCharsets.UTF_8); + + ByteBuffer bytes = ByteBuffer.allocate(contentBytes.length + randomContent.length); + bytes.put(randomContent) + .put(contentBytes); + bytes.position(randomContent.length); + return bytes; } - @Test - public void hasCorrectContent() throws InterruptedException { + @ParameterizedTest(name = "{index} {0}") + @MethodSource("publishers") + public void publish_differentAsyncRequestBodiesAndSources_produceCorrectData(String description, + AsyncRequestBody provider, + String expectedContent) throws InterruptedException { StringBuilder sb = new StringBuilder(); CountDownLatch done = new CountDownLatch(1); @@ -136,14 +151,15 @@ public void onComplete() { done.countDown(); } }; - provider.subscribe(subscriber); done.await(10, TimeUnit.SECONDS); - assertThat(sb).hasToString(expectedTestString); + + assertThat(provider.contentLength()).hasValue((long) expectedContent.length()); + assertThat(sb).hasToString(expectedContent); } @Test - public void stringConstructorHasCorrectContentType() { + public void constructor_asyncRequestBodyFromString_hasCorrectContentType() { AsyncRequestBody requestBody = ChecksumCalculatingAsyncRequestBody.builder() .asyncRequestBody(AsyncRequestBody.fromString("Hello world")) .algorithm(Algorithm.CRC32) @@ -153,7 +169,7 @@ public void stringConstructorHasCorrectContentType() { } @Test - public void fileConstructorHasCorrectContentType() { + public void constructor_asyncRequestBodyFromFile_hasCorrectContentType() { AsyncRequestBody requestBody = ChecksumCalculatingAsyncRequestBody.builder() .asyncRequestBody(AsyncRequestBody.fromFile(path)) .algorithm(Algorithm.CRC32) @@ -163,7 +179,7 @@ public void fileConstructorHasCorrectContentType() { } @Test - public void bytesArrayConstructorHasCorrectContentType() { + public void constructor_asyncRequestBodyFromBytes_hasCorrectContentType() { AsyncRequestBody requestBody = ChecksumCalculatingAsyncRequestBody.builder() .asyncRequestBody(AsyncRequestBody.fromBytes("hello world".getBytes())) .algorithm(Algorithm.CRC32) @@ -173,7 +189,7 @@ public void bytesArrayConstructorHasCorrectContentType() { } @Test - public void bytesBufferConstructorHasCorrectContentType() { + public void constructor_asyncRequestBodyFromByteBuffer_hasCorrectContentType() { ByteBuffer byteBuffer = ByteBuffer.wrap("hello world".getBytes()); AsyncRequestBody requestBody = ChecksumCalculatingAsyncRequestBody.builder() 
.asyncRequestBody(AsyncRequestBody.fromByteBuffer(byteBuffer)) @@ -184,7 +200,7 @@ public void bytesBufferConstructorHasCorrectContentType() { } @Test - public void emptyBytesConstructorHasCorrectContentType() { + public void constructor_asyncRequestBodyFromEmpty_hasCorrectContentType() { AsyncRequestBody requestBody = ChecksumCalculatingAsyncRequestBody.builder() .asyncRequestBody(AsyncRequestBody.empty()) .algorithm(Algorithm.CRC32) @@ -194,7 +210,7 @@ public void emptyBytesConstructorHasCorrectContentType() { } @Test - public void publisherConstructorThrowsExceptionIfNoContentLength() { + public void constructor_asyncRequestBodyFromPublisher_NoContentLength_throwsException() { List requestBodyStrings = Lists.newArrayList("A", "B", "C"); List bodyBytes = requestBodyStrings.stream() .map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) @@ -210,22 +226,31 @@ public void publisherConstructorThrowsExceptionIfNoContentLength() { } @Test - public void fromBytes_NullChecks() { - - ChecksumCalculatingAsyncRequestBody.Builder noAlgorithmBuilder = ChecksumCalculatingAsyncRequestBody - .builder() - .asyncRequestBody( - AsyncRequestBody.fromString("Hello world")); + public void constructor_checksumIsNull_throwsException() { + assertThatExceptionOfType(NullPointerException.class).isThrownBy( + () -> ChecksumCalculatingAsyncRequestBody.builder() + .asyncRequestBody(AsyncRequestBody.fromString("Hello world")) + .trailerHeader("x-amzn-checksum-crc32") + .build()).withMessage("algorithm cannot be null"); + } - assertThatExceptionOfType(NullPointerException.class).isThrownBy(() -> noAlgorithmBuilder.build()); + @Test + public void constructor_asyncRequestBodyIsNull_throwsException() { + assertThatExceptionOfType(NullPointerException.class).isThrownBy( + () -> ChecksumCalculatingAsyncRequestBody.builder() + .algorithm(Algorithm.CRC32) + .trailerHeader("x-amzn-checksum-crc32") + .build()).withMessage("wrapped AsyncRequestBody cannot be null"); + } - ChecksumCalculatingAsyncRequestBody.Builder noAsyncReqBodyBuilder = ChecksumCalculatingAsyncRequestBody - .builder().algorithm(Algorithm.CRC32).trailerHeader("x-amzn-checksum-crc32"); - assertThatExceptionOfType(NullPointerException.class).isThrownBy(() -> noAsyncReqBodyBuilder.build()); + @Test + public void constructor_trailerHeaderIsNull_throwsException() { + assertThatExceptionOfType(NullPointerException.class).isThrownBy( + () -> ChecksumCalculatingAsyncRequestBody.builder() + .algorithm(Algorithm.CRC32) + .asyncRequestBody(AsyncRequestBody.fromString("Hello world")) + .build()).withMessage("trailerHeader cannot be null"); - ChecksumCalculatingAsyncRequestBody.Builder noTrailerHeaderBuilder = ChecksumCalculatingAsyncRequestBody - .builder().asyncRequestBody(AsyncRequestBody.fromString("Hello world")).algorithm(Algorithm.CRC32); - assertThatExceptionOfType(NullPointerException.class).isThrownBy(() -> noTrailerHeaderBuilder.build()); } @Test diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBodyTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBodyTest.java new file mode 100644 index 000000000000..ffb15e282a13 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBodyTest.java @@ -0,0 +1,173 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.async; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.reactivex.Flowable; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Optional; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.zip.GZIPInputStream; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.compression.GzipCompressor; +import software.amazon.awssdk.core.internal.util.Mimetype; +import software.amazon.awssdk.http.async.SimpleSubscriber; + +public final class CompressionAsyncRequestBodyTest { + private static final Compressor compressor = new GzipCompressor(); + + @ParameterizedTest + @ValueSource(ints = {80, 1000}) + public void hasCorrectContent(int bodySize) throws Exception { + String testString = createCompressibleStringOfGivenSize(bodySize); + byte[] testBytes = testString.getBytes(); + int chunkSize = 133; + AsyncRequestBody provider = CompressionAsyncRequestBody.builder() + .compressor(compressor) + .asyncRequestBody(customAsyncRequestBodyWithoutContentLength(testBytes)) + .chunkSize(chunkSize) + .build(); + + ByteBuffer byteBuffer = ByteBuffer.allocate(testString.length()); + CountDownLatch done = new CountDownLatch(1); + AtomicInteger pos = new AtomicInteger(); + + Subscriber subscriber = new SimpleSubscriber(buffer -> { + byte[] bytes = new byte[buffer.remaining()]; + buffer.get(bytes); + byteBuffer.put(bytes); + + // verify each chunk + byte[] chunkToVerify = new byte[chunkSize]; + System.arraycopy(testBytes, pos.get(), chunkToVerify, 0, chunkSize); + chunkToVerify = compressor.compress(chunkToVerify); + + assertThat(bytes).isEqualTo(chunkToVerify); + pos.addAndGet(chunkSize); + }) { + @Override + public void onError(Throwable t) { + super.onError(t); + done.countDown(); + } + + @Override + public void onComplete() { + super.onComplete(); + done.countDown(); + } + }; + + provider.subscribe(subscriber); + done.await(10, TimeUnit.SECONDS); + + byte[] retrieved = byteBuffer.array(); + byte[] uncompressed = decompress(retrieved); + assertThat(new String(uncompressed)).isEqualTo(testString); + } + + @Test + public void emptyBytesConstructor_hasEmptyContent() throws Exception { + AsyncRequestBody requestBody = CompressionAsyncRequestBody.builder() + .compressor(compressor) + .asyncRequestBody(AsyncRequestBody.empty()) + .build(); + + ByteBuffer byteBuffer = ByteBuffer.allocate(0); + CountDownLatch done = new CountDownLatch(1); + + Subscriber subscriber = new SimpleSubscriber(buffer -> { + byte[] bytes = new byte[buffer.remaining()]; + buffer.get(bytes); + byteBuffer.put(bytes); + }) { + 
@Override + public void onError(Throwable t) { + super.onError(t); + done.countDown(); + } + + @Override + public void onComplete() { + super.onComplete(); + done.countDown(); + } + }; + + requestBody.subscribe(subscriber); + done.await(10, TimeUnit.SECONDS); + assertThat(byteBuffer.array()).isEmpty(); + assertThat(byteBuffer.array()).isEqualTo(new byte[0]); + assertThat(requestBody.contentType()).isEqualTo(Mimetype.MIMETYPE_OCTET_STREAM); + } + + private static String createCompressibleStringOfGivenSize(int size) { + ByteBuffer data = ByteBuffer.allocate(size); + + byte[] a = new byte[size / 4]; + byte[] b = new byte[size / 4]; + Arrays.fill(a, (byte) 'a'); + Arrays.fill(b, (byte) 'b'); + + data.put(a); + data.put(b); + data.put(a); + data.put(b); + + return new String(data.array()); + } + + private static byte[] decompress(byte[] compressedData) throws IOException { + ByteArrayInputStream bais = new ByteArrayInputStream(compressedData); + GZIPInputStream gzipInputStream = new GZIPInputStream(bais); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + byte[] buffer = new byte[1024]; + int bytesRead; + while ((bytesRead = gzipInputStream.read(buffer)) != -1) { + baos.write(buffer, 0, bytesRead); + } + gzipInputStream.close(); + byte[] decompressedData = baos.toByteArray(); + return decompressedData; + } + + private static AsyncRequestBody customAsyncRequestBodyWithoutContentLength(byte[] content) { + return new AsyncRequestBody() { + @Override + public Optional contentLength() { + return Optional.empty(); + } + + @Override + public void subscribe(Subscriber s) { + Flowable.fromPublisher(AsyncRequestBody.fromBytes(content)) + .subscribe(s); + } + }; + } +} \ No newline at end of file diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodySplitHelperTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodySplitHelperTest.java new file mode 100644 index 000000000000..4c5d0748d16d --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodySplitHelperTest.java @@ -0,0 +1,96 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.async; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.core.internal.async.SplittingPublisherTestUtils.verifyIndividualAsyncRequestBody; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import software.amazon.awssdk.core.async.AsyncRequestBodySplitConfiguration; +import software.amazon.awssdk.testutils.RandomTempFile; + +public class FileAsyncRequestBodySplitHelperTest { + + private static final int CHUNK_SIZE = 5; + private static Path testFile; + private static ScheduledExecutorService executor; + + + @BeforeAll + public static void setup() throws IOException { + testFile = new RandomTempFile(2000).toPath(); + executor = Executors.newScheduledThreadPool(1); + } + + @AfterAll + public static void teardown() throws IOException { + try { + Files.delete(testFile); + } catch (NoSuchFileException e) { + // ignore + } + executor.shutdown(); + } + + @ParameterizedTest + @ValueSource(ints = {CHUNK_SIZE, CHUNK_SIZE * 2 - 1, CHUNK_SIZE * 2}) + public void split_differentChunkSize_shouldSplitCorrectly(int chunkSize) throws Exception { + long bufferSize = 55l; + int chunkSizeInBytes = 10; + FileAsyncRequestBody fileAsyncRequestBody = FileAsyncRequestBody.builder() + .path(testFile) + .chunkSizeInBytes(10) + .build(); + AsyncRequestBodySplitConfiguration config = + AsyncRequestBodySplitConfiguration.builder() + .chunkSizeInBytes((long) chunkSize) + .bufferSizeInBytes(55L) + .build(); + FileAsyncRequestBodySplitHelper helper = new FileAsyncRequestBodySplitHelper(fileAsyncRequestBody, config); + + AtomicInteger maxConcurrency = new AtomicInteger(0); + ScheduledFuture scheduledFuture = executor.scheduleWithFixedDelay(verifyConcurrentRequests(helper, maxConcurrency), + 1, 50, TimeUnit.MICROSECONDS); + + verifyIndividualAsyncRequestBody(helper.split(), testFile, chunkSize); + scheduledFuture.cancel(true); + int expectedMaxConcurrency = (int) (bufferSize / chunkSizeInBytes); + assertThat(maxConcurrency.get()).isLessThanOrEqualTo(expectedMaxConcurrency); + } + + private static Runnable verifyConcurrentRequests(FileAsyncRequestBodySplitHelper helper, AtomicInteger maxConcurrency) { + return () -> { + int concurrency = helper.numAsyncRequestBodiesInFlight().get(); + + if (concurrency > maxConcurrency.get()) { + maxConcurrency.set(concurrency); + } + assertThat(helper.numAsyncRequestBodiesInFlight()).hasValueLessThan(10); + }; + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodyTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodyTest.java index da9daf557e22..5d12035c1879 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodyTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodyTest.java @@ -15,11 +15,14 @@ package software.amazon.awssdk.core.internal.async; +import static 
org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.junit.jupiter.api.Assertions.assertTrue; +import static software.amazon.awssdk.core.internal.async.SplittingPublisherTestUtils.verifyIndividualAsyncRequestBody; import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; import java.io.ByteArrayOutputStream; +import java.io.FileInputStream; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; @@ -35,9 +38,12 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.SdkPublisher; import software.amazon.awssdk.testutils.RandomTempFile; import software.amazon.awssdk.utils.BinaryUtils; @@ -45,10 +51,12 @@ public class FileAsyncRequestBodyTest { private static final long MiB = 1024 * 1024; private static final long TEST_FILE_SIZE = 10 * MiB; private static Path testFile; + private static Path smallFile; @BeforeEach public void setup() throws IOException { testFile = new RandomTempFile(TEST_FILE_SIZE).toPath(); + smallFile = new RandomTempFile(100).toPath(); } @AfterEach @@ -226,6 +234,84 @@ public void changingFile_fileGetsDeleted_failsBecauseDeleted() throws Exception .hasCauseInstanceOf(IOException.class); } + @Test + public void positionNotZero_shouldReadFromPosition() throws Exception { + CompletableFuture future = new CompletableFuture<>(); + long position = 20L; + AsyncRequestBody asyncRequestBody = FileAsyncRequestBody.builder() + .path(smallFile) + .position(position) + .chunkSizeInBytes(10) + .build(); + + ByteArrayAsyncResponseTransformer.BaosSubscriber baosSubscriber = + new ByteArrayAsyncResponseTransformer.BaosSubscriber(future); + asyncRequestBody.subscribe(baosSubscriber); + assertThat(asyncRequestBody.contentLength()).contains(80L); + + byte[] bytes = future.get(1, TimeUnit.SECONDS); + + byte[] expected = new byte[80]; + try(FileInputStream inputStream = new FileInputStream(smallFile.toFile())) { + inputStream.skip(position); + inputStream.read(expected, 0, 80); + } + + assertThat(bytes).isEqualTo(expected); + } + + @Test + public void bothPositionAndNumBytesToReadConfigured_shouldHonor() throws Exception { + CompletableFuture future = new CompletableFuture<>(); + long position = 20L; + long numBytesToRead = 5L; + AsyncRequestBody asyncRequestBody = FileAsyncRequestBody.builder() + .path(smallFile) + .position(position) + .numBytesToRead(numBytesToRead) + .chunkSizeInBytes(10) + .build(); + + ByteArrayAsyncResponseTransformer.BaosSubscriber baosSubscriber = + new ByteArrayAsyncResponseTransformer.BaosSubscriber(future); + asyncRequestBody.subscribe(baosSubscriber); + assertThat(asyncRequestBody.contentLength()).contains(numBytesToRead); + + byte[] bytes = future.get(1, TimeUnit.SECONDS); + + byte[] expected = new byte[5]; + try (FileInputStream inputStream = new FileInputStream(smallFile.toFile())) { + inputStream.skip(position); + inputStream.read(expected, 0, 5); + } + + assertThat(bytes).isEqualTo(expected); + } + + @Test + public void numBytesToReadConfigured_shouldHonor() throws Exception { + CompletableFuture future = new CompletableFuture<>(); + AsyncRequestBody asyncRequestBody = 
FileAsyncRequestBody.builder() + .path(smallFile) + .numBytesToRead(5L) + .chunkSizeInBytes(10) + .build(); + + ByteArrayAsyncResponseTransformer.BaosSubscriber baosSubscriber = + new ByteArrayAsyncResponseTransformer.BaosSubscriber(future); + asyncRequestBody.subscribe(baosSubscriber); + assertThat(asyncRequestBody.contentLength()).contains(5L); + + byte[] bytes = future.get(1, TimeUnit.SECONDS); + + byte[] expected = new byte[5]; + try (FileInputStream inputStream = new FileInputStream(smallFile.toFile())) { + inputStream.read(expected, 0, 5); + } + + assertThat(bytes).isEqualTo(expected); + } + private static class ControllableSubscriber implements Subscriber { private final ByteArrayOutputStream output = new ByteArrayOutputStream(); private final CompletableFuture completed = new CompletableFuture<>(); diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/SplittingPublisherTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/SplittingPublisherTest.java index 0966ea6eb76f..d2e06f28492a 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/SplittingPublisherTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/SplittingPublisherTest.java @@ -17,6 +17,7 @@ import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.assertj.core.api.AssertionsForClassTypes.assertThat; +import static software.amazon.awssdk.core.internal.async.SplittingPublisherTestUtils.verifyIndividualAsyncRequestBody; import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; import java.io.ByteArrayInputStream; @@ -44,6 +45,7 @@ import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncRequestBodySplitConfiguration; import software.amazon.awssdk.utils.BinaryUtils; public class SplittingPublisherTest { @@ -72,11 +74,10 @@ public static void afterAll() throws Exception { public void split_contentUnknownMaxMemorySmallerThanChunkSize_shouldThrowException() { AsyncRequestBody body = AsyncRequestBody.fromPublisher(s -> { }); - assertThatThrownBy(() -> SplittingPublisher.builder() - .asyncRequestBody(body) - .chunkSizeInBytes(10L) - .bufferSizeInBytes(5L) - .build()) + assertThatThrownBy(() -> new SplittingPublisher(body, AsyncRequestBodySplitConfiguration.builder() + .chunkSizeInBytes(10L) + .bufferSizeInBytes(5L) + .build())) .hasMessageContaining("must be larger than or equal"); } @@ -106,11 +107,10 @@ public Optional contentLength() { return Optional.empty(); } }; - SplittingPublisher splittingPublisher = SplittingPublisher.builder() - .asyncRequestBody(asyncRequestBody) + SplittingPublisher splittingPublisher = new SplittingPublisher(asyncRequestBody, AsyncRequestBodySplitConfiguration.builder() .chunkSizeInBytes((long) CHUNK_SIZE) .bufferSizeInBytes(10L) - .build(); + .build()); List> futures = new ArrayList<>(); @@ -148,38 +148,13 @@ public Optional contentLength() { private static void verifySplitContent(AsyncRequestBody asyncRequestBody, int chunkSize) throws Exception { - SplittingPublisher splittingPublisher = SplittingPublisher.builder() - .asyncRequestBody(asyncRequestBody) - .chunkSizeInBytes((long) chunkSize) - .bufferSizeInBytes((long) chunkSize * 4) - .build(); + SplittingPublisher splittingPublisher = new SplittingPublisher(asyncRequestBody, + AsyncRequestBodySplitConfiguration.builder() + .chunkSizeInBytes((long) chunkSize) + 
.bufferSizeInBytes((long) chunkSize * 4) + .build()); - List> futures = new ArrayList<>(); - - splittingPublisher.subscribe(requestBody -> { - CompletableFuture baosFuture = new CompletableFuture<>(); - BaosSubscriber subscriber = new BaosSubscriber(baosFuture); - futures.add(baosFuture); - requestBody.subscribe(subscriber); - }).get(5, TimeUnit.SECONDS); - - assertThat(futures.size()).isEqualTo((int) Math.ceil(CONTENT_SIZE / (double) chunkSize)); - - for (int i = 0; i < futures.size(); i++) { - try (FileInputStream fileInputStream = new FileInputStream(testFile)) { - byte[] expected; - if (i == futures.size() - 1) { - int lastChunk = CONTENT_SIZE % chunkSize == 0 ? chunkSize : (CONTENT_SIZE % chunkSize); - expected = new byte[lastChunk]; - } else { - expected = new byte[chunkSize]; - } - fileInputStream.skip(i * chunkSize); - fileInputStream.read(expected); - byte[] actualBytes = futures.get(i).join(); - assertThat(actualBytes).isEqualTo(expected); - }; - } + verifyIndividualAsyncRequestBody(splittingPublisher, testFile.toPath(), chunkSize); } private static class TestAsyncRequestBody implements AsyncRequestBody { diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/SplittingPublisherTestUtils.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/SplittingPublisherTestUtils.java new file mode 100644 index 000000000000..04da97adbf42 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/SplittingPublisherTestUtils.java @@ -0,0 +1,70 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.async; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; + +import java.io.File; +import java.io.FileInputStream; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.assertj.core.api.Assertions; +import org.reactivestreams.Publisher; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.core.internal.async.ByteArrayAsyncResponseTransformer; +import software.amazon.awssdk.core.internal.async.SplittingPublisherTest; + +public final class SplittingPublisherTestUtils { + + public static void verifyIndividualAsyncRequestBody(SdkPublisher publisher, + Path file, + int chunkSize) throws Exception { + + List> futures = new ArrayList<>(); + publisher.subscribe(requestBody -> { + CompletableFuture baosFuture = new CompletableFuture<>(); + ByteArrayAsyncResponseTransformer.BaosSubscriber subscriber = + new ByteArrayAsyncResponseTransformer.BaosSubscriber(baosFuture); + requestBody.subscribe(subscriber); + futures.add(baosFuture); + }).get(5, TimeUnit.SECONDS); + + long contentLength = file.toFile().length(); + Assertions.assertThat(futures.size()).isEqualTo((int) Math.ceil(contentLength / (double) chunkSize)); + + for (int i = 0; i < futures.size(); i++) { + try (FileInputStream fileInputStream = new FileInputStream(file.toFile())) { + byte[] expected; + if (i == futures.size() - 1) { + int lastChunk = contentLength % chunkSize == 0 ? chunkSize : (int) (contentLength % chunkSize); + expected = new byte[lastChunk]; + } else { + expected = new byte[chunkSize]; + } + fileInputStream.skip(i * chunkSize); + fileInputStream.read(expected); + byte[] actualBytes = futures.get(i).join(); + Assertions.assertThat(actualBytes).isEqualTo(expected); + } + } + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/compression/GzipCompressorTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/compression/GzipCompressorTest.java new file mode 100644 index 000000000000..24fb71940f61 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/compression/GzipCompressorTest.java @@ -0,0 +1,56 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.compression; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.core.Is.is; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.zip.GZIPInputStream; +import org.junit.Test; + +public class GzipCompressorTest { + private static final Compressor gzipCompressor = new GzipCompressor(); + private static final String COMPRESSABLE_STRING = + "RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest"; + + @Test + public void compressedData_decompressesCorrectly() throws IOException { + byte[] originalData = COMPRESSABLE_STRING.getBytes(StandardCharsets.UTF_8); + byte[] compressedData = gzipCompressor.compress(originalData); + + int uncompressedSize = originalData.length; + int compressedSize = compressedData.length; + assertThat(compressedSize, lessThan(uncompressedSize)); + + ByteArrayInputStream bais = new ByteArrayInputStream(compressedData); + GZIPInputStream gzipInputStream = new GZIPInputStream(bais); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + byte[] buffer = new byte[1024]; + int bytesRead; + while ((bytesRead = gzipInputStream.read(buffer)) != -1) { + baos.write(buffer, 0, bytesRead); + } + gzipInputStream.close(); + byte[] decompressedData = baos.toByteArray(); + + assertThat(decompressedData, is(originalData)); + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStreamTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStreamTest.java new file mode 100644 index 000000000000..99359dfcd58d --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStreamTest.java @@ -0,0 +1,93 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.io; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static software.amazon.awssdk.core.util.FileUtils.generateRandomAsciiFile; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.Random; +import org.junit.BeforeClass; +import org.junit.Test; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.compression.GzipCompressor; + +public class AwsCompressionInputStreamTest { + private static Compressor compressor; + + @BeforeClass + public static void setup() throws IOException { + compressor = new GzipCompressor(); + } + + @Test + public void nonMarkSupportedInputStream_marksAndResetsCorrectly() throws IOException { + File file = generateRandomAsciiFile(100); + InputStream is = new FileInputStream(file); + assertFalse(is.markSupported()); + + AwsCompressionInputStream compressionInputStream = AwsCompressionInputStream.builder() + .inputStream(is) + .compressor(compressor) + .build(); + + compressionInputStream.mark(100); + compressionInputStream.reset(); + String read1 = readInputStream(compressionInputStream); + compressionInputStream.reset(); + String read2 = readInputStream(compressionInputStream); + assertThat(read1).isEqualTo(read2); + } + + @Test + public void markSupportedInputStream_marksAndResetsCorrectly() throws IOException { + InputStream is = new ByteArrayInputStream(generateRandomBody(100)); + assertTrue(is.markSupported()); + AwsCompressionInputStream compressionInputStream = AwsCompressionInputStream.builder() + .inputStream(is) + .compressor(compressor) + .build(); + compressionInputStream.mark(100); + compressionInputStream.reset(); + String read1 = readInputStream(compressionInputStream); + compressionInputStream.reset(); + String read2 = readInputStream(compressionInputStream); + assertThat(read1).isEqualTo(read2); + } + + private byte[] generateRandomBody(int size) { + byte[] randomData = new byte[size]; + new Random().nextBytes(randomData); + return randomData; + } + + private String readInputStream(InputStream is) throws IOException { + byte[] buffer = new byte[512]; + ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); + int bytesRead; + while ((bytesRead = is.read(buffer)) != -1) { + byteArrayOutputStream.write(buffer, 0, bytesRead); + } + return byteArrayOutputStream.toString(); + } +} diff --git a/docs/LaunchChangelog.md b/docs/LaunchChangelog.md index fc576a0d6f32..4748d67457de 100644 --- a/docs/LaunchChangelog.md +++ b/docs/LaunchChangelog.md @@ -827,3 +827,4 @@ The following libraries are available in 2.0: | Waiters | [Waiters](https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/waiters.html) | 2.15.0 | | CloudFrontUrlSigner, CloudFrontCookieSigner | [CloudFrontUtilities](https://aws.amazon.com/blogs/developer/amazon-cloudfront-signed-urls-and-cookies-are-now-supported-in-aws-sdk-for-java-2-x/) | 2.18.33 | | TransferManager | [S3TransferManager](https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/transfer-manager.html) | 2.19.0 | +| IAM Policy Builder | [IAM Policy Builder](https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/feature-iam-policy-builder.html) | 2.20.126 diff --git a/http-client-spi/pom.xml b/http-client-spi/pom.xml 
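[Editor's sketch] The changelog row above adds the IAM Policy Builder to the feature table; for orientation, a minimal sketch of building and serializing a policy with that module. The action and resource strings are illustrative placeholders.

import software.amazon.awssdk.policybuilder.iam.IamEffect;
import software.amazon.awssdk.policybuilder.iam.IamPolicy;

public class IamPolicyBuilderDemo {
    public static void main(String[] args) {
        // Build a simple allow-statement policy and render it as a JSON policy document.
        IamPolicy policy =
            IamPolicy.builder()
                     .addStatement(s -> s.effect(IamEffect.ALLOW)
                                         .addAction("dynamodb:GetItem")
                                         .addResource("arn:aws:dynamodb:us-east-1:123456789012:table/example"))
                     .build();

        System.out.println(policy.toJson());
    }
}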
index 57efd1b26eac..726b9513b770 100644 --- a/http-client-spi/pom.xml +++ b/http-client-spi/pom.xml @@ -22,7 +22,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT http-client-spi AWS Java SDK :: HTTP Client Interface diff --git a/http-clients/apache-client/pom.xml b/http-clients/apache-client/pom.xml index 4035dce765cd..d1f8832eab9f 100644 --- a/http-clients/apache-client/pom.xml +++ b/http-clients/apache-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT apache-client diff --git a/http-clients/aws-crt-client/pom.xml b/http-clients/aws-crt-client/pom.xml index 6cfc5639362e..45beb2ed2838 100644 --- a/http-clients/aws-crt-client/pom.xml +++ b/http-clients/aws-crt-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 diff --git a/http-clients/netty-nio-client/pom.xml b/http-clients/netty-nio-client/pom.xml index bf36a80542d6..6f4e307ce788 100644 --- a/http-clients/netty-nio-client/pom.xml +++ b/http-clients/netty-nio-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 diff --git a/http-clients/pom.xml b/http-clients/pom.xml index 0bb3c35ddaa0..bdf2bd6cae6c 100644 --- a/http-clients/pom.xml +++ b/http-clients/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 diff --git a/http-clients/url-connection-client/pom.xml b/http-clients/url-connection-client/pom.xml index eef4dcc4925e..7893aa9a73c4 100644 --- a/http-clients/url-connection-client/pom.xml +++ b/http-clients/url-connection-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 diff --git a/metric-publishers/cloudwatch-metric-publisher/pom.xml b/metric-publishers/cloudwatch-metric-publisher/pom.xml index 6618f5733e78..4f324b9a0508 100644 --- a/metric-publishers/cloudwatch-metric-publisher/pom.xml +++ b/metric-publishers/cloudwatch-metric-publisher/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk metric-publishers - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT cloudwatch-metric-publisher diff --git a/metric-publishers/pom.xml b/metric-publishers/pom.xml index 03561273bd0f..1b154b101d0c 100644 --- a/metric-publishers/pom.xml +++ b/metric-publishers/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT metric-publishers diff --git a/pom.xml b/pom.xml index ae803de36217..ed2a4b079ce1 100644 --- a/pom.xml +++ b/pom.xml @@ -20,7 +20,7 @@ 4.0.0 software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT pom AWS Java SDK :: Parent The Amazon Web Services SDK for Java provides Java APIs @@ -91,9 +91,9 @@ ${project.version} - 2.20.125 - 2.13.2 - 2.13.4.2 + 2.20.143 + 2.15.2 + 2.15.2 2.13.2 1.0.1 3.12.0 @@ -135,7 +135,7 @@ 9.4.45.v20220203 - 3.0.0-M5 + 3.1.2 3.8.1 3.1.2 3.0.0-M5 @@ -145,7 +145,7 @@ 3.1.1 1.6 8.42 - 0.8.7 + 0.8.10 1.6.8 1.6.0 2.8.2 @@ -169,7 +169,7 @@ 4.4.13 - 1.0.3 + 1.0.4 ${skipTests} ${project.basedir}/src/it/java @@ -256,7 +256,6 @@ maven-surefire-plugin ${maven.surefire.version} - ${argLine} **/*StabilityTest.java **/*StabilityTests.java diff --git a/release-scripts/pom.xml b/release-scripts/pom.xml index 12d2868b89e9..259fab616b3a 100644 --- a/release-scripts/pom.xml +++ b/release-scripts/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ../pom.xml release-scripts diff --git 
a/services-custom/dynamodb-enhanced/pom.xml b/services-custom/dynamodb-enhanced/pom.xml index d92f55453908..8e87d888ec50 100644 --- a/services-custom/dynamodb-enhanced/pom.xml +++ b/services-custom/dynamodb-enhanced/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services-custom - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT dynamodb-enhanced AWS Java SDK :: DynamoDB :: Enhanced Client diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/extensions/AtomicCounterExtension.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/extensions/AtomicCounterExtension.java index 0337ba209cb0..69a7807bb970 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/extensions/AtomicCounterExtension.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/extensions/AtomicCounterExtension.java @@ -19,8 +19,10 @@ import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.valueRef; import static software.amazon.awssdk.enhanced.dynamodb.internal.update.UpdateExpressionUtils.ifNotExists; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.stream.Collectors; import software.amazon.awssdk.annotations.SdkPublicApi; @@ -37,12 +39,12 @@ import software.amazon.awssdk.enhanced.dynamodb.update.UpdateExpression; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; import software.amazon.awssdk.utils.CollectionUtils; +import software.amazon.awssdk.utils.Logger; /** - * This extension enables atomic counter attributes to be written to the database. - * The extension is loaded by default when you instantiate a - * {@link DynamoDbEnhancedClient} and only needs to be added to the client if you - * are adding custom extensions to the client. + * This extension enables atomic counter attributes to be changed in DynamoDb by creating instructions for modifying + * an existing value or setting a start value. The extension is loaded by default when you instantiate a + * {@link DynamoDbEnhancedClient} and only needs to be added to the client if you are adding custom extensions to the client. *

 * To utilize atomic counters, first create a field in your model that will be used to store the counter.
 * This class field should of type {@link Long} and you need to tag it as an atomic counter:
@@ -56,8 +58,7 @@
 * <p>
 * Every time a new update of the record is successfully written to the database, the counter will be updated automatically.
 * By default, the counter starts at 0 and increments by 1 for each update. The tags provide the capability of adjusting
- * the counter start and increment/decrement values such as described in
- * {@link DynamoDbAtomicCounter}.
+ * the counter start and increment/decrement values such as described in {@link DynamoDbAtomicCounter}.
 * <p>
 * Example 1: Using a bean based table schema
 * <pre>
@@ -86,10 +87,18 @@
 * }
 * </pre>
 *
- * NOTE: When using putItem, the counter will be reset to its start value.
+ * NOTES:
+ * <ul>
+ *     <li>When using putItem, the counter will be reset to its start value.</li>
+ *     <li>The extension will remove any existing occurrences of the atomic counter attributes from the record during an
+ *     updateItem operation. Manually editing attributes marked as atomic counters will have NO EFFECT.</li>
+ * </ul>
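[Editor's sketch] A minimal illustration of the bean-based usage the Javadoc above describes; the Customer class, table name, and field names here are illustrative and not part of this patch.

import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient;
import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable;
import software.amazon.awssdk.enhanced.dynamodb.TableSchema;
import software.amazon.awssdk.enhanced.dynamodb.extensions.annotations.DynamoDbAtomicCounter;
import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean;
import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey;

@DynamoDbBean
public class Customer {
    private String id;
    private Long updateCount;

    @DynamoDbPartitionKey
    public String getId() { return id; }
    public void setId(String id) { this.id = id; }

    // Tagged as an atomic counter: the extension manages this attribute entirely.
    // With this change, any value set on the POJO is filtered out during updateItem
    // instead of causing a "Two document paths" error.
    @DynamoDbAtomicCounter
    public Long getUpdateCount() { return updateCount; }
    public void setUpdateCount(Long updateCount) { this.updateCount = updateCount; }
}

public class AtomicCounterDemo {
    public static void main(String[] args) {
        DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.create();
        DynamoDbTable<Customer> table =
            enhancedClient.table("customers", TableSchema.fromBean(Customer.class));

        Customer customer = new Customer();
        customer.setId("example-id");
        // First update writes the counter's start value (0 by default);
        // each subsequent successful update increments it by 1.
        table.updateItem(customer);
    }
}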
    */ @SdkPublicApi public final class AtomicCounterExtension implements DynamoDbEnhancedClientExtension { + + private static final Logger log = Logger.loggerFor(AtomicCounterExtension.class); + private AtomicCounterExtension() { } @@ -118,6 +127,7 @@ public WriteModification beforeWrite(DynamoDbExtensionContext.BeforeWrite contex break; case UPDATE_ITEM: modificationBuilder.updateExpression(createUpdateExpression(counters)); + modificationBuilder.transformedItem(filterFromItem(counters, context.items())); break; default: break; } @@ -136,6 +146,22 @@ private Map addToItem(Map counter return Collections.unmodifiableMap(itemToTransform); } + private Map filterFromItem(Map counters, Map items) { + Map itemToTransform = new HashMap<>(items); + List removedAttributes = new ArrayList<>(); + for (String attributeName : counters.keySet()) { + if (itemToTransform.containsKey(attributeName)) { + itemToTransform.remove(attributeName); + removedAttributes.add(attributeName); + } + } + if (!removedAttributes.isEmpty()) { + log.debug(() -> String.format("Filtered atomic counter attributes from existing update item to avoid collisions: %s", + String.join(",", removedAttributes))); + } + return Collections.unmodifiableMap(itemToTransform); + } + private SetAction counterAction(Map.Entry e) { String attributeName = e.getKey(); AtomicCounter counter = e.getValue(); diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbBean.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbBean.java index 5d1ea52390f6..0a9706d13798 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbBean.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbBean.java @@ -67,7 +67,7 @@ * } * * public void setId(String id) { - * this.name = id; + * this.id = id; * } * * public Instant getCreatedOn() { diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/AtomicCounterExtensionTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/AtomicCounterExtensionTest.java index 4ca347f038b2..6ee6cf915d74 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/AtomicCounterExtensionTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/AtomicCounterExtensionTest.java @@ -87,7 +87,10 @@ public void beforeWrite_updateItemOperation_hasCounters_createsUpdateExpression( .operationName(OperationName.UPDATE_ITEM) .operationContext(PRIMARY_CONTEXT).build()); - assertThat(result.transformedItem()).isNull(); + Map transformedItem = result.transformedItem(); + assertThat(transformedItem).isNotNull().hasSize(1); + assertThat(transformedItem).containsEntry("id", AttributeValue.fromS(RECORD_ID)); + assertThat(result.updateExpression()).isNotNull(); List setActions = result.updateExpression().setActions(); @@ -112,11 +115,39 @@ public void beforeWrite_updateItemOperation_noCounters_noChanges() { .tableMetadata(SIMPLE_ITEM_MAPPER.tableMetadata()) .operationName(OperationName.UPDATE_ITEM) .operationContext(PRIMARY_CONTEXT).build()); - assertThat(result.transformedItem()).isNull(); assertThat(result.updateExpression()).isNull(); } + @Test + public 
void beforeWrite_updateItemOperation_hasCountersInItem_createsUpdateExpressionAndFilters() { + AtomicCounterItem atomicCounterItem = new AtomicCounterItem(); + atomicCounterItem.setId(RECORD_ID); + atomicCounterItem.setCustomCounter(255L); + + Map items = ITEM_MAPPER.itemToMap(atomicCounterItem, true); + assertThat(items).hasSize(2); + + WriteModification result = + atomicCounterExtension.beforeWrite(DefaultDynamoDbExtensionContext.builder() + .items(items) + .tableMetadata(ITEM_MAPPER.tableMetadata()) + .operationName(OperationName.UPDATE_ITEM) + .operationContext(PRIMARY_CONTEXT).build()); + + Map transformedItem = result.transformedItem(); + assertThat(transformedItem).isNotNull().hasSize(1); + assertThat(transformedItem).containsEntry("id", AttributeValue.fromS(RECORD_ID)); + + assertThat(result.updateExpression()).isNotNull(); + + List setActions = result.updateExpression().setActions(); + assertThat(setActions).hasSize(2); + + verifyAction(setActions, "customCounter", "5", "5"); + verifyAction(setActions, "defaultCounter", "-1", "1"); + } + @Test public void beforeWrite_putItemOperation_hasCounters_createsItemTransform() { AtomicCounterItem atomicCounterItem = new AtomicCounterItem(); diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AtomicCounterTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AtomicCounterTest.java index a07d16a8f5db..9b3d12e6d55f 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AtomicCounterTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AtomicCounterTest.java @@ -112,15 +112,35 @@ public void createViaPut_incrementsCorrectly() { } @Test - public void createViaUpdate_settingCounterInPojo_throwsException() { + public void createViaUpdate_settingCounterInPojo_hasNoEffect() { AtomicCounterRecord record = new AtomicCounterRecord(); record.setId(RECORD_ID); record.setDefaultCounter(10L); record.setAttribute1(STRING_VALUE); - assertThatThrownBy(() -> mappedTable.updateItem(record)) - .isInstanceOf(DynamoDbException.class) - .hasMessageContaining("Two document paths"); + mappedTable.updateItem(record); + AtomicCounterRecord persistedRecord = mappedTable.getItem(record); + assertThat(persistedRecord.getAttribute1()).isEqualTo(STRING_VALUE); + assertThat(persistedRecord.getDefaultCounter()).isEqualTo(0L); + assertThat(persistedRecord.getCustomCounter()).isEqualTo(10L); + assertThat(persistedRecord.getDecreasingCounter()).isEqualTo(-20L); + } + + @Test + public void updateItem_retrievedFromDb_shouldNotThrowException() { + AtomicCounterRecord record = new AtomicCounterRecord(); + record.setId(RECORD_ID); + record.setAttribute1(STRING_VALUE); + mappedTable.updateItem(record); + + AtomicCounterRecord retrievedRecord = mappedTable.getItem(record); + retrievedRecord.setAttribute1("ChangingThisAttribute"); + + retrievedRecord = mappedTable.updateItem(retrievedRecord); + assertThat(retrievedRecord).isNotNull(); + assertThat(retrievedRecord.getDefaultCounter()).isEqualTo(1L); + assertThat(retrievedRecord.getCustomCounter()).isEqualTo(15L); + assertThat(retrievedRecord.getDecreasingCounter()).isEqualTo(-21L); } @Test diff --git a/services-custom/iam-policy-builder/pom.xml b/services-custom/iam-policy-builder/pom.xml index 12012995d49c..41088e47f0b2 100644 --- a/services-custom/iam-policy-builder/pom.xml +++ 
b/services-custom/iam-policy-builder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ../../pom.xml iam-policy-builder diff --git a/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/IamPolicy.java b/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/IamPolicy.java index 0fd0354a395b..fd28e616881d 100644 --- a/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/IamPolicy.java +++ b/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/IamPolicy.java @@ -80,8 +80,9 @@ * String policyVersion = getPolicyResponse.defaultVersionId(); * GetPolicyVersionResponse getPolicyVersionResponse = * iam.getPolicyVersion(r -> r.policyArn(policyArn).versionId(policyVersion)); - * - * IamPolicy policy = IamPolicy.fromJson(getPolicyVersionResponse.policyVersion().document()); + * + * String decodedPolicy = URLDecoder.decode(getPolicyVersionResponse.policyVersion().document(), StandardCharsets.UTF_8); + * IamPolicy policy = IamPolicy.fromJson(decodedPolicy); * * IamStatement newStatement = policy.statements().get(0).copy(s -> s.addAction("dynamodb:GetItem")); * IamPolicy newPolicy = policy.copy(p -> p.statements(Arrays.asList(newStatement))); diff --git a/services-custom/pom.xml b/services-custom/pom.xml index c8634fbe9632..fdf097dc855e 100644 --- a/services-custom/pom.xml +++ b/services-custom/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT services-custom AWS Java SDK :: Custom Services diff --git a/services-custom/s3-transfer-manager/pom.xml b/services-custom/s3-transfer-manager/pom.xml index 002a35000395..6c1728ecd178 100644 --- a/services-custom/s3-transfer-manager/pom.xml +++ b/services-custom/s3-transfer-manager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ../../pom.xml s3-transfer-manager diff --git a/services/accessanalyzer/pom.xml b/services/accessanalyzer/pom.xml index 65bcc0c362b4..c6570f3895d9 100644 --- a/services/accessanalyzer/pom.xml +++ b/services/accessanalyzer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT accessanalyzer AWS Java SDK :: Services :: AccessAnalyzer diff --git a/services/account/pom.xml b/services/account/pom.xml index ba25a84b29fe..130bdd72fdf0 100644 --- a/services/account/pom.xml +++ b/services/account/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT account AWS Java SDK :: Services :: Account diff --git a/services/acm/pom.xml b/services/acm/pom.xml index a71a52d72f65..1002556c3636 100644 --- a/services/acm/pom.xml +++ b/services/acm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT acm AWS Java SDK :: Services :: AWS Certificate Manager diff --git a/services/acmpca/pom.xml b/services/acmpca/pom.xml index 3253e20cb6fc..d70b9efd9dac 100644 --- a/services/acmpca/pom.xml +++ b/services/acmpca/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT acmpca AWS Java SDK :: Services :: ACM PCA diff --git a/services/alexaforbusiness/pom.xml b/services/alexaforbusiness/pom.xml index dcae2f094628..c4403b8a3885 100644 --- a/services/alexaforbusiness/pom.xml +++ b/services/alexaforbusiness/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 
2.20.144-SNAPSHOT 4.0.0 alexaforbusiness diff --git a/services/alexaforbusiness/src/main/resources/codegen-resources/customization.config b/services/alexaforbusiness/src/main/resources/codegen-resources/customization.config index a07c63b58af5..9ef3f846f7a8 100644 --- a/services/alexaforbusiness/src/main/resources/codegen-resources/customization.config +++ b/services/alexaforbusiness/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,3 @@ { - "blacklistedSimpleMethods" : ["*"] + "excludedSimpleMethods" : ["*"] } diff --git a/services/amp/pom.xml b/services/amp/pom.xml index 0a90c7e6e5d3..c6f491a50b4d 100644 --- a/services/amp/pom.xml +++ b/services/amp/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT amp AWS Java SDK :: Services :: Amp diff --git a/services/amplify/pom.xml b/services/amplify/pom.xml index 0dbc6c19bfb4..a9c3d32b9ee1 100644 --- a/services/amplify/pom.xml +++ b/services/amplify/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT amplify AWS Java SDK :: Services :: Amplify diff --git a/services/amplifybackend/pom.xml b/services/amplifybackend/pom.xml index af367f1aa951..1fc646f46476 100644 --- a/services/amplifybackend/pom.xml +++ b/services/amplifybackend/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT amplifybackend AWS Java SDK :: Services :: Amplify Backend diff --git a/services/amplifyuibuilder/pom.xml b/services/amplifyuibuilder/pom.xml index 46c9847752d8..6207c667c313 100644 --- a/services/amplifyuibuilder/pom.xml +++ b/services/amplifyuibuilder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT amplifyuibuilder AWS Java SDK :: Services :: Amplify UI Builder diff --git a/services/apigateway/pom.xml b/services/apigateway/pom.xml index d40d9234c957..bb919900cb82 100644 --- a/services/apigateway/pom.xml +++ b/services/apigateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT apigateway AWS Java SDK :: Services :: Amazon API Gateway diff --git a/services/apigateway/src/main/resources/codegen-resources/customization.config b/services/apigateway/src/main/resources/codegen-resources/customization.config index 040a30404bf1..9662110a579f 100644 --- a/services/apigateway/src/main/resources/codegen-resources/customization.config +++ b/services/apigateway/src/main/resources/codegen-resources/customization.config @@ -4,7 +4,7 @@ "emitAsShape": "String" } }, - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "updateAccount", "createApiKey", "generateClientCertificate" diff --git a/services/apigateway/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/apigateway/src/main/resources/codegen-resources/endpoint-rule-set.json index 825415021baa..618041523568 100644 --- a/services/apigateway/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/apigateway/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,64 +45,17 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", 
"argv": [ { - "ref": "Endpoint" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "ref": "UseFIPS" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + true ] } - ] + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" }, { "conditions": [ @@ -111,19 +63,51 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ { - "fn": "booleanEquals", + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", "argv": [ { - "ref": "UseDualStack" - }, - true - ] + "ref": "Region" + } + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -133,90 +117,109 @@ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] }, { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://apigateway-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://apigateway-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + 
], "type": "tree", "rules": [ { @@ -229,78 +232,83 @@ "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://apigateway.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://apigateway.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://apigateway.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://apigateway.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/apigateway/src/main/resources/codegen-resources/endpoint-tests.json b/services/apigateway/src/main/resources/codegen-resources/endpoint-tests.json index 0256617c6b88..3d5b86aed1f7 100644 --- a/services/apigateway/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/apigateway/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,1136 +1,18 @@ { "testCases": [ { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-south-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-south-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and 
DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-south-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-south-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region 
us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-gov-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.me-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.me-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.me-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.me-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - 
"UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-central-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-central-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://apigateway.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://apigateway.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack 
enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.me-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.me-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.me-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.sa-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "sa-east-1", - 
"UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "sa-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.sa-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.sa-east-1.amazonaws.com" + "url": "https://apigateway.af-south-1.amazonaws.com" } }, "params": { + "Region": "af-south-1", "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-east-1", "UseDualStack": false } }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-east-1", - "UseDualStack": true - } - }, { "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { @@ -1139,535 +21,483 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseFIPS": true, - "Region": "cn-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "UseFIPS": true, - "Region": "cn-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { "UseFIPS": false, - "Region": "cn-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "UseFIPS": false, - "Region": "cn-north-1", "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For 
region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.us-gov-west-1.amazonaws.com" + "url": "https://apigateway.ap-northeast-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "us-gov-west-1", + "Region": "ap-northeast-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.us-gov-west-1.api.aws" + "url": "https://apigateway.ap-northeast-2.amazonaws.com" } }, "params": { + "Region": "ap-northeast-2", "UseFIPS": false, - "Region": "us-gov-west-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.us-gov-west-1.amazonaws.com" + "url": "https://apigateway.ap-northeast-3.amazonaws.com" } }, "params": { + "Region": "ap-northeast-3", "UseFIPS": false, - "Region": "us-gov-west-1", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-southeast-1.api.aws" + "url": "https://apigateway.ap-south-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-1", - "UseDualStack": true + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-southeast-1.amazonaws.com" + "url": "https://apigateway.ap-southeast-1.amazonaws.com" } }, "params": { - "UseFIPS": true, "Region": "ap-southeast-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-southeast-1.api.aws" + "url": "https://apigateway.ap-southeast-2.amazonaws.com" } }, "params": { + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "ap-southeast-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-southeast-1.amazonaws.com" + "url": "https://apigateway.ca-central-1.amazonaws.com" } }, "params": { + "Region": "ca-central-1", "UseFIPS": false, - "Region": "ap-southeast-1", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-southeast-2.api.aws" + "url": "https://apigateway.eu-central-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-2", - "UseDualStack": true + "Region": "eu-central-1", + "UseFIPS": false, + 
"UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-southeast-2.amazonaws.com" + "url": "https://apigateway.eu-north-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-2", + "Region": "eu-north-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-southeast-2.api.aws" + "url": "https://apigateway.eu-south-1.amazonaws.com" } }, "params": { + "Region": "eu-south-1", "UseFIPS": false, - "Region": "ap-southeast-2", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-southeast-2.amazonaws.com" + "url": "https://apigateway.eu-west-1.amazonaws.com" } }, "params": { + "Region": "eu-west-1", "UseFIPS": false, - "Region": "ap-southeast-2", "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://apigateway.eu-west-2.amazonaws.com" + } }, "params": { - "UseFIPS": true, - "Region": "us-iso-east-1", - "UseDualStack": true + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://apigateway.eu-west-3.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "us-iso-east-1", + "Region": "eu-west-3", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://apigateway.me-south-1.amazonaws.com" + } }, "params": { + "Region": "me-south-1", "UseFIPS": false, - "Region": "us-iso-east-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.us-iso-east-1.c2s.ic.gov" + "url": "https://apigateway.sa-east-1.amazonaws.com" } }, "params": { + "Region": "sa-east-1", "UseFIPS": false, - "Region": "us-iso-east-1", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-southeast-3.api.aws" + "url": 
"https://apigateway.us-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-3", - "UseDualStack": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-southeast-3.amazonaws.com" + "url": "https://apigateway.us-east-2.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-3", + "Region": "us-east-2", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-southeast-3.api.aws" + "url": "https://apigateway.us-west-1.amazonaws.com" } }, "params": { + "Region": "us-west-1", "UseFIPS": false, - "Region": "ap-southeast-3", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-southeast-3.amazonaws.com" + "url": "https://apigateway.us-west-2.amazonaws.com" } }, "params": { + "Region": "us-west-2", "UseFIPS": false, - "Region": "ap-southeast-3", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-southeast-4.api.aws" + "url": "https://apigateway-fips.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "ap-southeast-4", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-southeast-4.amazonaws.com" + "url": "https://apigateway-fips.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "ap-southeast-4", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-southeast-4.api.aws" + "url": "https://apigateway.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "ap-southeast-4", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://apigateway.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-southeast-4.amazonaws.com" + "url": "https://apigateway.cn-northwest-1.amazonaws.com.cn" } }, "params": { + "Region": "cn-northwest-1", "UseFIPS": false, - "Region": "ap-southeast-4", "UseDualStack": 
false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.us-east-1.api.aws" + "url": "https://apigateway-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "Region": "us-east-1", "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.us-east-1.amazonaws.com" + "url": "https://apigateway-fips.cn-north-1.amazonaws.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "Region": "us-east-1", "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://apigateway.us-east-1.api.aws" + "url": "https://apigateway.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.us-east-1.amazonaws.com" + "url": "https://apigateway.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://apigateway.us-gov-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-west-1", "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.us-east-2.api.aws" + "url": "https://apigateway-fips.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-east-2", "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.us-east-2.amazonaws.com" + "url": "https://apigateway-fips.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-east-2", "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://apigateway.us-east-2.api.aws" + "url": "https://apigateway.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-east-2", "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.us-east-2.amazonaws.com" + 
"url": "https://apigateway.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-2", "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://apigateway-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "cn-northwest-1", "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.cn-northwest-1.amazonaws.com.cn" + "url": "https://apigateway-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "cn-northwest-1", "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://apigateway.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "cn-northwest-1", "UseDualStack": true } }, - { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.cn-northwest-1.amazonaws.com.cn" - } - }, - "params": { - "UseFIPS": false, - "Region": "cn-northwest-1", - "UseDualStack": false - } - }, { "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -1679,8 +509,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -1690,8 +520,8 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -1703,21 +533,34 @@ } }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -1728,8 +571,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": 
"https://example.com" } @@ -1740,11 +583,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/apigateway/src/main/resources/codegen-resources/service-2.json b/services/apigateway/src/main/resources/codegen-resources/service-2.json index 70ba1fdc6064..eb167860d6ab 100644 --- a/services/apigateway/src/main/resources/codegen-resources/service-2.json +++ b/services/apigateway/src/main/resources/codegen-resources/service-2.json @@ -2138,7 +2138,7 @@ }, "customerId":{ "shape":"String", - "documentation":"

    An AWS Marketplace customer identifier , when integrating with the AWS SaaS Marketplace.

    " + "documentation":"

    An Amazon Web Services Marketplace customer identifier, when integrating with the Amazon Web Services SaaS Marketplace.

    " }, "description":{ "shape":"String", @@ -2475,7 +2475,7 @@ }, "customerId":{ "shape":"String", - "documentation":"

    An AWS Marketplace customer identifier , when integrating with the AWS SaaS Marketplace.

    " + "documentation":"

    An Amazon Web Services Marketplace customer identifier, when integrating with the Amazon Web Services SaaS Marketplace.

    " }, "tags":{ "shape":"MapOfStringToString", @@ -2689,7 +2689,7 @@ }, "certificateArn":{ "shape":"String", - "documentation":"

    The reference to an AWS-managed certificate that will be used by edge-optimized endpoint for this domain name. AWS Certificate Manager is the only supported source.

    " + "documentation":"

    The reference to an Amazon Web Services-managed certificate that will be used by edge-optimized endpoint for this domain name. Certificate Manager is the only supported source.

    " }, "regionalCertificateName":{ "shape":"String", @@ -2697,7 +2697,7 @@ }, "regionalCertificateArn":{ "shape":"String", - "documentation":"

    The reference to an AWS-managed certificate that will be used by regional endpoint for this domain name. AWS Certificate Manager is the only supported source.

    " + "documentation":"

    The reference to an Amazon Web Services-managed certificate that will be used by regional endpoint for this domain name. Certificate Manager is the only supported source.

    " }, "endpointConfiguration":{ "shape":"EndpointConfiguration", @@ -2834,7 +2834,7 @@ }, "apiKeySource":{ "shape":"ApiKeySourceType", - "documentation":"

    The source of the API key for metering requests according to a usage plan. Valid values are: >HEADER to read the API key from the X-API-Key header of a request. AUTHORIZER to read the API key from the UsageIdentifierKey from a custom authorizer.

    " + "documentation":"

    The source of the API key for metering requests according to a usage plan. Valid values are: HEADER to read the API key from the X-API-Key header of a request. AUTHORIZER to read the API key from the UsageIdentifierKey from a custom authorizer.

    " }, "endpointConfiguration":{ "shape":"EndpointConfiguration", @@ -2985,7 +2985,7 @@ }, "targetArns":{ "shape":"ListOfString", - "documentation":"

    The ARN of the network load balancer of the VPC targeted by the VPC link. The network load balancer must be owned by the same AWS account of the API owner.

    " + "documentation":"

    The ARN of the network load balancer of the VPC targeted by the VPC link. The network load balancer must be owned by the same Amazon Web Services account of the API owner.

    " }, "tags":{ "shape":"MapOfStringToString", @@ -3631,7 +3631,7 @@ }, "certificateArn":{ "shape":"String", - "documentation":"

    The reference to an AWS-managed certificate that will be used by edge-optimized endpoint for this domain name. AWS Certificate Manager is the only supported source.

    " + "documentation":"

    The reference to an Amazon Web Services-managed certificate that will be used by edge-optimized endpoint for this domain name. Certificate Manager is the only supported source.

    " }, "certificateUploadDate":{ "shape":"Timestamp", @@ -3651,7 +3651,7 @@ }, "regionalCertificateArn":{ "shape":"String", - "documentation":"

    The reference to an AWS-managed certificate that will be used for validating the regional domain name. AWS Certificate Manager is the only supported source.

    " + "documentation":"

    The reference to an Amazon Web Services-managed certificate that will be used for validating the regional domain name. Certificate Manager is the only supported source.

    " }, "distributionDomainName":{ "shape":"String", @@ -3931,7 +3931,7 @@ }, "customerId":{ "shape":"String", - "documentation":"

    The identifier of a customer in AWS Marketplace or an external system, such as a developer portal.

    ", + "documentation":"

    The identifier of a customer in Amazon Web Services Marketplace or an external system, such as a developer portal.

    ", "location":"querystring", "locationName":"customerId" }, @@ -5041,7 +5041,7 @@ }, "mode":{ "shape":"PutMode", - "documentation":"

    A query parameter to indicate whether to overwrite (OVERWRITE) any existing DocumentationParts definition or to merge (MERGE) the new definition into the existing one. The default value is MERGE.

    ", + "documentation":"

    A query parameter to indicate whether to overwrite (overwrite) any existing DocumentationParts definition or to merge (merge) the new definition into the existing one. The default value is merge.

    ", "location":"querystring", "locationName":"mode" }, @@ -5071,7 +5071,7 @@ }, "parameters":{ "shape":"MapOfStringToString", - "documentation":"

    A key-value map of context-specific query string parameters specifying the behavior of different API importing operations. The following shows operation-specific parameters and their supported values.

    To exclude DocumentationParts from the import, set parameters as ignore=documentation.

    To configure the endpoint type, set parameters as endpointConfigurationTypes=EDGE, endpointConfigurationTypes=REGIONAL, or endpointConfigurationTypes=PRIVATE. The default endpoint type is EDGE.

    To handle imported basepath, set parameters as basepath=ignore, basepath=prepend or basepath=split.

    For example, the AWS CLI command to exclude documentation from the imported API is:

    The AWS CLI command to set the regional endpoint on the imported API is:

    ", + "documentation":"

    A key-value map of context-specific query string parameters specifying the behavior of different API importing operations. The following shows operation-specific parameters and their supported values.

    To exclude DocumentationParts from the import, set parameters as ignore=documentation.

    To configure the endpoint type, set parameters as endpointConfigurationTypes=EDGE, endpointConfigurationTypes=REGIONAL, or endpointConfigurationTypes=PRIVATE. The default endpoint type is EDGE.

    To handle imported basepath, set parameters as basepath=ignore, basepath=prepend or basepath=split.
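    A hedged sketch of passing these import parameters with the Java v2 ImportRestApiRequest builder; the inline OpenAPI body is a placeholder assumption.

    import java.util.Map;
    import software.amazon.awssdk.core.SdkBytes;
    import software.amazon.awssdk.services.apigateway.ApiGatewayClient;
    import software.amazon.awssdk.services.apigateway.model.ImportRestApiRequest;

    public class ImportRestApiExample {
        public static void main(String[] args) {
            try (ApiGatewayClient apiGateway = ApiGatewayClient.create()) {
                ImportRestApiRequest request = ImportRestApiRequest.builder()
                        // Exclude documentation parts and import as a regional endpoint.
                        .parameters(Map.of(
                                "ignore", "documentation",
                                "endpointConfigurationTypes", "REGIONAL",
                                "basepath", "prepend"))
                        .body(SdkBytes.fromUtf8String("{\"openapi\": \"3.0.1\"}")) // placeholder definition
                        .build();
                apiGateway.importRestApi(request);
            }
        }
    }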

    ", "location":"querystring" }, "body":{ @@ -5092,11 +5092,11 @@ }, "httpMethod":{ "shape":"String", - "documentation":"

    Specifies the integration's HTTP method type.

    " + "documentation":"

    Specifies the integration's HTTP method type. For the Type property, if you specify MOCK, this property is optional. For Lambda integrations, you must set the integration method to POST. For all other types, you must specify this property.

    " }, "uri":{ "shape":"String", - "documentation":"

    Specifies Uniform Resource Identifier (URI) of the integration endpoint.

    For HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 specification, for either standard integration, where connectionType is not VPC_LINK, or private integration, where connectionType is VPC_LINK. For a private HTTP integration, the URI is not used for routing. For AWS or AWS_PROXY integrations, the URI is of the form arn:aws:apigateway:{region}:{subdomain.service|service}:path|action/{service_api}. Here, {Region} is the API Gateway region (e.g., us-east-1); {service} is the name of the integrated Amazon Web Services service (e.g., s3); and {subdomain} is a designated subdomain supported by certain Amazon Web Services service for fast host-name lookup. action can be used for an Amazon Web Services service action-based API, using an Action={name}&{p1}={v1}&p2={v2}... query string. The ensuing {service_api} refers to a supported action {name} plus any required input parameters. Alternatively, path can be used for an AWS service path-based API. The ensuing service_api refers to the path to an Amazon Web Services service resource, including the region of the integrated Amazon Web Services service, if applicable. For example, for integration with the S3 API of GetObject, the uri can be either arn:aws:apigateway:us-west-2:s3:action/GetObject&Bucket={bucket}&Key={key} or arn:aws:apigateway:us-west-2:s3:path/{bucket}/{key}

    " + "documentation":"

    Specifies Uniform Resource Identifier (URI) of the integration endpoint.

    For HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 specification for standard integrations. If connectionType is VPC_LINK specify the Network Load Balancer DNS name. For AWS or AWS_PROXY integrations, the URI is of the form arn:aws:apigateway:{region}:{subdomain.service|service}:path|action/{service_api}. Here, {Region} is the API Gateway region (e.g., us-east-1); {service} is the name of the integrated Amazon Web Services service (e.g., s3); and {subdomain} is a designated subdomain supported by certain Amazon Web Services service for fast host-name lookup. action can be used for an Amazon Web Services service action-based API, using an Action={name}&{p1}={v1}&p2={v2}... query string. The ensuing {service_api} refers to a supported action {name} plus any required input parameters. Alternatively, path can be used for an Amazon Web Services service path-based API. The ensuing service_api refers to the path to an Amazon Web Services service resource, including the region of the integrated Amazon Web Services service, if applicable. For example, for integration with the S3 API of GetObject, the uri can be either arn:aws:apigateway:us-west-2:s3:action/GetObject&Bucket={bucket}&Key={key} or arn:aws:apigateway:us-west-2:s3:path/{bucket}/{key}
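    A sketch, under the assumption that the same uri and credentials members are exposed on PutIntegrationRequest, of wiring the S3 path-style URI above into an AWS-type integration; all identifiers are placeholders.

    import software.amazon.awssdk.services.apigateway.ApiGatewayClient;
    import software.amazon.awssdk.services.apigateway.model.IntegrationType;
    import software.amazon.awssdk.services.apigateway.model.PutIntegrationRequest;

    public class PutIntegrationExample {
        public static void main(String[] args) {
            try (ApiGatewayClient apiGateway = ApiGatewayClient.create()) {
                PutIntegrationRequest request = PutIntegrationRequest.builder()
                        .restApiId("a1b2c3")           // placeholder REST API id
                        .resourceId("r4s5t6")          // placeholder resource id
                        .httpMethod("GET")             // method on the API resource
                        .type(IntegrationType.AWS)     // integrate with an Amazon Web Services service endpoint
                        .integrationHttpMethod("POST") // back-end method; Lambda integrations require POST
                        .uri("arn:aws:apigateway:us-west-2:s3:path/{bucket}/{key}")
                        .credentials("arn:aws:iam::123456789012:role/apigAwsProxyRole") // placeholder role
                        .build();
                apiGateway.putIntegration(request);
            }
        }
    }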

    " }, "connectionType":{ "shape":"ConnectionType", @@ -5108,7 +5108,7 @@ }, "credentials":{ "shape":"String", - "documentation":"

    Specifies the credentials required for the integration, if any. For AWS integrations, three options are available. To specify an IAM Role for API Gateway to assume, use the role's Amazon Resource Name (ARN). To require that the caller's identity be passed through from the request, specify the string arn:aws:iam::\\*:user/\\*. To use resource-based permissions on supported AWS services, specify null.

    " + "documentation":"

    Specifies the credentials required for the integration, if any. For AWS integrations, three options are available. To specify an IAM Role for API Gateway to assume, use the role's Amazon Resource Name (ARN). To require that the caller's identity be passed through from the request, specify the string arn:aws:iam::\\*:user/\\*. To use resource-based permissions on supported Amazon Web Services services, specify null.

    " }, "requestParameters":{ "shape":"MapOfStringToString", @@ -5147,7 +5147,7 @@ "documentation":"

    Specifies the TLS configuration for an integration.

    " } }, - "documentation":"

    Represents an HTTP, HTTP_PROXY, AWS, AWS_PROXY, or Mock integration.

    " + "documentation":"

    Represents an HTTP, HTTP_PROXY, AWS, AWS_PROXY, or Mock integration.

    " }, "IntegrationResponse":{ "type":"structure", @@ -5158,7 +5158,7 @@ }, "selectionPattern":{ "shape":"String", - "documentation":"

    Specifies the regular expression (regex) pattern used to choose an integration response based on the response from the back end. For example, if the success response returns nothing and the error response returns some string, you could use the .+ regex to match error response. However, make sure that the error response does not contain any newline (\\n) character in such cases. If the back end is an AWS Lambda function, the AWS Lambda function error header is matched. For all other HTTP and AWS back ends, the HTTP status code is matched.

    " + "documentation":"

    Specifies the regular expression (regex) pattern used to choose an integration response based on the response from the back end. For example, if the success response returns nothing and the error response returns some string, you could use the .+ regex to match error response. However, make sure that the error response does not contain any newline (\\n) character in such cases. If the back end is an Lambda function, the Lambda function error header is matched. For all other HTTP and Amazon Web Services back ends, the HTTP status code is matched.

    " }, "responseParameters":{ "shape":"MapOfStringToString", @@ -5177,7 +5177,7 @@ }, "IntegrationType":{ "type":"string", - "documentation":"

    The integration type. The valid value is HTTP for integrating an API method with an HTTP backend; AWS with any AWS service endpoints; MOCK for testing without actually invoking the backend; HTTP_PROXY for integrating with the HTTP proxy integration; AWS_PROXY for integrating with the Lambda proxy integration.

    ", + "documentation":"

    The integration type. The valid value is HTTP for integrating an API method with an HTTP backend; AWS with any Amazon Web Services service endpoints; MOCK for testing without actually invoking the backend; HTTP_PROXY for integrating with the HTTP proxy integration; AWS_PROXY for integrating with the Lambda proxy integration.

    ", "enum":[ "HTTP", "AWS", @@ -5440,43 +5440,43 @@ "members":{ "metricsEnabled":{ "shape":"Boolean", - "documentation":"

    Specifies whether Amazon CloudWatch metrics are enabled for this method. The PATCH path for this setting is /{method_setting_key}/metrics/enabled, and the value is a Boolean.

    " + "documentation":"

    Specifies whether Amazon CloudWatch metrics are enabled for this method.

    " }, "loggingLevel":{ "shape":"String", - "documentation":"

    Specifies the logging level for this method, which affects the log entries pushed to Amazon CloudWatch Logs. The PATCH path for this setting is /{method_setting_key}/logging/loglevel, and the available levels are OFF, ERROR, and INFO. Choose ERROR to write only error-level entries to CloudWatch Logs, or choose INFO to include all ERROR events as well as extra informational events.

    " + "documentation":"

    Specifies the logging level for this method, which affects the log entries pushed to Amazon CloudWatch Logs. Valid values are OFF, ERROR, and INFO. Choose ERROR to write only error-level entries to CloudWatch Logs, or choose INFO to include all ERROR events as well as extra informational events.

    " }, "dataTraceEnabled":{ "shape":"Boolean", - "documentation":"

    Specifies whether data trace logging is enabled for this method, which affects the log entries pushed to Amazon CloudWatch Logs. The PATCH path for this setting is /{method_setting_key}/logging/dataTrace, and the value is a Boolean.

    " + "documentation":"

    Specifies whether data trace logging is enabled for this method, which affects the log entries pushed to Amazon CloudWatch Logs.

    " }, "throttlingBurstLimit":{ "shape":"Integer", - "documentation":"

    Specifies the throttling burst limit. The PATCH path for this setting is /{method_setting_key}/throttling/burstLimit, and the value is an integer.

    " + "documentation":"

    Specifies the throttling burst limit.

    " }, "throttlingRateLimit":{ "shape":"Double", - "documentation":"

    Specifies the throttling rate limit. The PATCH path for this setting is /{method_setting_key}/throttling/rateLimit, and the value is a double.

    " + "documentation":"

    Specifies the throttling rate limit.

    " }, "cachingEnabled":{ "shape":"Boolean", - "documentation":"

    Specifies whether responses should be cached and returned for requests. A cache cluster must be enabled on the stage for responses to be cached. The PATCH path for this setting is /{method_setting_key}/caching/enabled, and the value is a Boolean.

    " + "documentation":"

    Specifies whether responses should be cached and returned for requests. A cache cluster must be enabled on the stage for responses to be cached.

    " }, "cacheTtlInSeconds":{ "shape":"Integer", - "documentation":"

    Specifies the time to live (TTL), in seconds, for cached responses. The higher the TTL, the longer the response will be cached. The PATCH path for this setting is /{method_setting_key}/caching/ttlInSeconds, and the value is an integer.

    " + "documentation":"

    Specifies the time to live (TTL), in seconds, for cached responses. The higher the TTL, the longer the response will be cached.

    " }, "cacheDataEncrypted":{ "shape":"Boolean", - "documentation":"

    Specifies whether the cached responses are encrypted. The PATCH path for this setting is /{method_setting_key}/caching/dataEncrypted, and the value is a Boolean.

    " + "documentation":"

    Specifies whether the cached responses are encrypted.

    " }, "requireAuthorizationForCacheControl":{ "shape":"Boolean", - "documentation":"

    Specifies whether authorization is required for a cache invalidation request. The PATCH path for this setting is /{method_setting_key}/caching/requireAuthorizationForCacheControl, and the value is a Boolean.

    " + "documentation":"

    Specifies whether authorization is required for a cache invalidation request.

    " }, "unauthorizedCacheControlHeaderStrategy":{ "shape":"UnauthorizedCacheControlHeaderStrategy", - "documentation":"

    Specifies how to handle unauthorized requests for cache invalidation. The PATCH path for this setting is /{method_setting_key}/caching/unauthorizedCacheControlHeaderStrategy, and the available values are FAIL_WITH_403, SUCCEED_WITH_RESPONSE_HEADER, SUCCEED_WITHOUT_RESPONSE_HEADER.

    " + "documentation":"

    Specifies how to handle unauthorized requests for cache invalidation.

    " } }, "documentation":"

    Specifies the method setting properties.
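    The removed wording above listed the PATCH paths for these settings (for example /{method_setting_key}/logging/loglevel); a hedged sketch of applying one of them through UpdateStage, with placeholder identifiers.

    import software.amazon.awssdk.services.apigateway.ApiGatewayClient;
    import software.amazon.awssdk.services.apigateway.model.Op;
    import software.amazon.awssdk.services.apigateway.model.PatchOperation;
    import software.amazon.awssdk.services.apigateway.model.UpdateStageRequest;

    public class UpdateStageLoggingExample {
        public static void main(String[] args) {
            try (ApiGatewayClient apiGateway = ApiGatewayClient.create()) {
                UpdateStageRequest request = UpdateStageRequest.builder()
                        .restApiId("a1b2c3") // placeholder REST API id
                        .stageName("prod")
                        .patchOperations(PatchOperation.builder()
                                .op(Op.REPLACE)
                                // '*/*' targets every method; 'loglevel' corresponds to loggingLevel above.
                                .path("/*/*/logging/loglevel")
                                .value("INFO")
                                .build())
                        .build();
                apiGateway.updateStage(request);
            }
        }
    }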

    " @@ -6088,6 +6088,10 @@ "disableExecuteApiEndpoint":{ "shape":"Boolean", "documentation":"

    Specifies whether clients can invoke your API by using the default execute-api endpoint. By default, clients can invoke your API with the default https://{api_id}.execute-api.{region}.amazonaws.com endpoint. To require that clients use a custom domain name to invoke your API, disable the default endpoint.

    " + }, + "rootResourceId":{ + "shape":"String", + "documentation":"

    The API's root resource ID.

    " } }, "documentation":"

    Represents a REST API.

    " @@ -6412,7 +6416,7 @@ }, "latency":{ "shape":"Long", - "documentation":"

    The execution latency of the test authorizer request.

    " + "documentation":"

    The execution latency, in ms, of the test authorizer request.

    " }, "principalId":{ "shape":"String", @@ -6511,7 +6515,7 @@ }, "latency":{ "shape":"Long", - "documentation":"

    The execution latency of the test invoke request.

    " + "documentation":"

    The execution latency, in ms, of the test invoke request.

    " } }, "documentation":"

    Represents the response of the test invoke request in the HTTP method.

    " @@ -6751,7 +6755,7 @@ "members":{ "restApiId":{ "shape":"String", - "documentation":"

    The string identifier of the associated RestApi..

    ", + "documentation":"

    The string identifier of the associated RestApi.

    ", "location":"uri", "locationName":"restapi_id" }, @@ -7191,7 +7195,7 @@ }, "productCode":{ "shape":"String", - "documentation":"

    The AWS Markeplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace.

    " + "documentation":"

    The Amazon Web Services Marketplace product identifier to associate with the usage plan as a SaaS product on the Amazon Web Services Marketplace.

    " }, "tags":{ "shape":"MapOfStringToString", @@ -7263,7 +7267,7 @@ }, "targetArns":{ "shape":"ListOfString", - "documentation":"

    The ARN of the network load balancer of the VPC targeted by the VPC link. The network load balancer must be owned by the same AWS account of the API owner.

    " + "documentation":"

    The ARN of the network load balancer of the VPC targeted by the VPC link. The network load balancer must be owned by the same Amazon Web Services account of the API owner.

    " }, "status":{ "shape":"VpcLinkStatus", @@ -7302,5 +7306,5 @@ "documentation":"

    The collection of VPC links under the caller's account in a region.

    " } }, - "documentation":"Amazon API Gateway

    Amazon API Gateway helps developers deliver robust, secure, and scalable mobile and web application back ends. API Gateway allows developers to securely connect mobile and web applications to APIs that run on AWS Lambda, Amazon EC2, or other publicly addressable web services that are hosted outside of AWS.

    " + "documentation":"Amazon API Gateway

    Amazon API Gateway helps developers deliver robust, secure, and scalable mobile and web application back ends. API Gateway allows developers to securely connect mobile and web applications to APIs that run on Lambda, Amazon EC2, or other publicly addressable web services that are hosted outside of AWS.

    " } diff --git a/services/apigatewaymanagementapi/pom.xml b/services/apigatewaymanagementapi/pom.xml index 608878a40838..3df19dab349e 100644 --- a/services/apigatewaymanagementapi/pom.xml +++ b/services/apigatewaymanagementapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT apigatewaymanagementapi AWS Java SDK :: Services :: ApiGatewayManagementApi diff --git a/services/apigatewayv2/pom.xml b/services/apigatewayv2/pom.xml index 68937fc62820..c1734462eff1 100644 --- a/services/apigatewayv2/pom.xml +++ b/services/apigatewayv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT apigatewayv2 AWS Java SDK :: Services :: ApiGatewayV2 diff --git a/services/appconfig/pom.xml b/services/appconfig/pom.xml index 05f5db83bc2a..8ae4ca30b4b4 100644 --- a/services/appconfig/pom.xml +++ b/services/appconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT appconfig AWS Java SDK :: Services :: AppConfig diff --git a/services/appconfigdata/pom.xml b/services/appconfigdata/pom.xml index 81d7c63e5336..1039dd5b415f 100644 --- a/services/appconfigdata/pom.xml +++ b/services/appconfigdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT appconfigdata AWS Java SDK :: Services :: App Config Data diff --git a/services/appfabric/pom.xml b/services/appfabric/pom.xml index 8238986e1989..fda8067145a7 100644 --- a/services/appfabric/pom.xml +++ b/services/appfabric/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT appfabric AWS Java SDK :: Services :: App Fabric diff --git a/services/appflow/pom.xml b/services/appflow/pom.xml index e7969fad5831..1aff7e15622a 100644 --- a/services/appflow/pom.xml +++ b/services/appflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT appflow AWS Java SDK :: Services :: Appflow diff --git a/services/appflow/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/appflow/src/main/resources/codegen-resources/endpoint-rule-set.json index 02056ce3eb54..85ec7f0d7154 100644 --- a/services/appflow/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/appflow/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": 
"booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://appflow-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://appflow-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://appflow-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://appflow-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": 
[], - "endpoint": { - "url": "https://appflow.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://appflow.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://appflow.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://appflow.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/appflow/src/main/resources/codegen-resources/service-2.json b/services/appflow/src/main/resources/codegen-resources/service-2.json index e34290b51993..625263a8fbfc 100644 --- a/services/appflow/src/main/resources/codegen-resources/service-2.json +++ b/services/appflow/src/main/resources/codegen-resources/service-2.json @@ -61,7 +61,8 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"ConflictException"}, {"shape":"ConnectorAuthenticationException"}, - {"shape":"ConnectorServerException"} + {"shape":"ConnectorServerException"}, + {"shape":"AccessDeniedException"} ], "documentation":"
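    The reordered rule set above still resolves Amazon AppFlow endpoints from the UseFIPS and UseDualStack parameters; in the Java SDK those are normally driven from the client builder. A sketch, assuming the generic fipsEnabled/dualstackEnabled builder options; the region is arbitrary.

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.appflow.AppflowClient;

    public class AppflowEndpointExample {
        public static void main(String[] args) {
            // With both flags set, the rules above resolve to
            // https://appflow-fips.{Region}.{dualStackDnsSuffix} when the partition
            // supports FIPS and dual-stack, and return an error otherwise.
            try (AppflowClient appflow = AppflowClient.builder()
                    .region(Region.US_EAST_1)
                    .fipsEnabled(true)
                    .dualstackEnabled(true)
                    .build()) {
                System.out.println(appflow.serviceName());
            }
        }
    }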

    Enables your application to create a new flow using Amazon AppFlow. You must create a connector profile before calling this API. Please note that the Request Syntax below shows syntax for multiple destinations, however, you can only transfer data to one item in this list at a time. Amazon AppFlow does not currently support flows to multiple destinations at once.

    " }, @@ -411,7 +412,8 @@ {"shape":"ConflictException"}, {"shape":"ConnectorAuthenticationException"}, {"shape":"ConnectorServerException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

    Updates an existing flow.

    " } @@ -2675,6 +2677,14 @@ "recordsProcessed":{ "shape":"Long", "documentation":"

    The number of records processed in the flow run.

    " + }, + "numParallelProcesses":{ + "shape":"Long", + "documentation":"

    The number of processes that Amazon AppFlow ran at the same time when it retrieved your data.

    " + }, + "maxPageSize":{ + "shape":"Long", + "documentation":"

    The maximum number of records that Amazon AppFlow receives in each page of the response from your SAP application.

    " } }, "documentation":"

    Specifies the end result of the flow run.

    " @@ -4329,18 +4339,60 @@ }, "documentation":"

    The properties that are applied when using SAPOData as a flow destination

    " }, + "SAPODataMaxPageSize":{ + "type":"integer", + "max":10000, + "min":1 + }, + "SAPODataMaxParallelism":{ + "type":"integer", + "max":10, + "min":1 + }, "SAPODataMetadata":{ "type":"structure", "members":{ }, "documentation":"

    The connector metadata specific to SAPOData.

    " }, + "SAPODataPaginationConfig":{ + "type":"structure", + "required":["maxPageSize"], + "members":{ + "maxPageSize":{ + "shape":"SAPODataMaxPageSize", + "documentation":"

    The maximum number of records that Amazon AppFlow receives in each page of the response from your SAP application. For transfers of OData records, the maximum page size is 3,000. For transfers of data that comes from an ODP provider, the maximum page size is 10,000.

    ", + "box":true + } + }, + "documentation":"

    Sets the page size for each concurrent process that transfers OData records from your SAP instance. A concurrent process is a query that retrieves a batch of records as part of a flow run. Amazon AppFlow can run multiple concurrent processes in parallel to transfer data faster.

    " + }, + "SAPODataParallelismConfig":{ + "type":"structure", + "required":["maxParallelism"], + "members":{ + "maxParallelism":{ + "shape":"SAPODataMaxParallelism", + "documentation":"

    The maximum number of processes that Amazon AppFlow runs at the same time when it retrieves your data from your SAP application.

    ", + "box":true + } + }, + "documentation":"

    Sets the number of concurrent processes that transfer OData records from your SAP instance. A concurrent process is a query that retrieves a batch of records as part of a flow run. Amazon AppFlow can run multiple concurrent processes in parallel to transfer data faster.

    " + }, "SAPODataSourceProperties":{ "type":"structure", "members":{ "objectPath":{ "shape":"Object", "documentation":"

    The object path specified in the SAPOData flow source.

    " + }, + "parallelismConfig":{ + "shape":"SAPODataParallelismConfig", + "documentation":"

    Sets the number of concurrent processes that transfer OData records from your SAP instance.

    " + }, + "paginationConfig":{ + "shape":"SAPODataPaginationConfig", + "documentation":"

    Sets the page size for each concurrent process that transfers OData records from your SAP instance.

    " } }, "documentation":"

    The properties that are applied when using SAPOData as a flow source.
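    A minimal sketch of the builders generated from the shapes added above (SAPODataSourceProperties, SAPODataPaginationConfig, SAPODataParallelismConfig); the OData object path is a placeholder.

    import software.amazon.awssdk.services.appflow.model.SAPODataPaginationConfig;
    import software.amazon.awssdk.services.appflow.model.SAPODataParallelismConfig;
    import software.amazon.awssdk.services.appflow.model.SAPODataSourceProperties;

    public class SapODataSourceExample {
        public static void main(String[] args) {
            SAPODataSourceProperties source = SAPODataSourceProperties.builder()
                    .objectPath("/sap/opu/odata/sap/ZEXAMPLE_SRV/Orders") // placeholder object path
                    // Up to 3,000 records per page for OData records, per the documentation above.
                    .paginationConfig(SAPODataPaginationConfig.builder()
                            .maxPageSize(3000)
                            .build())
                    // Run at most five concurrent extraction processes (the model caps this at 10).
                    .parallelismConfig(SAPODataParallelismConfig.builder()
                            .maxParallelism(5)
                            .build())
                    .build();
            System.out.println(source);
        }
    }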

    " @@ -4599,10 +4651,6 @@ }, "ServiceNowConnectorProfileCredentials":{ "type":"structure", - "required":[ - "username", - "password" - ], "members":{ "username":{ "shape":"Username", @@ -4611,6 +4659,10 @@ "password":{ "shape":"Password", "documentation":"

    The password that corresponds to the user name.

    " + }, + "oAuth2Credentials":{ + "shape":"OAuth2Credentials", + "documentation":"

    The OAuth 2.0 credentials required to authenticate the user.

    " } }, "documentation":"

    The connector-specific profile credentials required when using ServiceNow.
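    With username and password no longer required, a hedged sketch of the generated credentials builder; the user name is a placeholder, and the new oAuth2Credentials member (whose fields are not shown in this hunk) is the OAuth 2.0 alternative.

    import software.amazon.awssdk.services.appflow.model.ServiceNowConnectorProfileCredentials;

    public class ServiceNowCredentialsExample {
        public static void main(String[] args) {
            // Basic authentication path; oAuth2Credentials(...) can be supplied instead
            // now that username/password are optional.
            ServiceNowConnectorProfileCredentials credentials =
                    ServiceNowConnectorProfileCredentials.builder()
                            .username("integration-user")           // placeholder user name
                            .password(System.getenv("SN_PASSWORD")) // read the secret from the environment
                            .build();
            System.out.println(credentials.username());
        }
    }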

    " diff --git a/services/appintegrations/pom.xml b/services/appintegrations/pom.xml index feed4b1f497e..ae81f3551640 100644 --- a/services/appintegrations/pom.xml +++ b/services/appintegrations/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT appintegrations AWS Java SDK :: Services :: App Integrations diff --git a/services/applicationautoscaling/pom.xml b/services/applicationautoscaling/pom.xml index 902912c32953..0096e286e994 100644 --- a/services/applicationautoscaling/pom.xml +++ b/services/applicationautoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT applicationautoscaling AWS Java SDK :: Services :: AWS Application Auto Scaling diff --git a/services/applicationcostprofiler/pom.xml b/services/applicationcostprofiler/pom.xml index c235229a2875..d05980f0630e 100644 --- a/services/applicationcostprofiler/pom.xml +++ b/services/applicationcostprofiler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT applicationcostprofiler AWS Java SDK :: Services :: Application Cost Profiler diff --git a/services/applicationdiscovery/pom.xml b/services/applicationdiscovery/pom.xml index 0f37c85e99b3..495ad40e8721 100644 --- a/services/applicationdiscovery/pom.xml +++ b/services/applicationdiscovery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT applicationdiscovery AWS Java SDK :: Services :: AWS Application Discovery Service diff --git a/services/applicationdiscovery/src/main/resources/codegen-resources/customization.config b/services/applicationdiscovery/src/main/resources/codegen-resources/customization.config index 4864947dd29a..d46992d3cb02 100644 --- a/services/applicationdiscovery/src/main/resources/codegen-resources/customization.config +++ b/services/applicationdiscovery/src/main/resources/codegen-resources/customization.config @@ -9,7 +9,7 @@ "describeExportConfigurations", "getDiscoverySummary" ], - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "startContinuousExport", "describeContinuousExports" ], diff --git a/services/applicationinsights/pom.xml b/services/applicationinsights/pom.xml index 95f7a902d9c9..f74ceeb73914 100644 --- a/services/applicationinsights/pom.xml +++ b/services/applicationinsights/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT applicationinsights AWS Java SDK :: Services :: Application Insights diff --git a/services/appmesh/pom.xml b/services/appmesh/pom.xml index e65154687d12..e349b486aae6 100644 --- a/services/appmesh/pom.xml +++ b/services/appmesh/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT appmesh AWS Java SDK :: Services :: App Mesh diff --git a/services/apprunner/pom.xml b/services/apprunner/pom.xml index a4fc93a3c026..63f3c9010cbb 100644 --- a/services/apprunner/pom.xml +++ b/services/apprunner/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT apprunner AWS Java SDK :: Services :: App Runner diff --git a/services/apprunner/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/apprunner/src/main/resources/codegen-resources/endpoint-rule-set.json index 9d35edd948f4..705f0bb67989 100644 --- a/services/apprunner/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/apprunner/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ 
"type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://apprunner-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://apprunner-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://apprunner-fips.{Region}.{PartitionResult#dnsSuffix}", 
- "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://apprunner-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://apprunner.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://apprunner.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://apprunner.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://apprunner.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/apprunner/src/main/resources/codegen-resources/service-2.json b/services/apprunner/src/main/resources/codegen-resources/service-2.json index b33c1b645a51..f1e333f14afb 100644 --- a/services/apprunner/src/main/resources/codegen-resources/service-2.json +++ b/services/apprunner/src/main/resources/codegen-resources/service-2.json @@ -56,7 +56,7 @@ {"shape":"InternalServiceErrorException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

    Create an App Runner connection resource. App Runner requires a connection resource when you create App Runner services that access private repositories from certain third-party providers. You can share a connection across multiple services.

    A connection resource is needed to access GitHub repositories. GitHub requires a user interface approval process through the App Runner console before you can use the connection.

    " + "documentation":"

    Create an App Runner connection resource. App Runner requires a connection resource when you create App Runner services that access private repositories from certain third-party providers. You can share a connection across multiple services.

    A connection resource is needed to access GitHub and Bitbucket repositories. Both require a user interface approval process through the App Runner console before you can use the connection.
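    A sketch of requesting a Bitbucket-backed connection with the new enum value; the connectionName/providerType member names are assumed from the CreateConnection operation, and the connection name is a placeholder.

    import software.amazon.awssdk.services.apprunner.AppRunnerClient;
    import software.amazon.awssdk.services.apprunner.model.CreateConnectionRequest;
    import software.amazon.awssdk.services.apprunner.model.ProviderType;

    public class CreateBitbucketConnectionExample {
        public static void main(String[] args) {
            try (AppRunnerClient appRunner = AppRunnerClient.create()) {
                // The connection must still be approved in the App Runner console before use.
                appRunner.createConnection(CreateConnectionRequest.builder()
                        .connectionName("my-bitbucket-connection") // placeholder name
                        .providerType(ProviderType.BITBUCKET)
                        .build());
            }
        }
    }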

    " }, "CreateObservabilityConfiguration":{ "name":"CreateObservabilityConfiguration", @@ -2182,7 +2182,10 @@ }, "ProviderType":{ "type":"string", - "enum":["GITHUB"] + "enum":[ + "GITHUB", + "BITBUCKET" + ] }, "ResourceNotFoundException":{ "type":"structure", @@ -2318,7 +2321,7 @@ }, "Status":{ "shape":"ServiceStatus", - "documentation":"

    The current state of the App Runner service. These particular values mean the following.

    • CREATE_FAILED – The service failed to create. To troubleshoot this failure, read the failure events and logs, change any parameters that need to be fixed, and retry the call to create the service.

      The failed service isn't usable, and still counts towards your service quota. When you're done analyzing the failure, delete the service.

    • DELETE_FAILED – The service failed to delete and can't be successfully recovered. Retry the service deletion call to ensure that all related resources are removed.

    " + "documentation":"

    The current state of the App Runner service. These particular values mean the following.

    • CREATE_FAILED – The service failed to create. The failed service isn't usable, and still counts towards your service quota. To troubleshoot this failure, read the failure events and logs, change any parameters that need to be fixed, and rebuild your service using UpdateService.

    • DELETE_FAILED – The service failed to delete and can't be successfully recovered. Retry the service deletion call to ensure that all related resources are removed.

    " }, "SourceConfiguration":{ "shape":"SourceConfiguration", @@ -2431,7 +2434,7 @@ }, "Status":{ "shape":"ServiceStatus", - "documentation":"

    The current state of the App Runner service. These particular values mean the following.

    • CREATE_FAILED – The service failed to create. Read the failure events and logs, change any parameters that need to be fixed, and retry the call to create the service.

      The failed service isn't usable, and still counts towards your service quota. When you're done analyzing the failure, delete the service.

    • DELETE_FAILED – The service failed to delete and can't be successfully recovered. Retry the service deletion call to ensure that all related resources are removed.

    " + "documentation":"

    The current state of the App Runner service. These particular values mean the following.

    • CREATE_FAILED – The service failed to create. The failed service isn't usable, and still counts towards your service quota. To troubleshoot this failure, read the failure events and logs, change any parameters that need to be fixed, and rebuild your service using UpdateService.

    • DELETE_FAILED – The service failed to delete and can't be successfully recovered. Retry the service deletion call to ensure that all related resources are removed.

    " } }, "documentation":"

    Provides summary information for an App Runner service.

    This type contains limited information about a service. It doesn't include configuration details. It's returned by the ListServices action. Complete service information is returned by the CreateService, DescribeService, and DeleteService actions using the Service type.

    " diff --git a/services/appstream/pom.xml b/services/appstream/pom.xml index 89cabb2780d7..429f4f496fb0 100644 --- a/services/appstream/pom.xml +++ b/services/appstream/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT appstream AWS Java SDK :: Services :: Amazon AppStream diff --git a/services/appstream/src/main/resources/codegen-resources/customization.config b/services/appstream/src/main/resources/codegen-resources/customization.config index d6ce3dce1a2e..b605d120b9ab 100644 --- a/services/appstream/src/main/resources/codegen-resources/customization.config +++ b/services/appstream/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "updateFleet", "describeUserStackAssociations" ], diff --git a/services/appsync/pom.xml b/services/appsync/pom.xml index bd59a5f2f4e0..c576d0be0a35 100644 --- a/services/appsync/pom.xml +++ b/services/appsync/pom.xml @@ -21,7 +21,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT appsync diff --git a/services/arczonalshift/pom.xml b/services/arczonalshift/pom.xml index f34ff49ae46c..775dc8d09a7b 100644 --- a/services/arczonalshift/pom.xml +++ b/services/arczonalshift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT arczonalshift AWS Java SDK :: Services :: ARC Zonal Shift diff --git a/services/athena/pom.xml b/services/athena/pom.xml index e33cbf0286f6..21abc922d9d3 100644 --- a/services/athena/pom.xml +++ b/services/athena/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT athena AWS Java SDK :: Services :: Amazon Athena diff --git a/services/auditmanager/pom.xml b/services/auditmanager/pom.xml index ed2071e60a32..f6f9d8c6143c 100644 --- a/services/auditmanager/pom.xml +++ b/services/auditmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT auditmanager AWS Java SDK :: Services :: Audit Manager diff --git a/services/auditmanager/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/auditmanager/src/main/resources/codegen-resources/endpoint-rule-set.json index 29247a7d2dd7..b38eb1c9a64e 100644 --- a/services/auditmanager/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/auditmanager/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": 
"tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://auditmanager-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://auditmanager-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://auditmanager-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://auditmanager-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - 
"type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://auditmanager.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://auditmanager.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://auditmanager.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://auditmanager.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/auditmanager/src/main/resources/codegen-resources/service-2.json b/services/auditmanager/src/main/resources/codegen-resources/service-2.json index 8420122febbb..af2cdd21159d 100644 --- a/services/auditmanager/src/main/resources/codegen-resources/service-2.json +++ b/services/auditmanager/src/main/resources/codegen-resources/service-2.json @@ -1013,7 +1013,8 @@ }, "AWSAccounts":{ "type":"list", - "member":{"shape":"AWSAccount"} + "member":{"shape":"AWSAccount"}, + "sensitive":true }, "AWSService":{ "type":"structure", @@ -1081,12 +1082,14 @@ "ActionPlanInstructions":{ "type":"string", "max":1000, - "pattern":"^[\\w\\W\\s\\S]*$" + "pattern":"^[\\w\\W\\s\\S]*$", + "sensitive":true }, "ActionPlanTitle":{ "type":"string", "max":300, - "pattern":"^[\\w\\W\\s\\S]*$" + "pattern":"^[\\w\\W\\s\\S]*$", + "sensitive":true }, "Assessment":{ "type":"structure", @@ -1205,7 +1208,8 @@ "AssessmentDescription":{ "type":"string", "max":1000, - "pattern":"^[\\w\\W\\s\\S]*$" + "pattern":"^[\\w\\W\\s\\S]*$", + "sensitive":true }, "AssessmentEvidenceFolder":{ "type":"structure", @@ -1312,7 +1316,8 @@ "documentation":"

    The control sets that are associated with the framework.

    " } }, - "documentation":"

    The file used to structure and automate Audit Manager assessments for a given compliance standard.

    " + "documentation":"

    The file used to structure and automate Audit Manager assessments for a given compliance standard.

    ", + "sensitive":true }, "AssessmentFrameworkDescription":{ "type":"string", @@ -1532,7 +1537,8 @@ "type":"string", "max":300, "min":1, - "pattern":"^[^\\\\]*$" + "pattern":"^[^\\\\]*$", + "sensitive":true }, "AssessmentReport":{ "type":"structure", @@ -1579,7 +1585,8 @@ "AssessmentReportDescription":{ "type":"string", "max":1000, - "pattern":"^[\\w\\W\\s\\S]*$" + "pattern":"^[\\w\\W\\s\\S]*$", + "sensitive":true }, "AssessmentReportDestinationType":{ "type":"string", @@ -1671,7 +1678,8 @@ "documentation":"

    The destination bucket where Audit Manager stores assessment reports.

    " } }, - "documentation":"

    The location where Audit Manager saves assessment reports for the given assessment.

    " + "documentation":"

    The location where Audit Manager saves assessment reports for the given assessment.

    ", + "sensitive":true }, "AssessmentReportsMetadata":{ "type":"list", @@ -1771,7 +1779,8 @@ }, "BatchCreateDelegationByAssessmentErrors":{ "type":"list", - "member":{"shape":"BatchCreateDelegationByAssessmentError"} + "member":{"shape":"BatchCreateDelegationByAssessmentError"}, + "sensitive":true }, "BatchCreateDelegationByAssessmentRequest":{ "type":"structure", @@ -1825,7 +1834,8 @@ }, "BatchDeleteDelegationByAssessmentErrors":{ "type":"list", - "member":{"shape":"BatchDeleteDelegationByAssessmentError"} + "member":{"shape":"BatchDeleteDelegationByAssessmentError"}, + "sensitive":true }, "BatchDeleteDelegationByAssessmentRequest":{ "type":"structure", @@ -1996,7 +2006,8 @@ "ComplianceType":{ "type":"string", "max":100, - "pattern":"^[\\w\\W\\s\\S]*$" + "pattern":"^[\\w\\W\\s\\S]*$", + "sensitive":true }, "Control":{ "type":"structure", @@ -2085,7 +2096,8 @@ "ControlCommentBody":{ "type":"string", "max":500, - "pattern":"^[\\w\\W\\s\\S]*$" + "pattern":"^[\\w\\W\\s\\S]*$", + "sensitive":true }, "ControlComments":{ "type":"list", @@ -2316,7 +2328,8 @@ "ControlSets":{ "type":"list", "member":{"shape":"ControlSet"}, - "min":1 + "min":1, + "sensitive":true }, "ControlSetsCount":{"type":"integer"}, "ControlSources":{ @@ -2607,13 +2620,15 @@ "type":"list", "member":{"shape":"CreateDelegationRequest"}, "max":50, - "min":1 + "min":1, + "sensitive":true }, "CreatedBy":{ "type":"string", "max":100, "min":1, - "pattern":"^[a-zA-Z0-9\\s-_()\\[\\]]+$" + "pattern":"^[a-zA-Z0-9\\s-_()\\[\\]]+$", + "sensitive":true }, "DefaultExportDestination":{ "type":"structure", @@ -2677,12 +2692,14 @@ "documentation":"

    The user or role that created the delegation.

    " } }, - "documentation":"

    The assignment of a control set to a delegate for review.

    " + "documentation":"

    The assignment of a control set to a delegate for review.

    ", + "sensitive":true }, "DelegationComment":{ "type":"string", "max":350, - "pattern":"^[\\w\\W\\s\\S]*$" + "pattern":"^[\\w\\W\\s\\S]*$", + "sensitive":true }, "DelegationIds":{ "type":"list", @@ -2916,7 +2933,8 @@ "type":"string", "max":320, "min":1, - "pattern":"^.*@.*$" + "pattern":"^.*@.*$", + "sensitive":true }, "ErrorCode":{ "type":"string", @@ -3466,7 +3484,8 @@ "shape":"NonEmptyString", "documentation":"

    The presigned URL that was generated.

    " } - } + }, + "sensitive":true }, "GetEvidenceFolderRequest":{ "type":"structure", @@ -3843,7 +3862,8 @@ "type":"string", "max":100, "min":1, - "pattern":"^[a-zA-Z0-9\\s-_()\\[\\]]+$" + "pattern":"^[a-zA-Z0-9\\s-_()\\[\\]]+$", + "sensitive":true }, "ListAssessmentControlInsightsByControlDomainRequest":{ "type":"structure", @@ -4292,13 +4312,15 @@ "type":"string", "max":300, "min":1, - "pattern":"[^\\/]*" + "pattern":"[^\\/]*", + "sensitive":true }, "ManualEvidenceTextResponse":{ "type":"string", "max":1000, "min":1, - "pattern":"^[\\w\\W\\s\\S]*$" + "pattern":"^[\\w\\W\\s\\S]*$", + "sensitive":true }, "MaxResults":{ "type":"integer", @@ -4491,7 +4513,8 @@ }, "Roles":{ "type":"list", - "member":{"shape":"Role"} + "member":{"shape":"Role"}, + "sensitive":true }, "S3Url":{ "type":"string", @@ -4503,7 +4526,8 @@ "type":"string", "max":255, "min":1, - "pattern":"^[a-zA-Z0-9-_\\(\\)\\[\\]]+$" + "pattern":"^[a-zA-Z0-9-_\\(\\)\\[\\]]+$", + "sensitive":true }, "Scope":{ "type":"structure", @@ -4517,7 +4541,8 @@ "documentation":"

    The Amazon Web Services services that are included in the scope of the assessment.

    " } }, - "documentation":"

    The wrapper that contains the Amazon Web Services accounts and services that are in scope for the assessment.

    " + "documentation":"

    The wrapper that contains the Amazon Web Services accounts and services that are in scope for the assessment.

    ", + "sensitive":true }, "ServiceMetadata":{ "type":"structure", @@ -4789,7 +4814,8 @@ "TestingInformation":{ "type":"string", "max":1000, - "pattern":"^[\\w\\W\\s\\S]*$" + "pattern":"^[\\w\\W\\s\\S]*$", + "sensitive":true }, "ThrottlingException":{ "type":"structure", @@ -4817,7 +4843,8 @@ "TroubleshootingText":{ "type":"string", "max":1000, - "pattern":"^[\\w\\W\\s\\S]*$" + "pattern":"^[\\w\\W\\s\\S]*$", + "sensitive":true }, "URL":{ "type":"structure", @@ -5224,7 +5251,8 @@ "type":"string", "max":128, "min":1, - "pattern":"^[a-zA-Z0-9-_()\\s\\+=,.@]+$" + "pattern":"^[a-zA-Z0-9-_()\\s\\+=,.@]+$", + "sensitive":true }, "ValidateAssessmentReportIntegrityRequest":{ "type":"structure", diff --git a/services/autoscaling/pom.xml b/services/autoscaling/pom.xml index da669963b878..4d4e297f7508 100644 --- a/services/autoscaling/pom.xml +++ b/services/autoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT autoscaling AWS Java SDK :: Services :: Auto Scaling diff --git a/services/autoscalingplans/pom.xml b/services/autoscalingplans/pom.xml index 03292056a068..46d21ccd2e3b 100644 --- a/services/autoscalingplans/pom.xml +++ b/services/autoscalingplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT autoscalingplans AWS Java SDK :: Services :: Auto Scaling Plans diff --git a/services/backup/pom.xml b/services/backup/pom.xml index 17f10c5b37b0..85dcc6e5e7e4 100644 --- a/services/backup/pom.xml +++ b/services/backup/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT backup AWS Java SDK :: Services :: Backup diff --git a/services/backup/src/main/resources/codegen-resources/customization.config b/services/backup/src/main/resources/codegen-resources/customization.config index 6dc948981354..1cb199bb4714 100644 --- a/services/backup/src/main/resources/codegen-resources/customization.config +++ b/services/backup/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "getSupportedResourceTypes" ], "verifiedSimpleMethods" : [ diff --git a/services/backup/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/backup/src/main/resources/codegen-resources/endpoint-rule-set.json index 49fbf7acba9a..dc2e1fc92d97 100644 --- a/services/backup/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/backup/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", 
"argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://backup-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://backup-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://backup-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://backup-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": 
"PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://backup.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://backup.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://backup.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://backup.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/backup/src/main/resources/codegen-resources/service-2.json b/services/backup/src/main/resources/codegen-resources/service-2.json index 3e72248e09a5..379cbe80b72d 100644 --- a/services/backup/src/main/resources/codegen-resources/service-2.json +++ b/services/backup/src/main/resources/codegen-resources/service-2.json @@ -135,7 +135,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    This request creates a logical container where backups are stored.

    This request includes a name, optionally one or more resource tags, an encryption key, and a request ID.

    Do not include sensitive data, such as passport numbers, in the name of a backup vault.

    ", + "documentation":"

    This request creates a logical container to which backups may be copied.

    This request includes a name, the Region, the maximum number of retention days, the minimum number of retention days, and can optionally include tags and a creator request ID.

    Do not include sensitive data, such as passport numbers, in the name of a backup vault.

    ", "idempotent":true }, "CreateReportPlan":{ @@ -1580,6 +1580,10 @@ "EnableContinuousBackup":{ "shape":"Boolean", "documentation":"

    Specifies whether Backup creates continuous backups. True causes Backup to create continuous backups capable of point-in-time restore (PITR). False (or not specified) causes Backup to create snapshot backups.

    " + }, + "ScheduleExpressionTimezone":{ + "shape":"Timezone", + "documentation":"

    This is the timezone in which the schedule expression is set. By default, ScheduleExpressions are in UTC. You can modify this to a specified timezone.

    " } }, "documentation":"

    Specifies a scheduled task used to back up a selection of resources.

    " @@ -1626,6 +1630,10 @@ "EnableContinuousBackup":{ "shape":"Boolean", "documentation":"

    Specifies whether Backup creates continuous backups. True causes Backup to create continuous backups capable of point-in-time restore (PITR). False (or not specified) causes Backup to create snapshot backups.

    " + }, + "ScheduleExpressionTimezone":{ + "shape":"Timezone", + "documentation":"

    This is the timezone in which the schedule expression is set. By default, ScheduleExpressions are in UTC. You can modify this to a specified timezone.

    " } }, "documentation":"

    Specifies a scheduled task used to back up a selection of resources.

    " @@ -3279,7 +3287,7 @@ }, "ControlScope":{ "shape":"ControlScope", - "documentation":"

    The scope of a control. The control scope defines what the control will evaluate. Three examples of control scopes are: a specific backup plan, all backup plans with a specific tag, or all backup plans. For more information, see ControlScope.

    " + "documentation":"

    The scope of a control. The control scope defines what the control will evaluate. Three examples of control scopes are: a specific backup plan, all backup plans with a specific tag, or all backup plans.

    " } }, "documentation":"

    Contains detailed information about all of the controls of a framework. Each framework must contain at least one control.

    " @@ -5486,6 +5494,7 @@ "value":{"shape":"TagValue"}, "sensitive":true }, + "Timezone":{"type":"string"}, "UntagResourceInput":{ "type":"structure", "required":[ diff --git a/services/backupgateway/pom.xml b/services/backupgateway/pom.xml index cbae64ce942d..e488cc954ae3 100644 --- a/services/backupgateway/pom.xml +++ b/services/backupgateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT backupgateway AWS Java SDK :: Services :: Backup Gateway diff --git a/services/backupstorage/pom.xml b/services/backupstorage/pom.xml index d1ea59048cd0..3b61f094539b 100644 --- a/services/backupstorage/pom.xml +++ b/services/backupstorage/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT backupstorage AWS Java SDK :: Services :: Backup Storage diff --git a/services/batch/pom.xml b/services/batch/pom.xml index 5fea8f4a9f94..6cb7b65c7eae 100644 --- a/services/batch/pom.xml +++ b/services/batch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT batch AWS Java SDK :: Services :: AWS Batch diff --git a/services/batch/src/main/resources/codegen-resources/customization.config b/services/batch/src/main/resources/codegen-resources/customization.config index c597ff8c197f..ef427d6052cf 100644 --- a/services/batch/src/main/resources/codegen-resources/customization.config +++ b/services/batch/src/main/resources/codegen-resources/customization.config @@ -4,7 +4,7 @@ "describeJobDefinitions", "describeJobQueues" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "listJobs" ] } diff --git a/services/billingconductor/pom.xml b/services/billingconductor/pom.xml index 07e1650b12ab..1db33a2acc52 100644 --- a/services/billingconductor/pom.xml +++ b/services/billingconductor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT billingconductor AWS Java SDK :: Services :: Billingconductor diff --git a/services/billingconductor/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/billingconductor/src/main/resources/codegen-resources/endpoint-rule-set.json index a71edcaf080e..260bb262df98 100644 --- a/services/billingconductor/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/billingconductor/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,291 +115,251 @@ { "conditions": [ { - "fn": 
"aws.partition", + "fn": "stringEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", + "fn": "getAttr", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] + "ref": "PartitionResult" }, - "aws" + "name" ] }, + "aws" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - false - ] + "ref": "UseDualStack" }, + false + ] + } + ], + "endpoint": { + "url": "https://billingconductor.us-east-1.amazonaws.com", + "properties": { + "authSchemes": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - false - ] + "name": "sigv4", + "signingName": "billingconductor", + "signingRegion": "us-east-1" } - ], - "endpoint": { - "url": "https://billingconductor.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "billingconductor", - "signingRegion": "us-east-1" - } - ] + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" }, - "headers": {} - }, - "type": "endpoint" + true + ] }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://billingconductor-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://billingconductor-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": 
[], - "endpoint": { - "url": "https://billingconductor-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://billingconductor-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://billingconductor.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://billingconductor.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://billingconductor.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://billingconductor.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/billingconductor/src/main/resources/codegen-resources/service-2.json b/services/billingconductor/src/main/resources/codegen-resources/service-2.json index 87d16cf39559..33d7b280a5cf 100644 --- a/services/billingconductor/src/main/resources/codegen-resources/service-2.json +++ b/services/billingconductor/src/main/resources/codegen-resources/service-2.json @@ -1342,6 +1342,10 @@ "Type":{ "shape":"CustomLineItemType", "documentation":"

    The type of the custom line item that indicates whether the charge is a fee or credit.

    " + }, + "LineItemFilters":{ + "shape":"LineItemFiltersList", + "documentation":"

    A representation of the line item filter.

    " } }, "documentation":"

    The charge details of a custom line item. It should contain only one of Flat or Percentage.

    " @@ -1707,6 +1711,49 @@ "exception":true, "fault":true }, + "LineItemFilter":{ + "type":"structure", + "required":[ + "Attribute", + "MatchOption", + "Values" + ], + "members":{ + "Attribute":{ + "shape":"LineItemFilterAttributeName", + "documentation":"

    The attribute of the line item filter. This specifies the attribute that you can filter on.

    " + }, + "MatchOption":{ + "shape":"MatchOption", + "documentation":"

    The match criteria of the line item filter. This parameter specifies whether or not to include the resource value from the billing group total cost.

    " + }, + "Values":{ + "shape":"LineItemFilterValuesList", + "documentation":"

    The values of the line item filter. This specifies the values to filter on. Currently, you can only exclude Savings Plan discounts.

    " + } + }, + "documentation":"

    A representation of the line item filter for your custom line item. You can use line item filters to include or exclude specific resource values from the billing group's total cost. For example, if you create a custom line item and you want to filter out a value, such as Savings Plan discounts, you can update LineItemFilter to exclude it.

    " + }, + "LineItemFilterAttributeName":{ + "type":"string", + "enum":["LINE_ITEM_TYPE"] + }, + "LineItemFilterValue":{ + "type":"string", + "enum":["SAVINGS_PLAN_NEGATION"] + }, + "LineItemFilterValuesList":{ + "type":"list", + "member":{"shape":"LineItemFilterValue"}, + "max":1, + "min":1 + }, + "LineItemFiltersList":{ + "type":"list", + "member":{"shape":"LineItemFilter"}, + "max":1, + "min":0 + }, "ListAccountAssociationsFilter":{ "type":"structure", "members":{ @@ -1880,6 +1927,10 @@ "Type":{ "shape":"CustomLineItemType", "documentation":"

    The type of the custom line item that indicates whether the charge is a fee or credit.

    " + }, + "LineItemFilters":{ + "shape":"LineItemFiltersList", + "documentation":"

    A representation of the line item filter.

    " } }, "documentation":"

    A representation of the charge details of a custom line item.

    " @@ -2297,6 +2348,10 @@ }, "Margin":{"type":"string"}, "MarginPercentage":{"type":"string"}, + "MatchOption":{ + "type":"string", + "enum":["NOT_EQUAL"] + }, "MaxBillingGroupResults":{ "type":"integer", "box":true, @@ -2797,6 +2852,10 @@ "Percentage":{ "shape":"UpdateCustomLineItemPercentageChargeDetails", "documentation":"

    An UpdateCustomLineItemPercentageChargeDetails that describes the new charge details of a percentage custom line item.

    " + }, + "LineItemFilters":{ + "shape":"LineItemFiltersList", + "documentation":"

    A representation of the line item filter.

    " } }, "documentation":"

    A representation of the new charge details of a custom line item. This should contain only one of Flat or Percentage.

    " diff --git a/services/braket/pom.xml b/services/braket/pom.xml index a6f0a8a52397..dd35866b2ae8 100644 --- a/services/braket/pom.xml +++ b/services/braket/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT braket AWS Java SDK :: Services :: Braket diff --git a/services/budgets/pom.xml b/services/budgets/pom.xml index 1d242abad212..94e06aa94901 100644 --- a/services/budgets/pom.xml +++ b/services/budgets/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT budgets AWS Java SDK :: Services :: AWS Budgets diff --git a/services/chime/pom.xml b/services/chime/pom.xml index 07fbf715749f..5d6786c37599 100644 --- a/services/chime/pom.xml +++ b/services/chime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT chime AWS Java SDK :: Services :: Chime diff --git a/services/chimesdkidentity/pom.xml b/services/chimesdkidentity/pom.xml index 69830834d090..44d6e352cfeb 100644 --- a/services/chimesdkidentity/pom.xml +++ b/services/chimesdkidentity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT chimesdkidentity AWS Java SDK :: Services :: Chime SDK Identity diff --git a/services/chimesdkmediapipelines/pom.xml b/services/chimesdkmediapipelines/pom.xml index 7f2dc6752c3d..209e80aa9c10 100644 --- a/services/chimesdkmediapipelines/pom.xml +++ b/services/chimesdkmediapipelines/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT chimesdkmediapipelines AWS Java SDK :: Services :: Chime SDK Media Pipelines diff --git a/services/chimesdkmediapipelines/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/chimesdkmediapipelines/src/main/resources/codegen-resources/endpoint-rule-set.json index df6d5f828d00..a451babf8a6f 100644 --- a/services/chimesdkmediapipelines/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/chimesdkmediapipelines/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - 
"ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://media-pipelines-chime-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://media-pipelines-chime-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://media-pipelines-chime-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://media-pipelines-chime-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://media-pipelines-chime.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - 
"conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://media-pipelines-chime.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://media-pipelines-chime.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://media-pipelines-chime.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/chimesdkmediapipelines/src/main/resources/codegen-resources/service-2.json b/services/chimesdkmediapipelines/src/main/resources/codegen-resources/service-2.json index 5a045f8a262f..b6e1b7fbcad3 100644 --- a/services/chimesdkmediapipelines/src/main/resources/codegen-resources/service-2.json +++ b/services/chimesdkmediapipelines/src/main/resources/codegen-resources/service-2.json @@ -166,6 +166,7 @@ {"shape":"ThrottledClientException"}, {"shape":"NotFoundException"}, {"shape":"UnauthorizedClientException"}, + {"shape":"ConflictException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], @@ -231,6 +232,46 @@ ], "documentation":"

    Gets an existing media pipeline.

    " }, + "GetSpeakerSearchTask":{ + "name":"GetSpeakerSearchTask", + "http":{ + "method":"GET", + "requestUri":"/media-insights-pipelines/{identifier}/speaker-search-tasks/{speakerSearchTaskId}", + "responseCode":200 + }, + "input":{"shape":"GetSpeakerSearchTaskRequest"}, + "output":{"shape":"GetSpeakerSearchTaskResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

    Retrieves the details of the specified speaker search task.

    " + }, + "GetVoiceToneAnalysisTask":{ + "name":"GetVoiceToneAnalysisTask", + "http":{ + "method":"GET", + "requestUri":"/media-insights-pipelines/{identifier}/voice-tone-analysis-tasks/{voiceToneAnalysisTaskId}", + "responseCode":200 + }, + "input":{"shape":"GetVoiceToneAnalysisTaskRequest"}, + "output":{"shape":"GetVoiceToneAnalysisTaskResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

    Retrieves the details of a voice tone analysis task.

    " + }, "ListMediaCapturePipelines":{ "name":"ListMediaCapturePipelines", "http":{ @@ -311,6 +352,88 @@ ], "documentation":"

    Lists the tags available for a media pipeline.

    " }, + "StartSpeakerSearchTask":{ + "name":"StartSpeakerSearchTask", + "http":{ + "method":"POST", + "requestUri":"/media-insights-pipelines/{identifier}/speaker-search-tasks?operation=start", + "responseCode":201 + }, + "input":{"shape":"StartSpeakerSearchTaskRequest"}, + "output":{"shape":"StartSpeakerSearchTaskResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ConflictException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

    Starts a speaker search task.

    Before starting any speaker search tasks, you must provide all notices and obtain all consents from the speaker as required under applicable privacy and biometrics laws, and as required under the AWS service terms for the Amazon Chime SDK.

    " + }, + "StartVoiceToneAnalysisTask":{ + "name":"StartVoiceToneAnalysisTask", + "http":{ + "method":"POST", + "requestUri":"/media-insights-pipelines/{identifier}/voice-tone-analysis-tasks?operation=start", + "responseCode":201 + }, + "input":{"shape":"StartVoiceToneAnalysisTaskRequest"}, + "output":{"shape":"StartVoiceToneAnalysisTaskResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ConflictException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

    Starts a voice tone analysis task. For more information about voice tone analysis, see Using Amazon Chime SDK voice analytics in the Amazon Chime SDK Developer Guide.

    Before starting any voice tone analysis tasks, you must provide all notices and obtain all consents from the speaker as required under applicable privacy and biometrics laws, and as required under the AWS service terms for the Amazon Chime SDK.

    " + }, + "StopSpeakerSearchTask":{ + "name":"StopSpeakerSearchTask", + "http":{ + "method":"POST", + "requestUri":"/media-insights-pipelines/{identifier}/speaker-search-tasks/{speakerSearchTaskId}?operation=stop", + "responseCode":204 + }, + "input":{"shape":"StopSpeakerSearchTaskRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ConflictException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

    Stops a speaker search task.

    " + }, + "StopVoiceToneAnalysisTask":{ + "name":"StopVoiceToneAnalysisTask", + "http":{ + "method":"POST", + "requestUri":"/media-insights-pipelines/{identifier}/voice-tone-analysis-tasks/{voiceToneAnalysisTaskId}?operation=stop", + "responseCode":204 + }, + "input":{"shape":"StopVoiceToneAnalysisTaskRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ConflictException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

    Stops a voice tone analysis task.

    " + }, "TagResource":{ "name":"TagResource", "http":{ @@ -1303,7 +1426,7 @@ "members":{ "FragmentSelectorType":{ "shape":"FragmentSelectorType", - "documentation":"

    The origin of the timestamps to use, Server or Producer. For more information, see StartSelectorType in the Amazon Kinesis Video Streams Developer Guide.

    " + "documentation":"

    The origin of the timestamps to use, Server or Producer. For more information, see StartSelectorType in the Amazon Kinesis Video Streams Developer Guide.

    " }, "TimestampRange":{ "shape":"TimestampRange", @@ -1382,6 +1505,66 @@ } } }, + "GetSpeakerSearchTaskRequest":{ + "type":"structure", + "required":[ + "Identifier", + "SpeakerSearchTaskId" + ], + "members":{ + "Identifier":{ + "shape":"NonEmptyString", + "documentation":"

    The unique identifier of the resource to be updated. Valid values include the ID and ARN of the media insights pipeline.

    ", + "location":"uri", + "locationName":"identifier" + }, + "SpeakerSearchTaskId":{ + "shape":"GuidString", + "documentation":"

    The ID of the speaker search task.

    ", + "location":"uri", + "locationName":"speakerSearchTaskId" + } + } + }, + "GetSpeakerSearchTaskResponse":{ + "type":"structure", + "members":{ + "SpeakerSearchTask":{ + "shape":"SpeakerSearchTask", + "documentation":"

    The details of the speaker search task.

    " + } + } + }, + "GetVoiceToneAnalysisTaskRequest":{ + "type":"structure", + "required":[ + "Identifier", + "VoiceToneAnalysisTaskId" + ], + "members":{ + "Identifier":{ + "shape":"NonEmptyString", + "documentation":"

    The unique identifier of the resource to be updated. Valid values include the ID and ARN of the media insights pipeline.

    ", + "location":"uri", + "locationName":"identifier" + }, + "VoiceToneAnalysisTaskId":{ + "shape":"GuidString", + "documentation":"

    The ID of the voice tone analysis task.

    ", + "location":"uri", + "locationName":"voiceToneAnalysisTaskId" + } + } + }, + "GetVoiceToneAnalysisTaskResponse":{ + "type":"structure", + "members":{ + "VoiceToneAnalysisTask":{ + "shape":"VoiceToneAnalysisTask", + "documentation":"

    The details of the voice tone analysis task.

    " + } + } + }, "GridViewConfiguration":{ "type":"structure", "required":["ContentShareLayout"], @@ -1569,6 +1752,28 @@ }, "documentation":"

    The runtime configuration settings for the Kinesis video stream source.

    " }, + "KinesisVideoStreamSourceTaskConfiguration":{ + "type":"structure", + "required":[ + "StreamArn", + "ChannelId" + ], + "members":{ + "StreamArn":{ + "shape":"KinesisVideoStreamArn", + "documentation":"

    The ARN of the stream.

    " + }, + "ChannelId":{ + "shape":"ChannelId", + "documentation":"

    The channel ID.

    " + }, + "FragmentNumber":{ + "shape":"FragmentNumberString", + "documentation":"

    The unique identifier of the fragment to begin processing.

    " + } + }, + "documentation":"

    The task configuration settings for the Kinesis video stream source.

    " + }, "LambdaFunctionSinkConfiguration":{ "type":"structure", "members":{ @@ -1940,6 +2145,10 @@ "CreatedTimestamp":{ "shape":"Iso8601Timestamp", "documentation":"

    The time at which the media insights pipeline was created.

    " + }, + "ElementStatuses":{ + "shape":"MediaInsightsPipelineElementStatuses", + "documentation":"

    The statuses that the elements in a media insights pipeline can have during data processing.

    " } }, "documentation":"

    A media pipeline that streams call analytics data.

    " @@ -2021,6 +2230,10 @@ "SnsTopicSinkConfiguration":{ "shape":"SnsTopicSinkConfiguration", "documentation":"

    The configuration settings for an SNS topic sink in a media insights pipeline configuration element.

    " + }, + "VoiceEnhancementSinkConfiguration":{ + "shape":"VoiceEnhancementSinkConfiguration", + "documentation":"

    The configuration settings for voice enhancement sink in a media insights pipeline configuration element.

    " } }, "documentation":"

    An element in a media insights pipeline configuration.

    " @@ -2035,7 +2248,8 @@ "LambdaFunctionSink", "SqsQueueSink", "SnsTopicSink", - "S3RecordingSink" + "S3RecordingSink", + "VoiceEnhancementSink" ] }, "MediaInsightsPipelineConfigurationElements":{ @@ -2070,6 +2284,24 @@ "type":"list", "member":{"shape":"MediaInsightsPipelineConfigurationSummary"} }, + "MediaInsightsPipelineElementStatus":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"MediaInsightsPipelineConfigurationElementType", + "documentation":"

    The type of status.

    " + }, + "Status":{ + "shape":"MediaPipelineElementStatus", + "documentation":"

    The element's status.

    " + } + }, + "documentation":"

    The status of the pipeline element.

    " + }, + "MediaInsightsPipelineElementStatuses":{ + "type":"list", + "member":{"shape":"MediaInsightsPipelineElementStatus"} + }, "MediaInsightsRuntimeMetadata":{ "type":"map", "key":{"shape":"NonEmptyString"}, @@ -2132,6 +2364,19 @@ }, "documentation":"

    A pipeline consisting of a media capture, media concatenation, or live-streaming pipeline.

    " }, + "MediaPipelineElementStatus":{ + "type":"string", + "enum":[ + "NotStarted", + "NotSupported", + "Initializing", + "InProgress", + "Failed", + "Stopping", + "Stopped", + "Paused" + ] + }, "MediaPipelineList":{ "type":"list", "member":{"shape":"MediaPipelineSummary"} @@ -2152,7 +2397,8 @@ "Failed", "Stopping", "Stopped", - "Paused" + "Paused", + "NotStarted" ] }, "MediaPipelineStatusUpdate":{ @@ -2176,6 +2422,17 @@ }, "documentation":"

    The summary of the media pipeline.

    " }, + "MediaPipelineTaskStatus":{ + "type":"string", + "enum":[ + "NotStarted", + "Initializing", + "InProgress", + "Failed", + "Stopping", + "Stopped" + ] + }, "MediaSampleRateHertz":{ "type":"integer", "max":48000, @@ -2539,6 +2796,28 @@ }, "documentation":"

    Source configuration for a specified media pipeline.

    " }, + "SpeakerSearchTask":{ + "type":"structure", + "members":{ + "SpeakerSearchTaskId":{ + "shape":"GuidString", + "documentation":"

    The speaker search task ID.

    " + }, + "SpeakerSearchTaskStatus":{ + "shape":"MediaPipelineTaskStatus", + "documentation":"

    The status of the speaker search task.

    " + }, + "CreatedTimestamp":{ + "shape":"Iso8601Timestamp", + "documentation":"

    The time at which a speaker search task was created.

    " + }, + "UpdatedTimestamp":{ + "shape":"Iso8601Timestamp", + "documentation":"

    The time at which a speaker search task was updated.

    " + } + }, + "documentation":"

    A representation of an asynchronous request to perform speaker search analysis on a media insights pipeline.

    " + }, "SqsQueueSinkConfiguration":{ "type":"structure", "members":{ @@ -2549,6 +2828,122 @@ }, "documentation":"

    The configuration settings for the SQS sink.

    " }, + "StartSpeakerSearchTaskRequest":{ + "type":"structure", + "required":[ + "Identifier", + "VoiceProfileDomainArn" + ], + "members":{ + "Identifier":{ + "shape":"NonEmptyString", + "documentation":"

    The unique identifier of the resource to be updated. Valid values include the ID and ARN of the media insights pipeline.

    ", + "location":"uri", + "locationName":"identifier" + }, + "VoiceProfileDomainArn":{ + "shape":"Arn", + "documentation":"

    The ARN of the voice profile domain that will store the voice profile.

    " + }, + "KinesisVideoStreamSourceTaskConfiguration":{ + "shape":"KinesisVideoStreamSourceTaskConfiguration", + "documentation":"

    The task configuration for the Kinesis video stream source of the media insights pipeline.

    " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    The unique identifier for the client request. Use a different token for different speaker search tasks.

    ", + "idempotencyToken":true + } + } + }, + "StartSpeakerSearchTaskResponse":{ + "type":"structure", + "members":{ + "SpeakerSearchTask":{ + "shape":"SpeakerSearchTask", + "documentation":"

    The details of the speaker search task.

    " + } + } + }, + "StartVoiceToneAnalysisTaskRequest":{ + "type":"structure", + "required":[ + "Identifier", + "LanguageCode" + ], + "members":{ + "Identifier":{ + "shape":"NonEmptyString", + "documentation":"

    The unique identifier of the resource to be updated. Valid values include the ID and ARN of the media insights pipeline.

    ", + "location":"uri", + "locationName":"identifier" + }, + "LanguageCode":{ + "shape":"VoiceAnalyticsLanguageCode", + "documentation":"

    The language code.

    " + }, + "KinesisVideoStreamSourceTaskConfiguration":{ + "shape":"KinesisVideoStreamSourceTaskConfiguration", + "documentation":"

    The task configuration for the Kinesis video stream source of the media insights pipeline.

    " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    The unique identifier for the client request. Use a different token for different voice tone analysis tasks.

    ", + "idempotencyToken":true + } + } + }, + "StartVoiceToneAnalysisTaskResponse":{ + "type":"structure", + "members":{ + "VoiceToneAnalysisTask":{ + "shape":"VoiceToneAnalysisTask", + "documentation":"

    The details of the voice tone analysis task.

    " + } + } + }, + "StopSpeakerSearchTaskRequest":{ + "type":"structure", + "required":[ + "Identifier", + "SpeakerSearchTaskId" + ], + "members":{ + "Identifier":{ + "shape":"NonEmptyString", + "documentation":"

    The unique identifier of the resource to be updated. Valid values include the ID and ARN of the media insights pipeline.

    ", + "location":"uri", + "locationName":"identifier" + }, + "SpeakerSearchTaskId":{ + "shape":"GuidString", + "documentation":"

    The speaker search task ID.

    ", + "location":"uri", + "locationName":"speakerSearchTaskId" + } + } + }, + "StopVoiceToneAnalysisTaskRequest":{ + "type":"structure", + "required":[ + "Identifier", + "VoiceToneAnalysisTaskId" + ], + "members":{ + "Identifier":{ + "shape":"NonEmptyString", + "documentation":"

    The unique identifier of the resource to be updated. Valid values include the ID and ARN of the media insights pipeline.

    ", + "location":"uri", + "locationName":"identifier" + }, + "VoiceToneAnalysisTaskId":{ + "shape":"GuidString", + "documentation":"

    The ID of the voice tone analysis task.

    ", + "location":"uri", + "locationName":"voiceToneAnalysisTaskId" + } + } + }, "StreamChannelDefinition":{ "type":"structure", "required":["NumberOfChannels"], @@ -2831,7 +3226,7 @@ "documentation":"

    Sets the aspect ratio of the video tiles, such as 16:9.

    " } }, - "documentation":"

    Defines the configuration settings for a vertial layout.

    " + "documentation":"

    Defines the configuration settings for a vertical layout.

    " }, "VerticalTilePosition":{ "type":"string", @@ -2931,6 +3326,10 @@ "Disabled" ] }, + "VoiceAnalyticsLanguageCode":{ + "type":"string", + "enum":["en-US"] + }, "VoiceAnalyticsProcessorConfiguration":{ "type":"structure", "members":{ @@ -2944,6 +3343,38 @@ } }, "documentation":"

    The configuration settings for a voice analytics processor.

    " + }, + "VoiceEnhancementSinkConfiguration":{ + "type":"structure", + "members":{ + "Disabled":{ + "shape":"Boolean", + "documentation":"

    Disables the VoiceEnhancementSinkConfiguration element.

    " + } + }, + "documentation":"

    A static structure that contains the configuration data for a VoiceEnhancementSinkConfiguration element.

    " + }, + "VoiceToneAnalysisTask":{ + "type":"structure", + "members":{ + "VoiceToneAnalysisTaskId":{ + "shape":"GuidString", + "documentation":"

    The ID of the voice tone analysis task.

    " + }, + "VoiceToneAnalysisTaskStatus":{ + "shape":"MediaPipelineTaskStatus", + "documentation":"

    The status of a voice tone analysis task.

    " + }, + "CreatedTimestamp":{ + "shape":"Iso8601Timestamp", + "documentation":"

    The time at which a voice tone analysis task was created.

    " + }, + "UpdatedTimestamp":{ + "shape":"Iso8601Timestamp", + "documentation":"

    The time at which a voice tone analysis task was updated.

    " + } + }, + "documentation":"

    A representation of an asynchronous request to perform voice tone analysis on a media insights pipeline.

    " } }, "documentation":"

    The Amazon Chime SDK media pipeline APIs in this section allow software developers to create Amazon Chime SDK media pipelines that capture, concatenate, or stream your Amazon Chime SDK meetings. For more information about media pipelines, see Amazon Chime SDK media pipelines.

    " diff --git a/services/chimesdkmeetings/pom.xml b/services/chimesdkmeetings/pom.xml index 2a490374a844..1c24bd8fd4eb 100644 --- a/services/chimesdkmeetings/pom.xml +++ b/services/chimesdkmeetings/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT chimesdkmeetings AWS Java SDK :: Services :: Chime SDK Meetings diff --git a/services/chimesdkmeetings/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/chimesdkmeetings/src/main/resources/codegen-resources/endpoint-rule-set.json index ba8bb30e19c7..b1a809646283 100644 --- a/services/chimesdkmeetings/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/chimesdkmeetings/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://meetings-chime-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://meetings-chime-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + 
"conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://meetings-chime-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://meetings-chime-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://meetings-chime.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://meetings-chime.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://meetings-chime.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://meetings-chime.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/chimesdkmeetings/src/main/resources/codegen-resources/service-2.json b/services/chimesdkmeetings/src/main/resources/codegen-resources/service-2.json index de440d20ddc1..ef969e224189 100644 --- a/services/chimesdkmeetings/src/main/resources/codegen-resources/service-2.json +++ 
b/services/chimesdkmeetings/src/main/resources/codegen-resources/service-2.json @@ -46,7 +46,9 @@ {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"}, {"shape":"ForbiddenException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Updates AttendeeCapabilities except the capabilities listed in an ExcludedAttendeeIds table.

    You use the capabilities with a set of values that control what the capabilities can do, such as SendReceive data. For more information about those values, see .

    When using capabilities, be aware of these corner cases:

    • You can't set content capabilities to SendReceive or Receive unless you also set video capabilities to SendReceive or Receive. If you don't set the video capability to receive, the response will contain an HTTP 400 Bad Request status code. However, you can set your video capability to receive while setting your content capability to not receive.

    • When you change an audio capability from None or Receive to Send or SendReceive, and the attendee left their microphone unmuted, audio will flow from the attendee to the other meeting participants.

    • When you change a video or content capability from None or Receive to Send or SendReceive, and the attendee turned on their video or content streams, remote attendees can receive those streams, but only after media renegotiation between the client and the Amazon Chime back-end server.
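
    As a rough illustration of the capability rules above, a caller using the generated Java client might apply video and content capabilities together while excluding specific attendees. This is only a sketch: the client and shape names assumed here (ChimeSdkMeetingsClient, BatchUpdateAttendeeCapabilitiesExceptRequest, AttendeeIdItem, AttendeeCapabilities, MediaCapabilities) follow the usual SDK codegen conventions rather than anything stated in this patch.

        import software.amazon.awssdk.regions.Region;
        import software.amazon.awssdk.services.chimesdkmeetings.ChimeSdkMeetingsClient;
        import software.amazon.awssdk.services.chimesdkmeetings.model.AttendeeCapabilities;
        import software.amazon.awssdk.services.chimesdkmeetings.model.AttendeeIdItem;
        import software.amazon.awssdk.services.chimesdkmeetings.model.BatchUpdateAttendeeCapabilitiesExceptRequest;
        import software.amazon.awssdk.services.chimesdkmeetings.model.MediaCapabilities;

        public class UpdateCapabilitiesExceptSketch {
            public static void main(String[] args) {
                ChimeSdkMeetingsClient meetings = ChimeSdkMeetingsClient.builder()
                        .region(Region.US_EAST_1)
                        .build();

                // Per the corner cases above: content can only be SEND_RECEIVE/RECEIVE
                // when video is also SEND_RECEIVE/RECEIVE, so set both together.
                AttendeeCapabilities capabilities = AttendeeCapabilities.builder()
                        .audio(MediaCapabilities.SEND_RECEIVE)
                        .video(MediaCapabilities.RECEIVE)
                        .content(MediaCapabilities.RECEIVE)
                        .build();

                meetings.batchUpdateAttendeeCapabilitiesExcept(
                        BatchUpdateAttendeeCapabilitiesExceptRequest.builder()
                                .meetingId("meeting-id")                       // placeholder id
                                .excludedAttendeeIds(AttendeeIdItem.builder()  // attendees to leave unchanged
                                        .attendeeId("attendee-id")             // placeholder id
                                        .build())
                                .capabilities(capabilities)
                                .build());
            }
        }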

    " }, @@ -81,6 +83,7 @@ "output":{"shape":"CreateMeetingResponse"}, "errors":[ {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, {"shape":"ForbiddenException"}, {"shape":"UnauthorizedException"}, {"shape":"ThrottlingException"}, @@ -100,6 +103,7 @@ "output":{"shape":"CreateMeetingWithAttendeesResponse"}, "errors":[ {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, {"shape":"ForbiddenException"}, {"shape":"UnauthorizedException"}, {"shape":"ThrottlingException"}, @@ -215,6 +219,13 @@ "input":{"shape":"ListTagsForResourceRequest"}, "output":{"shape":"ListTagsForResourceResponse"}, "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"}, + {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], "documentation":"

    Returns a list of the tags available for the specified resource.

    " @@ -271,6 +282,12 @@ "output":{"shape":"TagResourceResponse"}, "errors":[ {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"}, + {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, {"shape":"TooManyTagsException"} ], @@ -287,6 +304,12 @@ "output":{"shape":"UntagResourceResponse"}, "errors":[ {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"}, + {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], "documentation":"

    Removes the specified tags from the specified resources. When you specify a tag key, the action removes both that key and its associated value. The operation succeeds even if you attempt to remove tags that have already been removed from the resource. Note the following:

    • To remove tags from a resource, you need the necessary permissions for the service that the resource belongs to as well as permissions for removing tags. For more information, see the documentation for the service whose resource you want to untag.

    • You can only tag resources that are located in the specified AWS Region for the calling AWS account.

    Minimum permissions

    In addition to the tag:UntagResources permission required by this operation, you must also have the tag-removal permission defined by the service that created the resource. For example, to remove the tags from an Amazon EC2 instance using the UntagResources operation, you must have both of the following permissions:

    tag:UntagResource

    ChimeSDKMeetings:DeleteTags
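
    With those permissions in place, listing and removing tags through the generated Java client might look like the sketch below. The member names resourceARN and tagKeys are assumed to match the service model's ResourceARN/TagKeys members as generated by the SDK; the ARN and keys are placeholders.

        import software.amazon.awssdk.services.chimesdkmeetings.ChimeSdkMeetingsClient;
        import software.amazon.awssdk.services.chimesdkmeetings.model.ListTagsForResourceRequest;
        import software.amazon.awssdk.services.chimesdkmeetings.model.UntagResourceRequest;

        public class UntagSketch {
            public static void main(String[] args) {
                ChimeSdkMeetingsClient meetings = ChimeSdkMeetingsClient.create();
                String meetingArn = "arn:aws:chime:us-east-1:111122223333:meeting/meeting-id"; // placeholder ARN

                // List the tags currently attached to the resource.
                meetings.listTagsForResource(ListTagsForResourceRequest.builder()
                                .resourceARN(meetingArn)
                                .build())
                        .tags()
                        .forEach(tag -> System.out.println(tag.key() + "=" + tag.value()));

                // Remove tags by key; removing a key that is already gone still succeeds.
                meetings.untagResource(UntagResourceRequest.builder()
                        .resourceARN(meetingArn)
                        .tagKeys("department", "project") // placeholder keys to remove
                        .build());
            }
        }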

    " @@ -305,7 +328,9 @@ {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"}, {"shape":"ForbiddenException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    The capabilities that you want to update.

    You use the capabilities with a set of values that control what the capabilities can do, such as SendReceive data. For more information about those values, see .

    When using capabilities, be aware of these corner cases:

    • You can't set content capabilities to SendReceive or Receive unless you also set video capabilities to SendReceive or Receive. If you don't set the video capability to receive, the response will contain an HTTP 400 Bad Request status code. However, you can set your video capability to receive while setting your content capability to not receive.

    • When you change an audio capability from None or Receive to Send or SendReceive, and the attendee left their microphone unmuted, audio will flow from the attendee to the other meeting participants.

    • When you change a video or content capability from None or Receive to Send or SendReceive, and the attendee turned on their video or content streams, remote attendees can receive those streams, but only after media renegotiation between the client and the Amazon Chime back-end server.
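
    Because this hunk adds ThrottlingException and ServiceFailureException to the operation's declared errors, a caller of the single-attendee update might guard for throttling explicitly. A rough sketch, again assuming conventional generated names (UpdateAttendeeCapabilitiesRequest, ThrottlingException); the ids are placeholders.

        import software.amazon.awssdk.services.chimesdkmeetings.ChimeSdkMeetingsClient;
        import software.amazon.awssdk.services.chimesdkmeetings.model.AttendeeCapabilities;
        import software.amazon.awssdk.services.chimesdkmeetings.model.MediaCapabilities;
        import software.amazon.awssdk.services.chimesdkmeetings.model.ThrottlingException;
        import software.amazon.awssdk.services.chimesdkmeetings.model.UpdateAttendeeCapabilitiesRequest;

        public class UpdateCapabilitiesSketch {
            public static void main(String[] args) {
                ChimeSdkMeetingsClient meetings = ChimeSdkMeetingsClient.create();
                try {
                    meetings.updateAttendeeCapabilities(UpdateAttendeeCapabilitiesRequest.builder()
                            .meetingId("meeting-id")     // placeholder id
                            .attendeeId("attendee-id")   // placeholder id
                            .capabilities(AttendeeCapabilities.builder()
                                    .audio(MediaCapabilities.SEND_RECEIVE)
                                    .video(MediaCapabilities.RECEIVE)
                                    .content(MediaCapabilities.RECEIVE)
                                    .build())
                            .build());
                } catch (ThrottlingException e) {
                    // Newly declared for this operation in this patch; back off and retry later.
                    System.err.println("Throttled: " + e.getMessage());
                }
            }
        }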

    " } diff --git a/services/chimesdkmessaging/pom.xml b/services/chimesdkmessaging/pom.xml index f4558557dfcc..95762db08b0d 100644 --- a/services/chimesdkmessaging/pom.xml +++ b/services/chimesdkmessaging/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT chimesdkmessaging AWS Java SDK :: Services :: Chime SDK Messaging diff --git a/services/chimesdkvoice/pom.xml b/services/chimesdkvoice/pom.xml index f006831979b9..7bb803aa8b4c 100644 --- a/services/chimesdkvoice/pom.xml +++ b/services/chimesdkvoice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT chimesdkvoice AWS Java SDK :: Services :: Chime SDK Voice diff --git a/services/cleanrooms/pom.xml b/services/cleanrooms/pom.xml index 18dafc6f301c..baac90167874 100644 --- a/services/cleanrooms/pom.xml +++ b/services/cleanrooms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT cleanrooms AWS Java SDK :: Services :: Clean Rooms diff --git a/services/cleanrooms/src/main/resources/codegen-resources/endpoint-tests.json b/services/cleanrooms/src/main/resources/codegen-resources/endpoint-tests.json index a128e3bf3db3..412e9997909d 100644 --- a/services/cleanrooms/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/cleanrooms/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,53 +1,53 @@ { "testCases": [ { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cleanrooms-fips.us-gov-east-1.api.aws" + "url": "https://cleanrooms-fips.us-east-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cleanrooms-fips.us-gov-east-1.amazonaws.com" + "url": "https://cleanrooms-fips.us-east-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cleanrooms.us-gov-east-1.api.aws" + "url": "https://cleanrooms.us-east-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cleanrooms.us-gov-east-1.amazonaws.com" + "url": "https://cleanrooms.us-east-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false } @@ -105,101 +105,101 @@ } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://cleanrooms-fips.us-gov-east-1.api.aws" + } }, "params": { - "Region": 
"us-iso-east-1", + "Region": "us-gov-east-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cleanrooms-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://cleanrooms-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "Region": "us-iso-east-1", + "Region": "us-gov-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://cleanrooms.us-gov-east-1.api.aws" + } }, "params": { - "Region": "us-iso-east-1", + "Region": "us-gov-east-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cleanrooms.us-iso-east-1.c2s.ic.gov" + "url": "https://cleanrooms.us-gov-east-1.amazonaws.com" } }, "params": { - "Region": "us-iso-east-1", + "Region": "us-gov-east-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://cleanrooms-fips.us-east-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-east-1", + "Region": "us-iso-east-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cleanrooms-fips.us-east-1.amazonaws.com" + "url": "https://cleanrooms-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "Region": "us-east-1", + "Region": "us-iso-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://cleanrooms.us-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-east-1", + "Region": "us-iso-east-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cleanrooms.us-east-1.amazonaws.com" + "url": "https://cleanrooms.us-iso-east-1.c2s.ic.gov" } }, "params": { - "Region": "us-east-1", + "Region": "us-iso-east-1", "UseFIPS": false, "UseDualStack": false } @@ -253,7 +253,7 @@ } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -266,6 +266,19 @@ "Endpoint": "https://example.com" } }, + { + "documentation": "For 
custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, { "documentation": "For custom endpoint with fips enabled and dualstack disabled", "expect": { @@ -289,6 +302,12 @@ "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/cleanrooms/src/main/resources/codegen-resources/service-2.json b/services/cleanrooms/src/main/resources/codegen-resources/service-2.json index 5560200444a2..9c97b12fc3c1 100644 --- a/services/cleanrooms/src/main/resources/codegen-resources/service-2.json +++ b/services/cleanrooms/src/main/resources/codegen-resources/service-2.json @@ -674,7 +674,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

    Creates a protected query that is started by Clean Rooms .

    " + "documentation":"

    Creates a protected query that is started by Clean Rooms.
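
    Since a later hunk in this patch drops resultConfiguration from the required members of the start-query input, a protected query can rely on the membership's default result configuration. The sketch below assumes the generated names StartProtectedQueryRequest, ProtectedQueryType.SQL, and ProtectedQuerySqlParameters.queryString, and uses placeholder identifiers and SQL.

        import software.amazon.awssdk.services.cleanrooms.CleanRoomsClient;
        import software.amazon.awssdk.services.cleanrooms.model.ProtectedQuerySqlParameters;
        import software.amazon.awssdk.services.cleanrooms.model.ProtectedQueryType;
        import software.amazon.awssdk.services.cleanrooms.model.StartProtectedQueryRequest;
        import software.amazon.awssdk.services.cleanrooms.model.StartProtectedQueryResponse;

        public class StartProtectedQuerySketch {
            public static void main(String[] args) {
                CleanRoomsClient cleanRooms = CleanRoomsClient.create();

                // resultConfiguration is omitted here; per this patch it is no longer
                // required, so the membership's defaultResultConfiguration applies.
                StartProtectedQueryResponse response = cleanRooms.startProtectedQuery(
                        StartProtectedQueryRequest.builder()
                                .type(ProtectedQueryType.SQL)
                                .membershipIdentifier("membership-id")   // placeholder id
                                .sqlParameters(ProtectedQuerySqlParameters.builder()
                                        .queryString("SELECT COUNT(*) FROM example_table") // placeholder SQL
                                        .build())
                                .build());

                System.out.println("Protected query id: " + response.protectedQuery().id());
            }
        }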

    " }, "TagResource":{ "name":"TagResource", @@ -939,7 +939,7 @@ "AllowedColumnList":{ "type":"list", "member":{"shape":"ColumnName"}, - "max":100, + "max":225, "min":1 }, "AnalysisFormat":{ @@ -2426,6 +2426,10 @@ "tags":{ "shape":"TagMap", "documentation":"

    An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource.

    " + }, + "defaultResultConfiguration":{ + "shape":"MembershipProtectedQueryResultConfiguration", + "documentation":"

    The default protected query result configuration as specified by the member who can receive results.

    " } } }, @@ -2990,7 +2994,9 @@ }, "KeyPrefix":{ "type":"string", - "pattern":"[\\w!.*/-]*" + "max":512, + "min":0, + "pattern":"[\\w!.=*/-]*" }, "ListAnalysisTemplatesInput":{ "type":"structure", @@ -3532,6 +3538,10 @@ "queryLogStatus":{ "shape":"MembershipQueryLogStatus", "documentation":"

    An indicator as to whether query logging has been enabled or disabled for the collaboration.

    " + }, + "defaultResultConfiguration":{ + "shape":"MembershipProtectedQueryResultConfiguration", + "documentation":"

    The default protected query result configuration as specified by the member who can receive results.

    " } }, "documentation":"

    The membership object.

    " @@ -3548,6 +3558,29 @@ "min":36, "pattern":".*[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}.*" }, + "MembershipProtectedQueryOutputConfiguration":{ + "type":"structure", + "members":{ + "s3":{"shape":"ProtectedQueryS3OutputConfiguration"} + }, + "documentation":"

    Contains configurations for protected query results.

    ", + "union":true + }, + "MembershipProtectedQueryResultConfiguration":{ + "type":"structure", + "required":["outputConfiguration"], + "members":{ + "outputConfiguration":{ + "shape":"MembershipProtectedQueryOutputConfiguration", + "documentation":"

    Configuration for protected query results.

    " + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

    The unique ARN for an IAM role that is used by Clean Rooms to write protected query results to the result location, given by the member who can receive results.

    " + } + }, + "documentation":"

    Contains configurations for protected query results.
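
    Putting the new shapes together, a member who can receive results might supply the default result configuration when creating the membership. The shape names MembershipProtectedQueryResultConfiguration, MembershipProtectedQueryOutputConfiguration, roleArn, and defaultResultConfiguration come from this patch; the S3 output members (resultFormat, bucket, keyPrefix), the ResultFormat enum value, and all ARNs and identifiers are assumptions for illustration.

        import software.amazon.awssdk.services.cleanrooms.CleanRoomsClient;
        import software.amazon.awssdk.services.cleanrooms.model.CreateMembershipRequest;
        import software.amazon.awssdk.services.cleanrooms.model.MembershipProtectedQueryOutputConfiguration;
        import software.amazon.awssdk.services.cleanrooms.model.MembershipProtectedQueryResultConfiguration;
        import software.amazon.awssdk.services.cleanrooms.model.MembershipQueryLogStatus;
        import software.amazon.awssdk.services.cleanrooms.model.ProtectedQueryS3OutputConfiguration;
        import software.amazon.awssdk.services.cleanrooms.model.ResultFormat;

        public class CreateMembershipSketch {
            public static void main(String[] args) {
                CleanRoomsClient cleanRooms = CleanRoomsClient.create();

                // Default destination for protected query results, supplied by the
                // member who can receive results (the new defaultResultConfiguration).
                MembershipProtectedQueryResultConfiguration resultConfig =
                        MembershipProtectedQueryResultConfiguration.builder()
                                .outputConfiguration(MembershipProtectedQueryOutputConfiguration.builder()
                                        .s3(ProtectedQueryS3OutputConfiguration.builder()
                                                .resultFormat(ResultFormat.CSV)        // assumed enum value
                                                .bucket("amzn-example-results-bucket") // placeholder bucket
                                                .keyPrefix("clean-rooms/results")      // placeholder prefix
                                                .build())
                                        .build())
                                .roleArn("arn:aws:iam::111122223333:role/CleanRoomsResultsRole") // placeholder
                                .build();

                cleanRooms.createMembership(CreateMembershipRequest.builder()
                        .collaborationIdentifier("collaboration-id") // placeholder id
                        .queryLogStatus(MembershipQueryLogStatus.ENABLED)
                        .defaultResultConfiguration(resultConfig)
                        .build());
            }
        }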

    " + }, "MembershipQueryLogStatus":{ "type":"string", "enum":[ @@ -3678,9 +3711,7 @@ "membershipId", "membershipArn", "createTime", - "sqlParameters", - "status", - "resultConfiguration" + "status" ], "members":{ "id":{ @@ -3750,12 +3781,20 @@ "min":1, "pattern":".*[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}.*" }, + "ProtectedQueryMemberOutputList":{ + "type":"list", + "member":{"shape":"ProtectedQuerySingleMemberOutput"} + }, "ProtectedQueryOutput":{ "type":"structure", "members":{ "s3":{ "shape":"ProtectedQueryS3Output", "documentation":"

    If present, the output for a protected query with an `S3` output type.

    " + }, + "memberList":{ + "shape":"ProtectedQueryMemberOutputList", + "documentation":"

    The list of member Amazon Web Services account(s) that received the results of the query.

    " } }, "documentation":"

    Contains details about the protected query output.

    ", @@ -3857,6 +3896,17 @@ "max":15000, "min":0 }, + "ProtectedQuerySingleMemberOutput":{ + "type":"structure", + "required":["accountId"], + "members":{ + "accountId":{ + "shape":"AccountId", + "documentation":"

    The Amazon Web Services account ID of the member in the collaboration who can receive results for the query.

    " + } + }, + "documentation":"

    Details about the member who received the query result.

    " + }, "ProtectedQueryStatistics":{ "type":"structure", "members":{ @@ -4168,8 +4218,7 @@ "required":[ "type", "membershipIdentifier", - "sqlParameters", - "resultConfiguration" + "sqlParameters" ], "members":{ "type":{ @@ -4510,6 +4559,10 @@ "queryLogStatus":{ "shape":"MembershipQueryLogStatus", "documentation":"

    An indicator as to whether query logging has been enabled or disabled for the collaboration.

    " + }, + "defaultResultConfiguration":{ + "shape":"MembershipProtectedQueryResultConfiguration", + "documentation":"

    The default protected query result configuration as specified by the member who can receive results.

    " } } }, @@ -4608,5 +4661,5 @@ ] } }, - "documentation":"

    Welcome to the Clean Rooms API Reference.

    Clean Rooms is an Amazon Web Services service that helps multiple parties to join their data together in a secure collaboration workspace. In the collaboration, members who can query and receive results can get insights into the collective datasets without either party getting access to the other party's raw data.

    To learn more about Clean Rooms concepts, procedures, and best practices, see the Clean Rooms User Guide.

    " + "documentation":"

    Welcome to the Clean Rooms API Reference.

    Clean Rooms is an Amazon Web Services service that helps multiple parties to join their data together in a secure collaboration workspace. In the collaboration, members who can query and receive results can get insights into the collective datasets without either party getting access to the other party's raw data.

    To learn more about Clean Rooms concepts, procedures, and best practices, see the Clean Rooms User Guide.

    To learn more about SQL commands, functions, and conditions supported in Clean Rooms, see the Clean Rooms SQL Reference.
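
    The Region, UseFIPS, UseDualStack, and Endpoint parameters exercised by the endpoint tests earlier in this patch map onto the standard client builder options in the Java SDK. A rough sketch; fipsEnabled, dualstackEnabled, and endpointOverride are the general AwsClientBuilder settings rather than anything specific to this patch, and the endpoint URL is a placeholder.

        import java.net.URI;
        import software.amazon.awssdk.regions.Region;
        import software.amazon.awssdk.services.cleanrooms.CleanRoomsClient;

        public class CleanRoomsEndpointSketch {
            public static void main(String[] args) {
                // Resolved through the rule set: region + FIPS + dual-stack, e.g.
                // https://cleanrooms-fips.us-east-1.api.aws per the tests above.
                CleanRoomsClient fipsDualStack = CleanRoomsClient.builder()
                        .region(Region.US_EAST_1)
                        .fipsEnabled(true)
                        .dualstackEnabled(true)
                        .build();

                // Custom endpoint: the rule set rejects FIPS or dual-stack combined
                // with an override, so leave those unset when overriding the endpoint.
                CleanRoomsClient customEndpoint = CleanRoomsClient.builder()
                        .region(Region.US_EAST_1)
                        .endpointOverride(URI.create("https://example.com"))
                        .build();

                fipsDualStack.close();
                customEndpoint.close();
            }
        }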

    " } diff --git a/services/cloud9/pom.xml b/services/cloud9/pom.xml index a777b374e60b..2161d7e0808d 100644 --- a/services/cloud9/pom.xml +++ b/services/cloud9/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 cloud9 diff --git a/services/cloud9/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/cloud9/src/main/resources/codegen-resources/endpoint-rule-set.json index fbaea21794bd..26763c88c971 100644 --- a/services/cloud9/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/cloud9/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloud9-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://cloud9-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + 
"conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://cloud9-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://cloud9-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloud9.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://cloud9.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://cloud9.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://cloud9.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/cloud9/src/main/resources/codegen-resources/service-2.json b/services/cloud9/src/main/resources/codegen-resources/service-2.json index 2d2f489783d0..502337f92dc9 100644 --- a/services/cloud9/src/main/resources/codegen-resources/service-2.json +++ b/services/cloud9/src/main/resources/codegen-resources/service-2.json @@ -330,7 +330,7 @@ }, "imageId":{ "shape":"ImageId", - "documentation":"

    The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path.

    The default Amazon Linux AMI is currently used if the parameter isn't explicitly assigned a value in the request. Because Amazon Linux AMI has ended standard support as of December 31, 2020, we recommend you choose Amazon Linux 2, which includes long term support through 2023.

    From December 31, 2023, the parameter for Amazon Linux will no longer be available when you specify an AMI for your instance. Amazon Linux 2 will then become the default AMI, which is used to launch your instance if no parameter is explicitly defined.

    AMI aliases

    • Amazon Linux (default): amazonlinux-1-x86_64

    • Amazon Linux 2: amazonlinux-2-x86_64

    • Ubuntu 18.04: ubuntu-18.04-x86_64

    SSM paths

    • Amazon Linux (default): resolve:ssm:/aws/service/cloud9/amis/amazonlinux-1-x86_64

    • Amazon Linux 2: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64

    • Ubuntu 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64

    " + "documentation":"

    The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path.

    The default Amazon Linux AMI is currently used if the parameter isn't explicitly assigned a value in the request. Because Amazon Linux AMI has ended standard support as of December 31, 2020, we recommend you choose Amazon Linux 2, which includes long term support through 2023.

    From December 31, 2023, the parameter for Amazon Linux will no longer be available when you specify an AMI for your instance. Amazon Linux 2 will then become the default AMI, which is used to launch your instance if no parameter is explicitly defined.

    AMI aliases

    • Amazon Linux (default): amazonlinux-1-x86_64

    • Amazon Linux 2: amazonlinux-2-x86_64

    • Ubuntu 18.04: ubuntu-18.04-x86_64

    • Ubuntu 22.04: ubuntu-22.04-x86_64

    SSM paths

    • Amazon Linux (default): resolve:ssm:/aws/service/cloud9/amis/amazonlinux-1-x86_64

    • Amazon Linux 2: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64

    • Ubuntu 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64

    • Ubuntu 22.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-22.04-x86_64
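
    Either the alias form or the SSM path form is accepted for the imageId parameter. A minimal sketch of passing the new Ubuntu 22.04 alias from Java follows; the generated names assumed here (Cloud9Client, CreateEnvironmentEc2Request, createEnvironmentEC2) and the environment name and instance type are placeholders, not values from this patch.

        import software.amazon.awssdk.services.cloud9.Cloud9Client;
        import software.amazon.awssdk.services.cloud9.model.CreateEnvironmentEc2Request;

        public class Cloud9ImageIdSketch {
            public static void main(String[] args) {
                Cloud9Client cloud9 = Cloud9Client.create();

                cloud9.createEnvironmentEC2(CreateEnvironmentEc2Request.builder()
                        .name("example-environment")   // placeholder name
                        .instanceType("t3.small")      // placeholder instance type
                        // Alias added in this patch; the SSM path form
                        // resolve:ssm:/aws/service/cloud9/amis/ubuntu-22.04-x86_64 would also work.
                        .imageId("ubuntu-22.04-x86_64")
                        .build());
            }
        }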

    " }, "automaticStopTimeMinutes":{ "shape":"AutomaticStopTimeMinutes", diff --git a/services/cloudcontrol/pom.xml b/services/cloudcontrol/pom.xml index c6f585ad8ac9..8c44d2529da1 100644 --- a/services/cloudcontrol/pom.xml +++ b/services/cloudcontrol/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT cloudcontrol AWS Java SDK :: Services :: Cloud Control diff --git a/services/clouddirectory/pom.xml b/services/clouddirectory/pom.xml index 595227af12e9..3368bb0fa94a 100644 --- a/services/clouddirectory/pom.xml +++ b/services/clouddirectory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT clouddirectory AWS Java SDK :: Services :: Amazon CloudDirectory diff --git a/services/cloudformation/pom.xml b/services/cloudformation/pom.xml index 3b4121c4d651..0bf138618987 100644 --- a/services/cloudformation/pom.xml +++ b/services/cloudformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT cloudformation AWS Java SDK :: Services :: AWS CloudFormation diff --git a/services/cloudformation/src/main/resources/codegen-resources/customization.config b/services/cloudformation/src/main/resources/codegen-resources/customization.config index a88012d0b426..e7b4b0ab5bf6 100644 --- a/services/cloudformation/src/main/resources/codegen-resources/customization.config +++ b/services/cloudformation/src/main/resources/codegen-resources/customization.config @@ -17,7 +17,7 @@ ] } }, - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "estimateTemplateCost", "validateTemplate", "getTemplate", diff --git a/services/cloudfront/pom.xml b/services/cloudfront/pom.xml index 18d3adcb72b6..05cd7a98d04c 100644 --- a/services/cloudfront/pom.xml +++ b/services/cloudfront/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT cloudfront AWS Java SDK :: Services :: Amazon CloudFront diff --git a/services/cloudhsm/pom.xml b/services/cloudhsm/pom.xml index 994b7e24db34..51ce389f6e89 100644 --- a/services/cloudhsm/pom.xml +++ b/services/cloudhsm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT cloudhsm AWS Java SDK :: Services :: AWS CloudHSM diff --git a/services/cloudhsm/src/main/resources/codegen-resources/customization.config b/services/cloudhsm/src/main/resources/codegen-resources/customization.config index df5ba4973980..7477c0a69fc1 100644 --- a/services/cloudhsm/src/main/resources/codegen-resources/customization.config +++ b/services/cloudhsm/src/main/resources/codegen-resources/customization.config @@ -14,7 +14,7 @@ "exclude": [ "retryable" ] } }, - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "describeHsm", "describeLunaClient" ], diff --git a/services/cloudhsm/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/cloudhsm/src/main/resources/codegen-resources/endpoint-rule-set.json index 807bc8fae2c0..a0cd821bf7f3 100644 --- a/services/cloudhsm/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/cloudhsm/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,21 +45,69 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": 
"Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ { - "fn": "parseURL", + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", "argv": [ { - "ref": "Endpoint" + "ref": "Region" } ], - "assign": "url" + "assign": "PartitionResult" } ], "type": "tree", @@ -75,13 +122,17 @@ }, true ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], "type": "tree", "rules": [ { @@ -89,143 +140,86 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "PartitionResult" - }, - "supportsFIPS" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } ] } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + ], + "type": "tree", + "rules": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] + "conditions": [], + "endpoint": { + "url": "https://cloudhsm-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": "https://cloudhsm-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], "type": "tree", "rules": [ { @@ -238,78 +232,83 @@ "type": "endpoint" } ] + }, 
+ { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://cloudhsm.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://cloudhsm.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://cloudhsm.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://cloudhsm.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/cloudhsm/src/main/resources/codegen-resources/endpoint-tests.json b/services/cloudhsm/src/main/resources/codegen-resources/endpoint-tests.json index f460a39a0a0e..b114cead1fc2 100644 --- a/services/cloudhsm/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/cloudhsm/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,42 +1,107 @@ { "testCases": [ { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudhsm-fips.us-gov-west-1.api.aws" + "url": "https://cloudhsm.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloudhsm-fips.us-gov-west-1.amazonaws.com" + "url": "https://cloudhsm-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudhsm.us-gov-west-1.api.aws" + "url": "https://cloudhsm-fips.us-east-1.amazonaws.com" } }, "params": { - 
"UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cloudhsm.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cloudhsm-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudhsm-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cloudhsm.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudhsm.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -47,74 +112,183 @@ } }, "params": { - "UseDualStack": false, "Region": "us-gov-west-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloudhsm-fips.us-east-1.api.aws" + "url": "https://cloudhsm-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudhsm-fips.us-east-1.amazonaws.com" + "url": "https://cloudhsm-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloudhsm.us-east-1.api.aws" + "url": "https://cloudhsm.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudhsm.us-east-1.amazonaws.com" + "url": "https://cloudhsm.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + 
"UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudhsm-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudhsm.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudhsm-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cloudhsm.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -124,9 +298,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -136,11 +310,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + 
"error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/cloudhsm/src/main/resources/codegen-resources/service-2.json b/services/cloudhsm/src/main/resources/codegen-resources/service-2.json index dc6f74aa78b1..541f1270b285 100644 --- a/services/cloudhsm/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudhsm/src/main/resources/codegen-resources/service-2.json @@ -26,7 +26,9 @@ {"shape":"CloudHsmInternalException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Adds or overwrites one or more tags for the specified AWS CloudHSM resource.

    Each tag consists of a key and a value. Tag keys must be unique to each resource.

    " + "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Adds or overwrites one or more tags for the specified AWS CloudHSM resource.

    Each tag consists of a key and a value. Tag keys must be unique to each resource.
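
    For completeness, tagging a CloudHSM Classic resource from the Java client might look like the sketch below; this patch flags the operation as deprecated, so generated calls will carry a deprecation warning. The member names resourceArn and tagList and the ARN and tag values are assumptions for illustration.

        import software.amazon.awssdk.services.cloudhsm.CloudHsmClient;
        import software.amazon.awssdk.services.cloudhsm.model.AddTagsToResourceRequest;
        import software.amazon.awssdk.services.cloudhsm.model.Tag;

        public class CloudHsmClassicTagSketch {
            @SuppressWarnings("deprecation") // the operation is marked deprecated in this patch
            public static void main(String[] args) {
                CloudHsmClient cloudHsm = CloudHsmClient.create();

                cloudHsm.addTagsToResource(AddTagsToResourceRequest.builder()
                        .resourceArn("arn:aws:cloudhsm:us-east-1:111122223333:hsm-example") // placeholder ARN
                        .tagList(Tag.builder().key("team").value("security").build())       // placeholder tag
                        .build());
            }
        }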

    ", + "deprecated":true, + "deprecatedMessage":"This API is deprecated." }, "CreateHapg":{ "name":"CreateHapg", @@ -41,7 +43,9 @@ {"shape":"CloudHsmInternalException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Creates a high-availability partition group. A high-availability partition group is a group of partitions that spans multiple physical HSMs.

    " + "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Creates a high-availability partition group. A high-availability partition group is a group of partitions that spans multiple physical HSMs.

    ", + "deprecated":true, + "deprecatedMessage":"This API is deprecated." }, "CreateHsm":{ "name":"CreateHsm", @@ -56,7 +60,9 @@ {"shape":"CloudHsmInternalException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Creates an uninitialized HSM instance.

    There is an upfront fee charged for each HSM instance that you create with the CreateHsm operation. If you accidentally provision an HSM and want to request a refund, delete the instance using the DeleteHsm operation, go to the AWS Support Center, create a new case, and select Account and Billing Support.

    It can take up to 20 minutes to create and provision an HSM. You can monitor the status of the HSM with the DescribeHsm operation. The HSM is ready to be initialized when the status changes to RUNNING.

    " + "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Creates an uninitialized HSM instance.

    There is an upfront fee charged for each HSM instance that you create with the CreateHsm operation. If you accidentally provision an HSM and want to request a refund, delete the instance using the DeleteHsm operation, go to the AWS Support Center, create a new case, and select Account and Billing Support.

    It can take up to 20 minutes to create and provision an HSM. You can monitor the status of the HSM with the DescribeHsm operation. The HSM is ready to be initialized when the status changes to RUNNING.

    ", + "deprecated":true, + "deprecatedMessage":"This API is deprecated." }, "CreateLunaClient":{ "name":"CreateLunaClient", @@ -71,7 +77,9 @@ {"shape":"CloudHsmInternalException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Creates an HSM client.

    " + "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Creates an HSM client.

    ", + "deprecated":true, + "deprecatedMessage":"This API is deprecated." }, "DeleteHapg":{ "name":"DeleteHapg", @@ -86,7 +94,9 @@ {"shape":"CloudHsmInternalException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Deletes a high-availability partition group.

    " + "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Deletes a high-availability partition group.

    ", + "deprecated":true, + "deprecatedMessage":"This API is deprecated." }, "DeleteHsm":{ "name":"DeleteHsm", @@ -101,7 +111,9 @@ {"shape":"CloudHsmInternalException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Deletes an HSM. After completion, this operation cannot be undone and your key material cannot be recovered.

    " + "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Deletes an HSM. After completion, this operation cannot be undone and your key material cannot be recovered.

    ", + "deprecated":true, + "deprecatedMessage":"This API is deprecated." }, "DeleteLunaClient":{ "name":"DeleteLunaClient", @@ -116,7 +128,9 @@ {"shape":"CloudHsmInternalException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Deletes a client.

    " + "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Deletes a client.

    ", + "deprecated":true, + "deprecatedMessage":"This API is deprecated." }, "DescribeHapg":{ "name":"DescribeHapg", @@ -131,7 +145,9 @@ {"shape":"CloudHsmInternalException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Retrieves information about a high-availability partition group.

    " + "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Retrieves information about a high-availability partition group.

    ", + "deprecated":true, + "deprecatedMessage":"This API is deprecated." }, "DescribeHsm":{ "name":"DescribeHsm", @@ -146,7 +162,9 @@ {"shape":"CloudHsmInternalException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Retrieves information about an HSM. You can identify the HSM by its ARN or its serial number.

    " + "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Retrieves information about an HSM. You can identify the HSM by its ARN or its serial number.

    ", + "deprecated":true, + "deprecatedMessage":"This API is deprecated." }, "DescribeLunaClient":{ "name":"DescribeLunaClient", @@ -161,7 +179,9 @@ {"shape":"CloudHsmInternalException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Retrieves information about an HSM client.

    " + "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Retrieves information about an HSM client.

    ", + "deprecated":true, + "deprecatedMessage":"This API is deprecated." }, "GetConfig":{ "name":"GetConfig", @@ -176,7 +196,9 @@ {"shape":"CloudHsmInternalException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Gets the configuration files necessary to connect to all high availability partition groups the client is associated with.

    " + "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Gets the configuration files necessary to connect to all high availability partition groups the client is associated with.

    ", + "deprecated":true, + "deprecatedMessage":"This API is deprecated." }, "ListAvailableZones":{ "name":"ListAvailableZones", @@ -191,7 +213,9 @@ {"shape":"CloudHsmInternalException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Lists the Availability Zones that have available AWS CloudHSM capacity.

    " + "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Lists the Availability Zones that have available AWS CloudHSM capacity.

    ", + "deprecated":true, + "deprecatedMessage":"This API is deprecated." }, "ListHapgs":{ "name":"ListHapgs", @@ -206,7 +230,9 @@ {"shape":"CloudHsmInternalException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Lists the high-availability partition groups for the account.

    This operation supports pagination with the use of the NextToken member. If more results are available, the NextToken member of the response contains a token that you pass in the next call to ListHapgs to retrieve the next set of items.

    " + "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Lists the high-availability partition groups for the account.

    This operation supports pagination with the use of the NextToken member. If more results are available, the NextToken member of the response contains a token that you pass in the next call to ListHapgs to retrieve the next set of items.

    ", + "deprecated":true, + "deprecatedMessage":"This API is deprecated." }, "ListHsms":{ "name":"ListHsms", @@ -221,7 +247,9 @@ {"shape":"CloudHsmInternalException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Retrieves the identifiers of all of the HSMs provisioned for the current customer.

    This operation supports pagination with the use of the NextToken member. If more results are available, the NextToken member of the response contains a token that you pass in the next call to ListHsms to retrieve the next set of items.

    " + "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Retrieves the identifiers of all of the HSMs provisioned for the current customer.

    This operation supports pagination with the use of the NextToken member. If more results are available, the NextToken member of the response contains a token that you pass in the next call to ListHsms to retrieve the next set of items.
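    As a rough sketch of the NextToken loop described above (not part of this change), a caller using the generated CloudHSM Classic client might page through ListHsms as follows; the member names (hsmList, nextToken) follow the service model.

    import java.util.ArrayList;
    import java.util.List;
    import software.amazon.awssdk.services.cloudhsm.CloudHsmClient;
    import software.amazon.awssdk.services.cloudhsm.model.ListHsmsResponse;

    public class ListAllHsms {
        // Collects every HSM ARN by following NextToken until it is no longer returned.
        public static List<String> listAll(CloudHsmClient cloudHsm) {
            List<String> hsmArns = new ArrayList<>();
            String nextToken = null;
            do {
                String token = nextToken;
                ListHsmsResponse page = cloudHsm.listHsms(r -> r.nextToken(token));
                hsmArns.addAll(page.hsmList());
                nextToken = page.nextToken();
            } while (nextToken != null);
            return hsmArns;
        }
    }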

    ", + "deprecated":true, + "deprecatedMessage":"This API is deprecated." }, "ListLunaClients":{ "name":"ListLunaClients", @@ -236,7 +264,9 @@ {"shape":"CloudHsmInternalException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Lists all of the clients.

    This operation supports pagination with the use of the NextToken member. If more results are available, the NextToken member of the response contains a token that you pass in the next call to ListLunaClients to retrieve the next set of items.

    " + "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Lists all of the clients.

    This operation supports pagination with the use of the NextToken member. If more results are available, the NextToken member of the response contains a token that you pass in the next call to ListLunaClients to retrieve the next set of items.

    ", + "deprecated":true, + "deprecatedMessage":"This API is deprecated." }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -251,7 +281,9 @@ {"shape":"CloudHsmInternalException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Returns a list of all tags for the specified AWS CloudHSM resource.

    " + "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Returns a list of all tags for the specified AWS CloudHSM resource.

    ", + "deprecated":true, + "deprecatedMessage":"This API is deprecated." }, "ModifyHapg":{ "name":"ModifyHapg", @@ -266,7 +298,9 @@ {"shape":"CloudHsmInternalException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Modifies an existing high-availability partition group.

    " + "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Modifies an existing high-availability partition group.

    ", + "deprecated":true, + "deprecatedMessage":"This API is deprecated." }, "ModifyHsm":{ "name":"ModifyHsm", @@ -281,7 +315,9 @@ {"shape":"CloudHsmInternalException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Modifies an HSM.

    This operation can result in the HSM being offline for up to 15 minutes while the AWS CloudHSM service is reconfigured. If you are modifying a production HSM, you should ensure that your AWS CloudHSM service is configured for high availability, and consider executing this operation during a maintenance window.

    " + "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Modifies an HSM.

    This operation can result in the HSM being offline for up to 15 minutes while the AWS CloudHSM service is reconfigured. If you are modifying a production HSM, you should ensure that your AWS CloudHSM service is configured for high availability, and consider executing this operation during a maintenance window.

    ", + "deprecated":true, + "deprecatedMessage":"This API is deprecated." }, "ModifyLunaClient":{ "name":"ModifyLunaClient", @@ -294,7 +330,9 @@ "errors":[ {"shape":"CloudHsmServiceException"} ], - "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Modifies the certificate used by the client.

    This action can potentially start a workflow to install the new certificate on the client's HSMs.

    " + "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Modifies the certificate used by the client.

    This action can potentially start a workflow to install the new certificate on the client's HSMs.

    ", + "deprecated":true, + "deprecatedMessage":"This API is deprecated." }, "RemoveTagsFromResource":{ "name":"RemoveTagsFromResource", @@ -309,7 +347,9 @@ {"shape":"CloudHsmInternalException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Removes one or more tags from the specified AWS CloudHSM resource.

    To remove a tag, specify only the tag key to remove (not the value). To overwrite the value for an existing tag, use AddTagsToResource.

    " + "documentation":"

    This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference.

    For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference.

    Removes one or more tags from the specified AWS CloudHSM resource.

    To remove a tag, specify only the tag key to remove (not the value). To overwrite the value for an existing tag, use AddTagsToResource.

    ", + "deprecated":true, + "deprecatedMessage":"This API is deprecated." } }, "shapes":{ diff --git a/services/cloudhsmv2/pom.xml b/services/cloudhsmv2/pom.xml index 14b47fac8eed..b2c902e48df0 100644 --- a/services/cloudhsmv2/pom.xml +++ b/services/cloudhsmv2/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 cloudhsmv2 diff --git a/services/cloudsearch/pom.xml b/services/cloudsearch/pom.xml index 3935738f819d..ffd492b48b7d 100644 --- a/services/cloudsearch/pom.xml +++ b/services/cloudsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT cloudsearch AWS Java SDK :: Services :: Amazon CloudSearch diff --git a/services/cloudsearchdomain/pom.xml b/services/cloudsearchdomain/pom.xml index 95d981900352..de926a0038f3 100644 --- a/services/cloudsearchdomain/pom.xml +++ b/services/cloudsearchdomain/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT cloudsearchdomain AWS Java SDK :: Services :: Amazon CloudSearch Domain diff --git a/services/cloudtrail/pom.xml b/services/cloudtrail/pom.xml index b1c5cfaaab3f..36248548ce31 100644 --- a/services/cloudtrail/pom.xml +++ b/services/cloudtrail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT cloudtrail AWS Java SDK :: Services :: AWS CloudTrail diff --git a/services/cloudtrail/src/main/resources/codegen-resources/service-2.json b/services/cloudtrail/src/main/resources/codegen-resources/service-2.json index 60f77595e8a0..de0513919413 100644 --- a/services/cloudtrail/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudtrail/src/main/resources/codegen-resources/service-2.json @@ -158,7 +158,8 @@ {"shape":"OrganizationNotInAllFeaturesModeException"}, {"shape":"NoManagementAccountSLRExistsException"}, {"shape":"CloudTrailInvalidClientTokenIdException"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Creates a trail that specifies the settings for delivery of log data to an Amazon S3 bucket.

    ", "idempotent":true @@ -235,6 +236,7 @@ {"shape":"InvalidTrailNameException"}, {"shape":"CloudTrailARNInvalidException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"InvalidHomeRegionException"}, {"shape":"UnsupportedOperationException"}, {"shape":"OperationNotPermittedException"}, @@ -393,7 +395,8 @@ {"shape":"UnsupportedOperationException"}, {"shape":"OperationNotPermittedException"}, {"shape":"InsightNotEnabledException"}, - {"shape":"NoManagementAccountSLRExistsException"} + {"shape":"NoManagementAccountSLRExistsException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Describes the settings for the Insights event selectors that you configured for your trail. GetInsightSelectors shows if CloudTrail Insights event logging is enabled on the trail and, if it is, which insight types are enabled. If you run GetInsightSelectors on a trail that does not have Insights events enabled, the operation throws the exception InsightNotEnabledException.

    For more information, see Logging CloudTrail Insights Events for Trails in the CloudTrail User Guide.

    ", "idempotent":true @@ -660,6 +663,7 @@ {"shape":"InvalidHomeRegionException"}, {"shape":"InvalidEventSelectorsException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"UnsupportedOperationException"}, {"shape":"OperationNotPermittedException"}, {"shape":"NotOrganizationMasterAccountException"}, @@ -690,7 +694,8 @@ {"shape":"UnsupportedOperationException"}, {"shape":"OperationNotPermittedException"}, {"shape":"NotOrganizationMasterAccountException"}, - {"shape":"NoManagementAccountSLRExistsException"} + {"shape":"NoManagementAccountSLRExistsException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Lets you enable Insights event logging by specifying the Insights selectors that you want to enable on an existing trail. You also use PutInsightSelectors to turn off Insights event logging, by passing an empty list of insight types. The valid Insights event types in this release are ApiErrorRateInsight and ApiCallRateInsight.

    To log CloudTrail Insights events on API call volume, the trail must log write management events. To log CloudTrail Insights events on API error rate, the trail must log read or write management events. You can call GetEventSelectors on a trail to check whether the trail logs management events.
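    A minimal sketch of enabling and then disabling Insights with PutInsightSelectors, assuming the generated CloudTrail client, InsightSelector builder, and InsightType constants; the trail name is a placeholder.

    import java.util.Collections;
    import software.amazon.awssdk.services.cloudtrail.CloudTrailClient;
    import software.amazon.awssdk.services.cloudtrail.model.InsightSelector;
    import software.amazon.awssdk.services.cloudtrail.model.InsightType;

    public class ToggleInsights {
        public static void main(String[] args) {
            CloudTrailClient cloudTrail = CloudTrailClient.create();

            // Enable Insights for API call volume and API error rate on the trail.
            cloudTrail.putInsightSelectors(r -> r
                .trailName("my-trail")
                .insightSelectors(
                    InsightSelector.builder().insightType(InsightType.API_CALL_RATE_INSIGHT).build(),
                    InsightSelector.builder().insightType(InsightType.API_ERROR_RATE_INSIGHT).build()));

            // Turn Insights event logging off again by passing an empty list.
            cloudTrail.putInsightSelectors(r -> r
                .trailName("my-trail")
                .insightSelectors(Collections.emptyList()));
        }
    }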

    ", "idempotent":true @@ -851,6 +856,7 @@ "errors":[ {"shape":"CloudTrailARNInvalidException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"TrailNotFoundException"}, {"shape":"InvalidTrailNameException"}, {"shape":"InvalidHomeRegionException"}, @@ -941,6 +947,7 @@ {"shape":"InvalidTrailNameException"}, {"shape":"CloudTrailARNInvalidException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"InvalidHomeRegionException"}, {"shape":"UnsupportedOperationException"}, {"shape":"OperationNotPermittedException"}, @@ -1029,6 +1036,7 @@ {"shape":"InvalidEventSelectorsException"}, {"shape":"CloudTrailARNInvalidException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"InvalidParameterCombinationException"}, {"shape":"InvalidHomeRegionException"}, {"shape":"KmsKeyNotFoundException"}, @@ -4015,6 +4023,13 @@ "max":200 }, "TerminationProtectionEnabled":{"type":"boolean"}, + "ThrottlingException":{ + "type":"structure", + "members":{ + }, + "documentation":"

    This exception is thrown when the request rate exceeds the limit.
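    The SDK's default retry policy already treats throttling errors as retryable, so most callers need no extra code; the following sketch only illustrates what explicit handling of the newly modeled exception could look like, assuming the ThrottlingException class generated from this shape.

    import software.amazon.awssdk.services.cloudtrail.CloudTrailClient;
    import software.amazon.awssdk.services.cloudtrail.model.CreateTrailRequest;
    import software.amazon.awssdk.services.cloudtrail.model.CreateTrailResponse;
    import software.amazon.awssdk.services.cloudtrail.model.ThrottlingException;

    public class CreateTrailWithBackoff {
        // Retries a throttled CreateTrail call a few times with doubling delays before giving up.
        public static CreateTrailResponse createTrail(CloudTrailClient cloudTrail, CreateTrailRequest request)
                throws InterruptedException {
            long backoffMillis = 500;
            for (int attempt = 0; ; attempt++) {
                try {
                    return cloudTrail.createTrail(request);
                } catch (ThrottlingException e) {
                    if (attempt == 4) {
                        throw e;
                    }
                    Thread.sleep(backoffMillis);
                    backoffMillis *= 2;
                }
            }
        }
    }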

    ", + "exception":true + }, "Trail":{ "type":"structure", "members":{ diff --git a/services/cloudtraildata/pom.xml b/services/cloudtraildata/pom.xml index 43c6e00ade48..80e09f557e43 100644 --- a/services/cloudtraildata/pom.xml +++ b/services/cloudtraildata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT cloudtraildata AWS Java SDK :: Services :: Cloud Trail Data diff --git a/services/cloudwatch/pom.xml b/services/cloudwatch/pom.xml index c482b63caf90..8003fcedf00f 100644 --- a/services/cloudwatch/pom.xml +++ b/services/cloudwatch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT cloudwatch AWS Java SDK :: Services :: Amazon CloudWatch diff --git a/services/cloudwatch/src/it/java/software/amazon/awssdk/services/cloudwatch/CloudWatchIntegrationTest.java b/services/cloudwatch/src/it/java/software/amazon/awssdk/services/cloudwatch/CloudWatchIntegrationTest.java index 01722140044f..8245d82a7ef5 100644 --- a/services/cloudwatch/src/it/java/software/amazon/awssdk/services/cloudwatch/CloudWatchIntegrationTest.java +++ b/services/cloudwatch/src/it/java/software/amazon/awssdk/services/cloudwatch/CloudWatchIntegrationTest.java @@ -39,8 +39,11 @@ import org.junit.BeforeClass; import org.junit.Test; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.CompressionConfiguration; import software.amazon.awssdk.core.SdkGlobalTime; import software.amazon.awssdk.core.exception.SdkServiceException; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.cloudwatch.model.Datapoint; import software.amazon.awssdk.services.cloudwatch.model.DeleteAlarmsRequest; @@ -108,7 +111,6 @@ public static void cleanupAlarms() { /** * Tests putting metrics and then getting them back. */ - @Test public void put_get_metricdata_list_metric_returns_success() throws InterruptedException { @@ -164,6 +166,86 @@ public void put_get_metricdata_list_metric_returns_success() throws assertTrue(seenDimensions); } + /** + * Tests putting metrics with request compression and then getting them back. 
+ * TODO: We can remove this test once CloudWatch adds "RequestCompression" trait to PutMetricData + */ + @Test + public void put_get_metricdata_list_metric_withRequestCompression_returns_success() { + + RequestCompression requestCompressionTrait = RequestCompression.builder() + .encodings("gzip") + .isStreaming(false) + .build(); + CompressionConfiguration compressionConfiguration = CompressionConfiguration.builder() + // uncompressed payload is 404 bytes + .minimumCompressionThresholdInBytes(100) + .build(); + + CloudWatchClient requestCompressionClient = + CloudWatchClient.builder() + .credentialsProvider(getCredentialsProvider()) + .region(Region.US_WEST_2) + .overrideConfiguration(c -> c.putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + requestCompressionTrait)) + .build(); + + String measureName = this.getClass().getName() + System.currentTimeMillis(); + + MetricDatum datum = MetricDatum.builder().dimensions( + Dimension.builder().name("InstanceType").value("m1.small").build()) + .metricName(measureName).timestamp(Instant.now()) + .unit("Count").value(42.0).build(); + + requestCompressionClient.putMetricData(PutMetricDataRequest.builder() + .namespace("AWS.EC2") + .metricData(datum) + .overrideConfiguration(c -> c.compressionConfiguration(compressionConfiguration)) + .build()); + + GetMetricStatisticsResponse result = + Waiter.run(() -> requestCompressionClient + .getMetricStatistics(r -> r.startTime(Instant.now().minus(Duration.ofDays(7))) + .namespace("AWS.EC2") + .period(60 * 60) + .dimensions(Dimension.builder().name("InstanceType") + .value("m1.small").build()) + .metricName(measureName) + .statisticsWithStrings("Average", "Maximum", "Minimum", "Sum") + .endTime(Instant.now()))) + .until(r -> r.datapoints().size() == 1) + .orFailAfter(Duration.ofMinutes(2)); + + assertNotNull(result.label()); + assertEquals(measureName, result.label()); + + assertEquals(1, result.datapoints().size()); + for (Datapoint datapoint : result.datapoints()) { + assertEquals(datum.value(), datapoint.average()); + assertEquals(datum.value(), datapoint.maximum()); + assertEquals(datum.value(), datapoint.minimum()); + assertEquals(datum.value(), datapoint.sum()); + assertNotNull(datapoint.timestamp()); + assertEquals(datum.unit(), datapoint.unit()); + } + + ListMetricsResponse listResult = requestCompressionClient.listMetrics(ListMetricsRequest.builder().build()); + + boolean seenDimensions = false; + assertTrue(listResult.metrics().size() > 0); + for (Metric metric : listResult.metrics()) { + assertNotNull(metric.metricName()); + assertNotNull(metric.namespace()); + + for (Dimension dimension : metric.dimensions()) { + seenDimensions = true; + assertNotNull(dimension.name()); + assertNotNull(dimension.value()); + } + } + assertTrue(seenDimensions); + } + /** * Tests setting the state for an alarm and reading its history. 
*/ diff --git a/services/cloudwatch/src/main/resources/codegen-resources/customization.config b/services/cloudwatch/src/main/resources/codegen-resources/customization.config index 5c580ec85631..d819241c3710 100644 --- a/services/cloudwatch/src/main/resources/codegen-resources/customization.config +++ b/services/cloudwatch/src/main/resources/codegen-resources/customization.config @@ -5,7 +5,7 @@ "listDashboards", "listMetrics" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "deleteDashboards", "putDashboard", "getDashboard" diff --git a/services/cloudwatch/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/cloudwatch/src/main/resources/codegen-resources/endpoint-rule-set.json index 94f8f3988691..39504ccc8762 100644 --- a/services/cloudwatch/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/cloudwatch/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://monitoring-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": 
"https://monitoring-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -221,155 +225,115 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://monitoring.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://monitoring-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://monitoring.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://monitoring-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://monitoring.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://monitoring.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://monitoring.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://monitoring.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - 
"conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/cloudwatch/src/main/resources/codegen-resources/service-2.json b/services/cloudwatch/src/main/resources/codegen-resources/service-2.json index c3f0b4d5bd0b..a05476617237 100644 --- a/services/cloudwatch/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudwatch/src/main/resources/codegen-resources/service-2.json @@ -1342,7 +1342,7 @@ }, "AlarmTypes":{ "shape":"AlarmTypes", - "documentation":"

    Use this parameter to specify whether you want the operation to return metric alarms or composite alarms. If you omit this parameter, only metric alarms are returned.

    " + "documentation":"

    Use this parameter to specify whether you want the operation to return metric alarms or composite alarms. If you omit this parameter, only metric alarms are returned, even if composite alarms exist in the account.

    For example, if you omit this parameter or specify MetricAlarms, the operation returns only a list of metric alarms. It does not return any composite alarms, even if composite alarms exist in the account.

    If you specify CompositeAlarms, the operation returns only a list of composite alarms, and does not return any metric alarms.
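    A short sketch of the behavior described above, assuming the generated CloudWatch client and AlarmType constants: omitting alarmTypes yields metric alarms only, while passing COMPOSITE_ALARM yields composite alarms only.

    import software.amazon.awssdk.services.cloudwatch.CloudWatchClient;
    import software.amazon.awssdk.services.cloudwatch.model.AlarmType;
    import software.amazon.awssdk.services.cloudwatch.model.DescribeAlarmsResponse;

    public class DescribeCompositeAlarms {
        public static void main(String[] args) {
            CloudWatchClient cloudWatch = CloudWatchClient.create();

            // Without alarmTypes, only metric alarms would be returned.
            DescribeAlarmsResponse response =
                cloudWatch.describeAlarms(r -> r.alarmTypes(AlarmType.COMPOSITE_ALARM));

            response.compositeAlarms().forEach(alarm -> System.out.println(alarm.alarmName()));
        }
    }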

    " }, "ChildrenOfAlarmName":{ "shape":"AlarmName", @@ -1662,7 +1662,7 @@ }, "OrderBy":{ "shape":"InsightRuleOrderBy", - "documentation":"

    Determines what statistic to use to rank the contributors. Valid values are SUM and MAXIMUM.

    " + "documentation":"

    Determines what statistic to use to rank the contributors. Valid values are Sum and Maximum.

    " } } }, @@ -2376,7 +2376,7 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

    The ARN of the CloudWatch resource that you want to view tags for.

    The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

    The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name

    For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

    " + "documentation":"

    The ARN of the CloudWatch resource that you want to view tags for.

    The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

    The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule/insight-rule-name

    For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.
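    A small sketch using the ARN formats above with the generated CloudWatch client; the Region, account ID, and rule name are placeholders, and the slash before the Contributor Insights rule name reflects the corrected format in this change.

    import software.amazon.awssdk.services.cloudwatch.CloudWatchClient;
    import software.amazon.awssdk.services.cloudwatch.model.ListTagsForResourceResponse;

    public class ListInsightRuleTags {
        public static void main(String[] args) {
            CloudWatchClient cloudWatch = CloudWatchClient.create();

            // Contributor Insights rule ARNs use "insight-rule/" followed by the rule name.
            ListTagsForResourceResponse tags = cloudWatch.listTagsForResource(r ->
                r.resourceARN("arn:aws:cloudwatch:us-east-1:123456789012:insight-rule/my-rule"));

            tags.tags().forEach(tag -> System.out.println(tag.key() + "=" + tag.value()));
        }
    }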

    " } } }, @@ -3247,7 +3247,7 @@ }, "ExtendedStatistic":{ "shape":"ExtendedStatistic", - "documentation":"

    The percentile statistic for the metric specified in MetricName. Specify a value between p0.0 and p100. When you call PutMetricAlarm and specify a MetricName, you must specify either Statistic or ExtendedStatistic, but not both.

    " + "documentation":"

    The extended statistic for the metric specified in MetricName. When you call PutMetricAlarm and specify a MetricName, you must specify either Statistic or ExtendedStatistic but not both.

    If you specify ExtendedStatistic, the following are valid values:

    • p90

    • tm90

    • tc90

    • ts90

    • wm90

    • IQM

    • PR(n:m) where n and m are values of the metric

    • TC(X%:X%) where X is between 10 and 90 inclusive.

    • TM(X%:X%) where X is between 10 and 90 inclusive.

    • TS(X%:X%) where X is between 10 and 90 inclusive.

    • WM(X%:X%) where X is between 10 and 90 inclusive.

    For more information about these extended statistics, see CloudWatch statistics definitions.
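    A minimal PutMetricAlarm sketch using one of the extended statistics listed above (p90), with placeholder namespace, metric, and threshold values; either Statistic or ExtendedStatistic may be set on the request, but not both.

    import software.amazon.awssdk.services.cloudwatch.CloudWatchClient;
    import software.amazon.awssdk.services.cloudwatch.model.ComparisonOperator;

    public class P90LatencyAlarm {
        public static void main(String[] args) {
            CloudWatchClient cloudWatch = CloudWatchClient.create();

            cloudWatch.putMetricAlarm(r -> r
                .alarmName("p90-latency-high")
                .namespace("MyApp")                 // placeholder custom namespace
                .metricName("Latency")              // placeholder metric
                .extendedStatistic("p90")           // extended statistic instead of Statistic
                .period(300)
                .evaluationPeriods(3)
                .threshold(250.0)
                .comparisonOperator(ComparisonOperator.GREATER_THAN_THRESHOLD));
        }
    }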

    " }, "Dimensions":{ "shape":"Dimensions", @@ -3291,7 +3291,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

    A list of key-value pairs to associate with the alarm. You can associate as many as 50 tags with an alarm.

    Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.

    If you are using this operation to update an existing alarm, any tags you specify in this parameter are ignored. To change the tags of an existing alarm, use TagResource or UntagResource.

    " + "documentation":"

    A list of key-value pairs to associate with the alarm. You can associate as many as 50 tags with an alarm. To be able to associate tags with the alarm when you create the alarm, you must have the cloudwatch:TagResource permission.

    Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.

    If you are using this operation to update an existing alarm, any tags you specify in this parameter are ignored. To change the tags of an existing alarm, use TagResource or UntagResource.

    " }, "ThresholdMetricId":{ "shape":"MetricId", @@ -3676,7 +3676,7 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

    The ARN of the CloudWatch resource that you're adding tags to.

    The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

    The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name

    For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

    " + "documentation":"

    The ARN of the CloudWatch resource that you're adding tags to.

    The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

    The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule/insight-rule-name

    For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

    " }, "Tags":{ "shape":"TagList", @@ -3720,7 +3720,7 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

    The ARN of the CloudWatch resource that you're removing tags from.

    The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

    The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name

    For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

    " + "documentation":"

    The ARN of the CloudWatch resource that you're removing tags from.

    The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

    The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule/insight-rule-name

    For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

    " }, "TagKeys":{ "shape":"TagKeyList", diff --git a/services/cloudwatchevents/pom.xml b/services/cloudwatchevents/pom.xml index f17f1a9e29aa..c2ea86e5057f 100644 --- a/services/cloudwatchevents/pom.xml +++ b/services/cloudwatchevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT cloudwatchevents AWS Java SDK :: Services :: Amazon CloudWatch Events diff --git a/services/cloudwatchevents/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/cloudwatchevents/src/main/resources/codegen-resources/endpoint-rule-set.json index ee46f7c9d38a..439d2e2f6fe3 100644 --- a/services/cloudwatchevents/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/cloudwatchevents/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,21 +45,69 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ { - "fn": "parseURL", + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", "argv": [ { - "ref": "Endpoint" + "ref": "Region" } ], - "assign": "url" + "assign": "PartitionResult" } ], "type": "tree", @@ -75,13 +122,17 @@ }, true ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], "type": "tree", "rules": [ { @@ -89,143 +140,86 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "PartitionResult" - }, - "supportsFIPS" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } ] } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + ], + "type": "tree", + "rules": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] + "conditions": [], + "endpoint": { + "url": "https://events-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + 
"properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": "https://events-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], "type": "tree", "rules": [ { @@ -236,12 +230,12 @@ { "ref": "Region" }, - "us-gov-west-1" + "us-gov-east-1" ] } ], "endpoint": { - "url": "https://events.us-gov-west-1.amazonaws.com", + "url": "https://events.us-gov-east-1.amazonaws.com", "properties": {}, "headers": {} }, @@ -255,12 +249,12 @@ { "ref": "Region" }, - "us-gov-east-1" + "us-gov-west-1" ] } ], "endpoint": { - "url": "https://events.us-gov-east-1.amazonaws.com", + "url": "https://events.us-gov-west-1.amazonaws.com", "properties": {}, "headers": {} }, @@ -276,78 +270,83 @@ "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://events.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://events.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://events.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://events.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of 
file diff --git a/services/cloudwatchevents/src/main/resources/codegen-resources/endpoint-tests.json b/services/cloudwatchevents/src/main/resources/codegen-resources/endpoint-tests.json index b5ab3b1fec3f..13e907543376 100644 --- a/services/cloudwatchevents/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/cloudwatchevents/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,94 +1,68 @@ { "testCases": [ { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.ap-south-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events-fips.ap-south-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack enabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events.ap-south-2.api.aws" + "url": "https://events.af-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-2", - "UseFIPS": false + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events.ap-south-2.amazonaws.com" + "url": "https://events.ap-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-south-2", - "UseFIPS": false + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events-fips.ap-south-1.api.aws" + "url": "https://events.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": true + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events-fips.ap-south-1.amazonaws.com" + "url": "https://events.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": true + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events.ap-south-1.api.aws" + "url": "https://events.ap-northeast-3.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": false + "Region": "ap-northeast-3", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -99,256 +73,48 @@ } }, "params": { - "UseDualStack": false, "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - 
"url": "https://events-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.eu-south-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events-fips.eu-south-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events.eu-south-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.eu-south-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.me-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - 
"url": "https://events-fips.me-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events.me-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.me-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-central-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events-fips.ca-central-1.api.aws" + "url": "https://events.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": true + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events-fips.ca-central-1.amazonaws.com" + "url": "https://events.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": true + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events.ca-central-1.api.aws" + "url": "https://events.ap-southeast-3.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": false + "Region": "ap-southeast-3", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -359,1353 +125,460 @@ } }, "params": { - "UseDualStack": false, "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events-fips.eu-central-1.amazonaws.com" + "url": "https://events.eu-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, "Region": "eu-central-1", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events.eu-central-1.api.aws" + "url": "https://events.eu-north-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack 
disabled", - "expect": { - "endpoint": { - "url": "https://events.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "UseDualStack": true, - "Region": "us-iso-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events-fips.us-iso-west-1.c2s.ic.gov" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-iso-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "UseDualStack": true, - "Region": "us-iso-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.us-iso-west-1.c2s.ic.gov" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-iso-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.eu-central-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events-fips.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events.eu-central-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region 
us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - 
"expect": { - "endpoint": { - "url": "https://events-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://events.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS 
disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.ap-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events-fips.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events.ap-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events-fips.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled 
and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.us-gov-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events.us-gov-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events.us-gov-west-1.amazonaws.com" + "url": "https://events.eu-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events-fips.ap-southeast-1.api.aws" + "url": "https://events.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events-fips.ap-southeast-1.amazonaws.com" + "url": "https://events.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events.ap-southeast-1.api.aws" + "url": "https://events.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events.ap-southeast-1.amazonaws.com" + "url": "https://events.me-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events-fips.ap-southeast-2.api.aws" + "url": "https://events.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": 
true + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events-fips.ap-southeast-2.amazonaws.com" + "url": "https://events.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events.ap-southeast-2.api.aws" + "url": "https://events-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events.ap-southeast-2.amazonaws.com" + "url": "https://events.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://events-fips.us-east-2.amazonaws.com" + } }, "params": { - "UseDualStack": true, - "Region": "us-iso-east-1", - "UseFIPS": true + "Region": "us-east-2", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://events.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-iso-east-1", - "UseFIPS": true + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://events-fips.us-west-1.amazonaws.com" + } }, "params": { - "UseDualStack": true, - "Region": "us-iso-east-1", - "UseFIPS": false + "Region": "us-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events.us-iso-east-1.c2s.ic.gov" + "url": "https://events.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-iso-east-1", - "UseFIPS": false + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": 
"For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events-fips.ap-southeast-3.api.aws" + "url": "https://events-fips.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": true + "Region": "us-west-2", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://events-fips.ap-southeast-3.amazonaws.com" + "url": "https://events-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-3", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://events.ap-southeast-3.api.aws" + "url": "https://events.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events.ap-southeast-3.amazonaws.com" + "url": "https://events.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-3", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events-fips.ap-southeast-4.api.aws" + "url": "https://events.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-4", - "UseFIPS": true + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://events-fips.ap-southeast-4.amazonaws.com" + "url": "https://events-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-4", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events.ap-southeast-4.api.aws" + "url": "https://events-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-4", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://events.ap-southeast-4.amazonaws.com" + "url": "https://events.cn-north-1.api.amazonwebservices.com.cn" } }, 
"params": { - "UseDualStack": false, - "Region": "ap-southeast-4", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events-fips.us-east-1.api.aws" + "url": "https://events.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events-fips.us-east-1.amazonaws.com" + "url": "https://events.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events.us-east-1.api.aws" + "url": "https://events.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events.us-east-1.amazonaws.com" + "url": "https://events.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://events-fips.us-east-2.api.aws" + "url": "https://events-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://events-fips.us-east-2.amazonaws.com" + "url": "https://events.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events.us-east-2.api.aws" + "url": "https://events.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-west-1 with FIPS 
disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events.us-east-2.amazonaws.com" + "url": "https://events.us-iso-west-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-iso-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://events-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events-fips.cn-northwest-1.amazonaws.com.cn" + "url": "https://events-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://events.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://events.cn-northwest-1.amazonaws.com.cn" + "url": "https://events.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -1714,9 +587,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, "Region": "us-isob-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -1727,9 +600,9 @@ } }, "params": { - "UseDualStack": false, "Region": "us-isob-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -1738,35 +611,35 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, "Region": "us-isob-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://events.us-isob-east-1.sc2s.sgov.gov" + "url": "https://example.com" } }, "params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, - "Region": "us-isob-east-1", - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled 
and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1776,9 +649,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1788,11 +661,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/cloudwatchevents/src/main/resources/codegen-resources/service-2.json b/services/cloudwatchevents/src/main/resources/codegen-resources/service-2.json index 808feef9dadc..3a7897f179a5 100644 --- a/services/cloudwatchevents/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudwatchevents/src/main/resources/codegen-resources/service-2.json @@ -633,7 +633,7 @@ {"shape":"ManagedRuleException"}, {"shape":"InternalException"} ], - "documentation":"

    Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule.

    Targets are the resources that are invoked when a rule is triggered.

    You can configure the following as targets for Events:

    • API destination

    • Amazon API Gateway REST API endpoints

    • API Gateway

    • Batch job queue

    • CloudWatch Logs group

    • CodeBuild project

    • CodePipeline

    • Amazon EC2 CreateSnapshot API call

    • Amazon EC2 RebootInstances API call

    • Amazon EC2 StopInstances API call

    • Amazon EC2 TerminateInstances API call

    • Amazon ECS tasks

    • Event bus in a different Amazon Web Services account or Region.

      You can use an event bus in the US East (N. Virginia) us-east-1, US West (Oregon) us-west-2, or Europe (Ireland) eu-west-1 Regions as a target for a rule.

    • Firehose delivery stream (Kinesis Data Firehose)

    • Inspector assessment template (Amazon Inspector)

    • Kinesis stream (Kinesis Data Stream)

    • Lambda function

    • Redshift clusters (Data API statement execution)

    • Amazon SNS topic

    • Amazon SQS queues (includes FIFO queues)

    • SSM Automation

    • SSM OpsItem

    • SSM Run Command

    • Step Functions state machines

    Creating rules with built-in targets is supported only in the Management Console. The built-in targets are EC2 CreateSnapshot API call, EC2 RebootInstances API call, EC2 StopInstances API call, and EC2 TerminateInstances API call.

    For some target types, PutTargets provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field.

    To be able to make API calls against the resources that you own, Amazon EventBridge needs the appropriate permissions. For Lambda and Amazon SNS resources, EventBridge relies on resource-based policies. For EC2 instances, Kinesis Data Streams, Step Functions state machines and API Gateway REST APIs, EventBridge relies on IAM roles that you specify in the RoleARN argument in PutTargets. For more information, see Authentication and Access Control in the Amazon EventBridge User Guide.

    If another Amazon Web Services account is in the same region and has granted you permission (using PutPermission), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn value when you run PutTargets. If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event is not charged. For more information, see Amazon EventBridge Pricing.

    Input, InputPath, and InputTransformer are not available with PutTargets if the target is an event bus of a different Amazon Web Services account.

    If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between Amazon Web Services Accounts in the Amazon EventBridge User Guide.

    For more information about enabling cross-account events, see PutPermission.

    Input, InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event:

    • If none of the following arguments are specified for a target, then the entire event is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from the event is passed to the target).

    • If Input is specified in the form of valid JSON, then the matched event is overridden with this constant.

    • If InputPath is specified in the form of JSONPath (for example, $.detail), then only the part of the event specified in the path is passed to the target (for example, only the detail part of the event is passed).

    • If InputTransformer is specified, then one or more specified JSONPaths are extracted from the event and used as values in a template that you specify as the input to the target.

    When you specify InputPath or InputTransformer, you must use JSON dot notation, not bracket notation.

    When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect.

    This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.

    " + "documentation":"

    Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule.

    Targets are the resources that are invoked when a rule is triggered.

    You can configure the following as targets for Events:

    • API destination

    • Amazon API Gateway REST API endpoints

    • API Gateway

    • Batch job queue

    • CloudWatch Logs group

    • CodeBuild project

    • CodePipeline

    • Amazon EC2 CreateSnapshot API call

    • Amazon EC2 RebootInstances API call

    • Amazon EC2 StopInstances API call

    • Amazon EC2 TerminateInstances API call

    • Amazon ECS tasks

    • Event bus in a different Amazon Web Services account or Region.

      You can use an event bus in the US East (N. Virginia) us-east-1, US West (Oregon) us-west-2, or Europe (Ireland) eu-west-1 Regions as a target for a rule.

    • Firehose delivery stream (Kinesis Data Firehose)

    • Inspector assessment template (Amazon Inspector)

    • Kinesis stream (Kinesis Data Stream)

    • Lambda function

    • Redshift clusters (Data API statement execution)

    • Amazon SNS topic

    • Amazon SQS queues (includes FIFO queues)

    • SSM Automation

    • SSM OpsItem

    • SSM Run Command

    • Step Functions state machines

    Creating rules with built-in targets is supported only in the Amazon Web Services Management Console. The built-in targets are EC2 CreateSnapshot API call, EC2 RebootInstances API call, EC2 StopInstances API call, and EC2 TerminateInstances API call.

    For some target types, PutTargets provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field.

    To be able to make API calls against the resources that you own, Amazon EventBridge needs the appropriate permissions. For Lambda and Amazon SNS resources, EventBridge relies on resource-based policies. For EC2 instances, Kinesis Data Streams, Step Functions state machines and API Gateway REST APIs, EventBridge relies on IAM roles that you specify in the RoleARN argument in PutTargets. For more information, see Authentication and Access Control in the Amazon EventBridge User Guide.

    If another Amazon Web Services account is in the same region and has granted you permission (using PutPermission), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn value when you run PutTargets. If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event is not charged. For more information, see Amazon EventBridge Pricing.

    Input, InputPath, and InputTransformer are not available with PutTargets if the target is an event bus of a different Amazon Web Services account.

    If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between Amazon Web Services Accounts in the Amazon EventBridge User Guide.

    For more information about enabling cross-account events, see PutPermission.

    Input, InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event:

    • If none of the following arguments are specified for a target, then the entire event is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from the event is passed to the target).

    • If Input is specified in the form of valid JSON, then the matched event is overridden with this constant.

    • If InputPath is specified in the form of JSONPath (for example, $.detail), then only the part of the event specified in the path is passed to the target (for example, only the detail part of the event is passed).

    • If InputTransformer is specified, then one or more specified JSONPaths are extracted from the event and used as values in a template that you specify as the input to the target.

    When you specify InputPath or InputTransformer, you must use JSON dot notation, not bracket notation.

    When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect.

    This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.

    " }, "RemovePermission":{ "name":"RemovePermission", diff --git a/services/cloudwatchlogs/pom.xml b/services/cloudwatchlogs/pom.xml index ffc4d44d20a2..436815aa5c34 100644 --- a/services/cloudwatchlogs/pom.xml +++ b/services/cloudwatchlogs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT cloudwatchlogs AWS Java SDK :: Services :: Amazon CloudWatch Logs diff --git a/services/cloudwatchlogs/src/main/resources/codegen-resources/customization.config b/services/cloudwatchlogs/src/main/resources/codegen-resources/customization.config index dc10a538fc44..f61c7f5add4a 100644 --- a/services/cloudwatchlogs/src/main/resources/codegen-resources/customization.config +++ b/services/cloudwatchlogs/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "deleteResourcePolicy", "putResourcePolicy" ], diff --git a/services/codeartifact/pom.xml b/services/codeartifact/pom.xml index 23cf96e5b2b2..46673af12feb 100644 --- a/services/codeartifact/pom.xml +++ b/services/codeartifact/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT codeartifact AWS Java SDK :: Services :: Codeartifact diff --git a/services/codebuild/pom.xml b/services/codebuild/pom.xml index 23c6afa5bc16..802324e09f48 100644 --- a/services/codebuild/pom.xml +++ b/services/codebuild/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT codebuild AWS Java SDK :: Services :: AWS Code Build diff --git a/services/codecatalyst/pom.xml b/services/codecatalyst/pom.xml index e6c1b53be738..e2b19a006bf4 100644 --- a/services/codecatalyst/pom.xml +++ b/services/codecatalyst/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT codecatalyst AWS Java SDK :: Services :: Code Catalyst diff --git a/services/codecommit/pom.xml b/services/codecommit/pom.xml index de0becc06b96..b659a2897721 100644 --- a/services/codecommit/pom.xml +++ b/services/codecommit/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT codecommit AWS Java SDK :: Services :: AWS CodeCommit diff --git a/services/codecommit/src/main/resources/codegen-resources/customization.config b/services/codecommit/src/main/resources/codegen-resources/customization.config index ca10e18215bd..18a06a2c508b 100644 --- a/services/codecommit/src/main/resources/codegen-resources/customization.config +++ b/services/codecommit/src/main/resources/codegen-resources/customization.config @@ -2,7 +2,7 @@ "verifiedSimpleMethods": [ "listRepositories" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "getBranch" ] } diff --git a/services/codecommit/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/codecommit/src/main/resources/codegen-resources/endpoint-rule-set.json index 09eaf8c3674f..96a72fcdcaa3 100644 --- a/services/codecommit/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/codecommit/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,21 +45,69 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom 
endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ { - "fn": "parseURL", + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", "argv": [ { - "ref": "Endpoint" + "ref": "Region" } ], - "assign": "url" + "assign": "PartitionResult" } ], "type": "tree", @@ -75,13 +122,17 @@ }, true ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], "type": "tree", "rules": [ { @@ -89,143 +140,86 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "PartitionResult" - }, - "supportsFIPS" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } ] } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + ], + "type": "tree", + "rules": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] + "conditions": [], + "endpoint": { + "url": "https://codecommit-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": "https://codecommit-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], "type": "tree", "rules": [ { @@ -238,78 +232,83 @@ "type": "endpoint" } ] + }, + { + "conditions": [], + "error": 
"FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://codecommit.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://codecommit.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://codecommit.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://codecommit.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/codecommit/src/main/resources/codegen-resources/endpoint-tests.json b/services/codecommit/src/main/resources/codegen-resources/endpoint-tests.json index 3c91d4fa56eb..af0933d991fd 100644 --- a/services/codecommit/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/codecommit/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,94 +1,68 @@ { "testCases": [ { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.ap-south-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.ap-south-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack enabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.ap-south-2.api.aws" + "url": "https://codecommit.af-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-2", - "UseFIPS": false + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.ap-south-2.amazonaws.com" + "url": 
"https://codecommit.ap-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-south-2", - "UseFIPS": false + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ap-south-1.api.aws" + "url": "https://codecommit.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": true + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ap-south-1.amazonaws.com" + "url": "https://codecommit.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": true + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.ap-south-1.api.aws" + "url": "https://codecommit.ap-northeast-3.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": false + "Region": "ap-northeast-3", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -99,204 +73,35 @@ } }, "params": { - "UseDualStack": false, "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { 
- "url": "https://codecommit.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.me-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.me-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.me-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.me-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ca-central-1.amazonaws.com" + "url": "https://codecommit.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": true + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.ca-central-1.api.aws" + "url": "https://codecommit.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": false + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -307,1166 +112,521 @@ } }, "params": { - "UseDualStack": false, "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - 
"documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://codecommit.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://codecommit.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For 
region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.sa-east-1.amazonaws.com" + "url": "https://codecommit-fips.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": true + "Region": "ca-central-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.sa-east-1.api.aws" + "url": "https://codecommit.eu-central-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - 
"url": "https://codecommit.sa-east-1.amazonaws.com" + "url": "https://codecommit.eu-north-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ap-east-1.api.aws" + "url": "https://codecommit.eu-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": true + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ap-east-1.amazonaws.com" + "url": "https://codecommit.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": true + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.ap-east-1.api.aws" + "url": "https://codecommit.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": false + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.ap-east-1.amazonaws.com" + "url": "https://codecommit.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": false + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://codecommit.me-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": true + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.cn-north-1.amazonaws.com.cn" + "url": "https://codecommit.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": true + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://codecommit.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + 
"UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.cn-north-1.amazonaws.com.cn" + "url": "https://codecommit-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.us-gov-west-1.api.aws" + "url": "https://codecommit.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.us-gov-west-1.amazonaws.com" + "url": "https://codecommit-fips.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "us-east-2", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.us-gov-west-1.api.aws" + "url": "https://codecommit.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.us-gov-west-1.amazonaws.com" + "url": "https://codecommit-fips.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "us-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ap-southeast-1.api.aws" + "url": "https://codecommit.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ap-southeast-1.amazonaws.com" + "url": "https://codecommit-fips.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-west-2", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", 
"expect": { "endpoint": { - "url": "https://codecommit.ap-southeast-1.api.aws" + "url": "https://codecommit-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codecommit.ap-southeast-1.amazonaws.com" + "url": "https://codecommit.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ap-southeast-2.api.aws" + "url": "https://codecommit.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ap-southeast-2.amazonaws.com" + "url": "https://codecommit.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codecommit.ap-southeast-2.api.aws" + "url": "https://codecommit-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.ap-southeast-2.amazonaws.com" + "url": "https://codecommit-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ap-southeast-3.api.aws" + "url": "https://codecommit.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ap-southeast-3.amazonaws.com" + "url": "https://codecommit.us-gov-east-1.amazonaws.com" } 
}, "params": { - "UseDualStack": false, - "Region": "ap-southeast-3", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.ap-southeast-3.api.aws" + "url": "https://codecommit-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.ap-southeast-3.amazonaws.com" + "url": "https://codecommit.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-3", - "UseFIPS": false + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.us-east-1.api.aws" + "url": "https://codecommit-fips.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.us-east-1.amazonaws.com" + "url": "https://codecommit-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codecommit.us-east-1.api.aws" + "url": "https://codecommit.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://codecommit.us-east-1.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.us-east-2.api.aws" + "url": "https://codecommit-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - 
"documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://codecommit-fips.us-east-2.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.us-east-2.api.aws" + "url": "https://codecommit.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://codecommit.us-east-2.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://codecommit-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://codecommit-fips.cn-northwest-1.amazonaws.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://codecommit.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://codecommit.cn-northwest-1.amazonaws.com.cn" + "url": "https://example.com" } }, "params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - "documentation": "For custom 
endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1476,9 +636,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1488,11 +648,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/codecommit/src/main/resources/codegen-resources/paginators-1.json b/services/codecommit/src/main/resources/codegen-resources/paginators-1.json index ab4bae4cd486..f363a39f6e3b 100644 --- a/services/codecommit/src/main/resources/codegen-resources/paginators-1.json +++ b/services/codecommit/src/main/resources/codegen-resources/paginators-1.json @@ -50,6 +50,11 @@ "output_token": "nextToken", "result_key": "branches" }, + "ListFileCommitHistory": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken" + }, "ListPullRequests": { "input_token": "nextToken", "limit_key": "maxResults", diff --git a/services/codecommit/src/main/resources/codegen-resources/service-2.json b/services/codecommit/src/main/resources/codegen-resources/service-2.json index 95dd51101594..74ea48996c62 100644 --- a/services/codecommit/src/main/resources/codegen-resources/service-2.json +++ b/services/codecommit/src/main/resources/codegen-resources/service-2.json @@ -172,7 +172,7 @@ {"shape":"InvalidApprovalRuleTemplateDescriptionException"}, {"shape":"NumberOfRuleTemplatesExceededException"} ], - "documentation":"

    Creates a template for approval rules that can then be associated with one or more repositories in your AWS account. When you associate a template with a repository, AWS CodeCommit creates an approval rule that matches the conditions of the template for all pull requests that meet the conditions of the template. For more information, see AssociateApprovalRuleTemplateWithRepository.

    " + "documentation":"

    Creates a template for approval rules that can then be associated with one or more repositories in your Amazon Web Services account. When you associate a template with a repository, CodeCommit creates an approval rule that matches the conditions of the template for all pull requests that meet the conditions of the template. For more information, see AssociateApprovalRuleTemplateWithRepository.

    " }, "CreateBranch":{ "name":"CreateBranch", @@ -1084,7 +1084,7 @@ {"shape":"InvalidMaxResultsException"}, {"shape":"InvalidContinuationTokenException"} ], - "documentation":"

    Lists all approval rule templates in the specified AWS Region in your AWS account. If an AWS Region is not specified, the AWS Region where you are signed in is used.

    " + "documentation":"

    Lists all approval rule templates in the specified Amazon Web Services Region in your Amazon Web Services account. If an Amazon Web Services Region is not specified, the Amazon Web Services Region where you are signed in is used.

    " }, "ListAssociatedApprovalRuleTemplatesForRepository":{ "name":"ListAssociatedApprovalRuleTemplatesForRepository", @@ -1129,6 +1129,32 @@ ], "documentation":"

    Gets information about one or more branches in a repository.

    " }, + "ListFileCommitHistory":{ + "name":"ListFileCommitHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFileCommitHistoryRequest"}, + "output":{"shape":"ListFileCommitHistoryResponse"}, + "errors":[ + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"InvalidContinuationTokenException"}, + {"shape":"InvalidMaxResultsException"}, + {"shape":"TipsDivergenceExceededException"}, + {"shape":"CommitRequiredException"}, + {"shape":"InvalidCommitException"}, + {"shape":"CommitDoesNotExistException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ], + "documentation":"

    Retrieves a list of commits and changes to a specified file.
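
    Illustrative only, not part of this patch: a minimal sketch of paging through the new ListFileCommitHistory operation with the synchronous Java client once this model is generated. Only nextToken and maxResults come from the paginator entry added in paginators-1.json; repositoryName is implied by RepositoryNameRequiredException above, and filePath is an assumed member name for the "specified file".

        import software.amazon.awssdk.services.codecommit.CodeCommitClient;
        import software.amazon.awssdk.services.codecommit.model.ListFileCommitHistoryRequest;
        import software.amazon.awssdk.services.codecommit.model.ListFileCommitHistoryResponse;

        public class ListFileHistorySketch {
            public static void main(String[] args) {
                try (CodeCommitClient codeCommit = CodeCommitClient.create()) {
                    String nextToken = null;
                    do {
                        ListFileCommitHistoryResponse page = codeCommit.listFileCommitHistory(
                                ListFileCommitHistoryRequest.builder()
                                        .repositoryName("MyDemoRepo")   // assumed member (implied by RepositoryNameRequiredException)
                                        .filePath("README.md")          // assumed member name for the target file
                                        .maxResults(50)                 // limit_key from the paginator entry
                                        .nextToken(nextToken)           // input_token from the paginator entry
                                        .build());
                        // inspect the returned commit history here (response members are not shown in this excerpt)
                        nextToken = page.nextToken();                   // output_token from the paginator entry
                    } while (nextToken != null);
                }
            }
        }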

    " + }, "ListPullRequests":{ "name":"ListPullRequests", "http":{ @@ -1205,7 +1231,7 @@ {"shape":"ResourceArnRequiredException"}, {"shape":"InvalidResourceArnException"} ], - "documentation":"

    Gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeCommit. For a list of valid resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.

    " + "documentation":"

    Gets information about Amazon Web Services tags for a specified Amazon Resource Name (ARN) in CodeCommit. For a list of valid resources in CodeCommit, see CodeCommit Resources and Operations in the CodeCommit User Guide.

    " }, "MergeBranchesByFastForward":{ "name":"MergeBranchesByFastForward", @@ -1654,7 +1680,7 @@ {"shape":"DirectoryNameConflictsWithFileNameException"}, {"shape":"FilePathConflictsWithSubmodulePathException"} ], - "documentation":"

    Adds or updates a file in a branch in an AWS CodeCommit repository, and generates a commit for the addition in the specified branch.

    " + "documentation":"

    Adds or updates a file in a branch in a CodeCommit repository, and generates a commit for the addition in the specified branch.
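
    Illustrative only, not part of this patch: a minimal sketch of PutFile with the synchronous Java client, assuming the generated model follows the usual SDK v2 shape (PutFileRequest with repositoryName, branchName, filePath, fileContent, parentCommitId, and commitMessage). The repository and file names are placeholders.

        import software.amazon.awssdk.core.SdkBytes;
        import software.amazon.awssdk.services.codecommit.CodeCommitClient;
        import software.amazon.awssdk.services.codecommit.model.PutFileRequest;
        import software.amazon.awssdk.services.codecommit.model.PutFileResponse;

        public class PutFileSketch {
            public static void main(String[] args) {
                try (CodeCommitClient codeCommit = CodeCommitClient.create()) {
                    // Look up the current tip of the branch; PutFile needs it unless the branch is empty.
                    String headCommit = codeCommit.getBranch(b -> b.repositoryName("MyDemoRepo").branchName("main"))
                                                  .branch().commitId();
                    PutFileResponse response = codeCommit.putFile(PutFileRequest.builder()
                            .repositoryName("MyDemoRepo")
                            .branchName("main")
                            .filePath("docs/notes.txt")
                            .fileContent(SdkBytes.fromUtf8String("hello from the SDK"))
                            .parentCommitId(headCommit)      // required when the branch already has commits
                            .commitMessage("Add notes file")
                            .build());
                    System.out.println("New commit: " + response.commitId());
                }
            }
        }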

    " }, "PutRepositoryTriggers":{ "name":"PutRepositoryTriggers", @@ -1707,7 +1733,7 @@ {"shape":"InvalidSystemTagUsageException"}, {"shape":"TagPolicyException"} ], - "documentation":"

    Adds or updates tags for a resource in AWS CodeCommit. For a list of valid resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.

    " + "documentation":"

    Adds or updates tags for a resource in CodeCommit. For a list of valid resources in CodeCommit, see CodeCommit Resources and Operations in the CodeCommit User Guide.
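
    Illustrative only, not part of this patch: a minimal sketch of TagResource with the synchronous Java client, assuming the usual generated request shape (resourceArn plus a map of tag keys to values). The ARN and tag values are placeholders.

        import java.util.HashMap;
        import java.util.Map;
        import software.amazon.awssdk.services.codecommit.CodeCommitClient;
        import software.amazon.awssdk.services.codecommit.model.TagResourceRequest;

        public class TagRepositorySketch {
            public static void main(String[] args) {
                Map<String, String> tags = new HashMap<>();
                tags.put("team", "platform");
                tags.put("cost-center", "1234");
                try (CodeCommitClient codeCommit = CodeCommitClient.create()) {
                    codeCommit.tagResource(TagResourceRequest.builder()
                            .resourceArn("arn:aws:codecommit:us-east-1:123456789012:MyDemoRepo") // example repository ARN
                            .tags(tags)
                            .build());
                }
            }
        }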

    " }, "TestRepositoryTriggers":{ "name":"TestRepositoryTriggers", @@ -1760,7 +1786,7 @@ {"shape":"InvalidSystemTagUsageException"}, {"shape":"TagPolicyException"} ], - "documentation":"

    Removes tags for a resource in AWS CodeCommit. For a list of valid resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.

    " + "documentation":"

    Removes tags for a resource in CodeCommit. For a list of valid resources in CodeCommit, see CodeCommit Resources and Operations in the CodeCommit User Guide.

    " }, "UpdateApprovalRuleTemplateContent":{ "name":"UpdateApprovalRuleTemplateContent", @@ -1999,7 +2025,7 @@ {"shape":"RepositoryNameRequiredException"}, {"shape":"InvalidRepositoryNameException"} ], - "documentation":"

    Renames a repository. The repository name must be unique across the calling AWS account. Repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters. The suffix .git is prohibited. For more information about the limits on repository names, see Limits in the AWS CodeCommit User Guide.

    " + "documentation":"

    Renames a repository. The repository name must be unique across the calling Amazon Web Services account. Repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters. The suffix .git is prohibited. For more information about the limits on repository names, see Quotas in the CodeCommit User Guide.

    " } }, "shapes":{ @@ -2008,7 +2034,7 @@ "type":"structure", "members":{ }, - "documentation":"

    The specified Amazon Resource Name (ARN) does not exist in the AWS account.

    ", + "documentation":"

    The specified Amazon Resource Name (ARN) does not exist in the Amazon Web Services account.

    ", "exception":true }, "AdditionalData":{"type":"string"}, @@ -2198,7 +2224,7 @@ "type":"structure", "members":{ }, - "documentation":"

    The specified approval rule template does not exist. Verify that the name is correct and that you are signed in to the AWS Region where the template was created, and then try again.

    ", + "documentation":"

    The specified approval rule template does not exist. Verify that the name is correct and that you are signed in to the Amazon Web Services Region where the template was created, and then try again.

    ", "exception":true }, "ApprovalRuleTemplateId":{"type":"string"}, @@ -2218,7 +2244,7 @@ "type":"structure", "members":{ }, - "documentation":"

    You cannot create an approval rule template with that name because a template with that name already exists in this AWS Region for your AWS account. Approval rule template names must be unique.

    ", + "documentation":"

    You cannot create an approval rule template with that name because a template with that name already exists in this Amazon Web Services Region for your Amazon Web Services account. Approval rule template names must be unique.

    ", "exception":true }, "ApprovalRuleTemplateNameList":{ @@ -2295,7 +2321,7 @@ "type":"structure", "members":{ }, - "documentation":"

    The specified Amazon Resource Name (ARN) does not exist in the AWS account.

    ", + "documentation":"

    The specified Amazon Resource Name (ARN) does not exist in the Amazon Web Services account.

    ", "exception":true }, "BatchAssociateApprovalRuleTemplateWithRepositoriesError":{ @@ -2783,7 +2809,7 @@ "type":"structure", "members":{ }, - "documentation":"

    The comment is too large. Comments are limited to 1,000 characters.

    ", + "documentation":"

    The comment is too large. Comments are limited to 10,240 characters.

    ", "exception":true }, "CommentDeletedException":{ @@ -3116,7 +3142,7 @@ }, "approvalRuleTemplateContent":{ "shape":"ApprovalRuleTemplateContent", - "documentation":"

    The content of the approval rule that is created on pull requests in associated repositories. If you specify one or more destination references (branches), approval rules are created in an associated repository only if their destination references (branches) match those specified in the template.

    When you create the content of the approval rule template, you can specify approvers in an approval pool in one of two ways:

    • CodeCommitApprovers: This option only requires an AWS account and a resource. It can be used for both IAM users and federated access users whose name matches the provided resource name. This is a very powerful option that offers a great deal of flexibility. For example, if you specify the AWS account 123456789012 and Mary_Major, all of the following are counted as approvals coming from that user:

      • An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major)

      • A federated user identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major)

      This option does not recognize an active session of someone assuming the role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major).

    • Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role.

    For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.

    " + "documentation":"

    The content of the approval rule that is created on pull requests in associated repositories. If you specify one or more destination references (branches), approval rules are created in an associated repository only if their destination references (branches) match those specified in the template.

    When you create the content of the approval rule template, you can specify approvers in an approval pool in one of two ways:

    • CodeCommitApprovers: This option only requires an Amazon Web Services account and a resource. It can be used for both IAM users and federated access users whose name matches the provided resource name. This is a very powerful option that offers a great deal of flexibility. For example, if you specify the Amazon Web Services account 123456789012 and Mary_Major, all of the following are counted as approvals coming from that user:

      • An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major)

      • A federated user identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major)

      This option does not recognize an active session of someone assuming the role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major).

    • Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role.

    For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.
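
    Illustrative only, not part of this patch: a minimal sketch of creating an approval rule template with the content described above, using the synchronous Java client. The rule-content JSON (the 2018-11-08 schema version, DestinationReferences, Statements, NumberOfApprovalsNeeded, ApprovalPoolMembers) follows the publicly documented CodeCommit approval rule format, not this excerpt, and the names are placeholders.

        import software.amazon.awssdk.services.codecommit.CodeCommitClient;
        import software.amazon.awssdk.services.codecommit.model.CreateApprovalRuleTemplateRequest;

        public class ApprovalTemplateSketch {
            public static void main(String[] args) {
                // Two approvals required from anyone assuming the CodeCommitReview role (wildcard session name).
                String ruleContent = "{"
                        + "\"Version\": \"2018-11-08\","
                        + "\"DestinationReferences\": [\"refs/heads/main\"],"
                        + "\"Statements\": [{"
                        + "  \"Type\": \"Approvers\","
                        + "  \"NumberOfApprovalsNeeded\": 2,"
                        + "  \"ApprovalPoolMembers\": [\"arn:aws:sts::123456789012:assumed-role/CodeCommitReview/*\"]"
                        + "}]}";
                try (CodeCommitClient codeCommit = CodeCommitClient.create()) {
                    codeCommit.createApprovalRuleTemplate(CreateApprovalRuleTemplateRequest.builder()
                            .approvalRuleTemplateName("2-approvals-for-main")
                            .approvalRuleTemplateDescription("Require two approvals on pull requests into main")
                            .approvalRuleTemplateContent(ruleContent)
                            .build());
                }
            }
        }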

    " }, "approvalRuleTemplateDescription":{ "shape":"ApprovalRuleTemplateDescription", @@ -3249,7 +3275,7 @@ }, "approvalRuleContent":{ "shape":"ApprovalRuleContent", - "documentation":"

    The content of the approval rule, including the number of approvals needed and the structure of an approval pool defined for approvals, if any. For more information about approval pools, see the AWS CodeCommit User Guide.

    When you create the content of the approval rule, you can specify approvers in an approval pool in one of two ways:

    • CodeCommitApprovers: This option only requires an AWS account and a resource. It can be used for both IAM users and federated access users whose name matches the provided resource name. This is a very powerful option that offers a great deal of flexibility. For example, if you specify the AWS account 123456789012 and Mary_Major, all of the following would be counted as approvals coming from that user:

      • An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major)

      • A federated user identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major)

      This option does not recognize an active session of someone assuming the role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major).

    • Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role.

    For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.

    " + "documentation":"

    The content of the approval rule, including the number of approvals needed and the structure of an approval pool defined for approvals, if any. For more information about approval pools, see the CodeCommit User Guide.

    When you create the content of the approval rule, you can specify approvers in an approval pool in one of two ways:

    • CodeCommitApprovers: This option only requires an Amazon Web Services account and a resource. It can be used for both IAM users and federated access users whose name matches the provided resource name. This is a very powerful option that offers a great deal of flexibility. For example, if you specify the Amazon Web Services account 123456789012 and Mary_Major, all of the following would be counted as approvals coming from that user:

      • An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major)

      • A federated user identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major)

      This option does not recognize an active session of someone assuming the role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major).

    • Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role.

    For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.

    " } } }, @@ -3284,7 +3310,7 @@ }, "clientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

    A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request returns information about the initial request that used that token.

    The AWS SDKs prepopulate client request tokens. If you are using an AWS SDK, an idempotency token is created for you.

    ", + "documentation":"

    A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request returns information about the initial request that used that token.

    The Amazon Web Services SDKs prepopulate client request tokens. If you are using an Amazon Web Services SDK, an idempotency token is created for you.
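
    As a hedged illustration of the idempotency behavior described above: clientRequestToken appears on several CodeCommit create operations, and CreatePullRequest is used here only as an example with the AWS SDK for Java v2. The repository, branch names, title, and token value are placeholders; if clientRequestToken is omitted, the SDK generates one automatically because the member is marked as an idempotency token.

    import software.amazon.awssdk.services.codecommit.CodeCommitClient;
    import software.amazon.awssdk.services.codecommit.model.CreatePullRequestRequest;
    import software.amazon.awssdk.services.codecommit.model.CreatePullRequestResponse;
    import software.amazon.awssdk.services.codecommit.model.Target;

    public class IdempotencyTokenExample {
        public static void main(String[] args) {
            try (CodeCommitClient codeCommit = CodeCommitClient.create()) {
                CreatePullRequestRequest request = CreatePullRequestRequest.builder()
                        .title("Example pull request")
                        .targets(Target.builder()
                                .repositoryName("MyDemoRepo")            // placeholder repository
                                .sourceReference("refs/heads/feature")   // placeholder source branch
                                .destinationReference("refs/heads/main") // placeholder destination branch
                                .build())
                        // Optional: supply your own token. Retrying with the same token returns
                        // information about the initial request instead of creating a duplicate.
                        .clientRequestToken("example-token-123")
                        .build();
                CreatePullRequestResponse response = codeCommit.createPullRequest(request);
                System.out.println("Pull request ID: " + response.pullRequest().pullRequestId());
            }
        }
    }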

    ", "idempotencyToken":true } } @@ -3305,7 +3331,7 @@ "members":{ "repositoryName":{ "shape":"RepositoryName", - "documentation":"

    The name of the new repository to be created.

    The repository name must be unique across the calling AWS account. Repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters. For more information about the limits on repository names, see Limits in the AWS CodeCommit User Guide. The suffix .git is prohibited.

    " + "documentation":"

    The name of the new repository to be created.

    The repository name must be unique across the calling Amazon Web Services account. Repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters. For more information about the limits on repository names, see Quotas in the CodeCommit User Guide. The suffix .git is prohibited.
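
    To make the repository-creation constraints above concrete, here is a minimal AWS SDK for Java v2 sketch; the repository name and description are placeholders and must satisfy the naming rules described above.

    import software.amazon.awssdk.services.codecommit.CodeCommitClient;
    import software.amazon.awssdk.services.codecommit.model.CreateRepositoryRequest;
    import software.amazon.awssdk.services.codecommit.model.CreateRepositoryResponse;

    public class CreateRepositoryExample {
        public static void main(String[] args) {
            try (CodeCommitClient codeCommit = CodeCommitClient.create()) {
                // "MyDemoRepo" is a placeholder; the name must be unique in the calling
                // account and must not end with the prohibited .git suffix.
                CreateRepositoryResponse response = codeCommit.createRepository(
                        CreateRepositoryRequest.builder()
                                .repositoryName("MyDemoRepo")
                                .repositoryDescription("Repository created from the SDK")
                                .build());
                System.out.println("Clone URL: " + response.repositoryMetadata().cloneUrlHttp());
            }
        }
    }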

    " }, "repositoryDescription":{ "shape":"RepositoryDescription", @@ -4025,9 +4051,28 @@ "type":"structure", "members":{ }, - "documentation":"

    The specified file exceeds the file size limit for AWS CodeCommit. For more information about limits in AWS CodeCommit, see AWS CodeCommit User Guide.

    ", + "documentation":"

    The specified file exceeds the file size limit for CodeCommit. For more information about limits in CodeCommit, see Quotas in the CodeCommit User Guide.

    ", "exception":true }, + "FileVersion":{ + "type":"structure", + "members":{ + "commit":{"shape":"Commit"}, + "blobId":{ + "shape":"ObjectId", + "documentation":"

    The blob ID of the object that represents the content of the file in this version.

    " + }, + "path":{ + "shape":"Path", + "documentation":"

    The name and path of the file at which this blob is indexed, and which contains the data for this version of the file. This value varies between file versions if the file is renamed or its path changes.

    " + }, + "revisionChildren":{ + "shape":"RevisionChildren", + "documentation":"

    An array of commit IDs that contain more recent versions of this file. If there are no additional versions of the file, this array will be empty.

    " + } + }, + "documentation":"

    Information about a version of a file.

    " + }, "FilesMetadata":{ "type":"list", "member":{"shape":"FileMetadata"} @@ -4248,15 +4293,15 @@ }, "repositoryName":{ "shape":"RepositoryName", - "documentation":"

    The name of the repository that contains the pull request.

    " + "documentation":"

    The name of the repository that contains the pull request. Requirement is conditional: repositoryName must be specified when beforeCommitId and afterCommitId are included.

    " }, "beforeCommitId":{ "shape":"CommitId", - "documentation":"

    The full commit ID of the commit in the destination branch that was the tip of the branch at the time the pull request was created.

    " + "documentation":"

    The full commit ID of the commit in the destination branch that was the tip of the branch at the time the pull request was created. Requirement is conditional: beforeCommitId must be specified when repositoryName is included.

    " }, "afterCommitId":{ "shape":"CommitId", - "documentation":"

    The full commit ID of the commit in the source branch that was the tip of the branch at the time the comment was made.

    " + "documentation":"

    The full commit ID of the commit in the source branch that was the tip of the branch at the time the comment was made. Requirement is conditional: afterCommitId must be specified when repositoryName is included.

    " }, "nextToken":{ "shape":"NextToken", @@ -4373,7 +4418,7 @@ }, "commitSpecifier":{ "shape":"CommitName", - "documentation":"

    The fully quaified reference that identifies the commit that contains the file. For example, you can specify a full commit ID, a tag, a branch name, or a reference such as refs/heads/master. If none is provided, the head commit is used.

    " + "documentation":"

    The fully qualified reference that identifies the commit that contains the file. For example, you can specify a full commit ID, a tag, a branch name, or a reference such as refs/heads/main. If none is provided, the head commit is used.
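
    A short sketch of passing a commit specifier such as refs/heads/main to GetFile with the AWS SDK for Java v2; the repository name and file path are placeholders, and omitting commitSpecifier falls back to the head commit as described above.

    import software.amazon.awssdk.services.codecommit.CodeCommitClient;
    import software.amazon.awssdk.services.codecommit.model.GetFileRequest;
    import software.amazon.awssdk.services.codecommit.model.GetFileResponse;

    public class GetFileExample {
        public static void main(String[] args) {
            try (CodeCommitClient codeCommit = CodeCommitClient.create()) {
                GetFileResponse response = codeCommit.getFile(GetFileRequest.builder()
                        .repositoryName("MyDemoRepo")        // placeholder repository
                        .commitSpecifier("refs/heads/main")  // branch, tag, or full commit ID
                        .filePath("README.md")               // placeholder file path
                        .build());
                // fileContent() exposes the file content as bytes (SdkBytes).
                System.out.println(response.fileContent().asUtf8String());
            }
        }
    }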

    " }, "filePath":{ "shape":"Path", @@ -4824,14 +4869,14 @@ "type":"structure", "members":{ }, - "documentation":"

    The description for the approval rule template is not valid because it exceeds the maximum characters allowed for a description. For more information about limits in AWS CodeCommit, see AWS CodeCommit User Guide.

    ", + "documentation":"

    The description for the approval rule template is not valid because it exceeds the maximum characters allowed for a description. For more information about limits in CodeCommit, see Quotas in the CodeCommit User Guide.

    ", "exception":true }, "InvalidApprovalRuleTemplateNameException":{ "type":"structure", "members":{ }, - "documentation":"

    The name of the approval rule template is not valid. Template names must be between 1 and 100 valid characters in length. For more information about limits in AWS CodeCommit, see AWS CodeCommit User Guide.

    ", + "documentation":"

    The name of the approval rule template is not valid. Template names must be between 1 and 100 valid characters in length. For more information about limits in CodeCommit, see Quotas in the CodeCommit User Guide.

    ", "exception":true }, "InvalidApprovalStateException":{ @@ -5062,14 +5107,14 @@ "type":"structure", "members":{ }, - "documentation":"

    The value of the reaction is not valid. For more information, see the AWS CodeCommit User Guide.

    ", + "documentation":"

    The value of the reaction is not valid. For more information, see the CodeCommit User Guide.

    ", "exception":true }, "InvalidReferenceNameException":{ "type":"structure", "members":{ }, - "documentation":"

    The specified reference name format is not valid. Reference names must conform to the Git references format (for example, refs/heads/master). For more information, see Git Internals - Git References or consult your Git documentation.

    ", + "documentation":"

    The specified reference name format is not valid. Reference names must conform to the Git references format (for example, refs/heads/main). For more information, see Git Internals - Git References or consult your Git documentation.

    ", "exception":true }, "InvalidRelativeFileVersionEnumException":{ @@ -5146,14 +5191,14 @@ "type":"structure", "members":{ }, - "documentation":"

    The AWS Region for the trigger target does not match the AWS Region for the repository. Triggers must be created in the same Region as the target for the trigger.

    ", + "documentation":"

    The Amazon Web Services Region for the trigger target does not match the Amazon Web Services Region for the repository. Triggers must be created in the same Amazon Web Services Region as the target for the trigger.

    ", "exception":true }, "InvalidResourceArnException":{ "type":"structure", "members":{ }, - "documentation":"

    The value for the resource ARN is not valid. For more information about resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.

    ", + "documentation":"

    The value for the resource ARN is not valid. For more information about resources in CodeCommit, see CodeCommit Resources and Operations in the CodeCommit User Guide.

    ", "exception":true }, "InvalidRevisionIdException":{ @@ -5284,7 +5329,7 @@ "members":{ "approvalRuleTemplateNames":{ "shape":"ApprovalRuleTemplateNameList", - "documentation":"

    The names of all the approval rule templates found in the AWS Region for your AWS account.

    " + "documentation":"

    The names of all the approval rule templates found in the Amazon Web Services Region for your Amazon Web Services account.

    " }, "nextToken":{ "shape":"NextToken", @@ -5352,6 +5397,49 @@ }, "documentation":"

    Represents the output of a list branches operation.

    " }, + "ListFileCommitHistoryRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "filePath" + ], + "members":{ + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository that contains the file.

    " + }, + "commitSpecifier":{ + "shape":"CommitName", + "documentation":"

    The fully qualified reference that identifies the commit that contains the file. For example, you can specify a full commit ID, a tag, a branch name, or a reference such as refs/heads/main. If none is provided, the head commit is used.

    " + }, + "filePath":{ + "shape":"Path", + "documentation":"

    The full path of the file whose history you want to retrieve, including the name of the file.

    " + }, + "maxResults":{ + "shape":"Limit", + "documentation":"

    A non-zero, non-negative integer used to limit the number of returned results.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    An enumeration token that allows the operation to batch the results.

    " + } + } + }, + "ListFileCommitHistoryResponse":{ + "type":"structure", + "required":["revisionDag"], + "members":{ + "revisionDag":{ + "shape":"RevisionDag", + "documentation":"

    An array of FileVersion objects that form a directed acyclic graph (DAG) of the changes to the file made by the commits that changed the file.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    An enumeration token that can be used to return the next batch of results.
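
    Assuming the shapes added in this change are generated with the SDK's usual naming (listFileCommitHistory, ListFileCommitHistoryRequest, FileVersion), a sketch of walking the revision DAG could look like the following; the repository name and file path are placeholders.

    import software.amazon.awssdk.services.codecommit.CodeCommitClient;
    import software.amazon.awssdk.services.codecommit.model.FileVersion;
    import software.amazon.awssdk.services.codecommit.model.ListFileCommitHistoryRequest;
    import software.amazon.awssdk.services.codecommit.model.ListFileCommitHistoryResponse;

    public class ListFileCommitHistoryExample {
        public static void main(String[] args) {
            try (CodeCommitClient codeCommit = CodeCommitClient.create()) {
                ListFileCommitHistoryResponse response = codeCommit.listFileCommitHistory(
                        ListFileCommitHistoryRequest.builder()
                                .repositoryName("MyDemoRepo")        // placeholder repository
                                .filePath("src/App.java")            // placeholder file path
                                .commitSpecifier("refs/heads/main")  // optional; defaults to the head commit
                                .maxResults(50)
                                .build());
                // revisionDag is a list of FileVersion nodes that form the file's revision DAG.
                for (FileVersion version : response.revisionDag()) {
                    System.out.printf("%s %s (children: %s)%n",
                            version.commit().commitId(), version.path(), version.revisionChildren());
                }
            }
        }
    }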

    " + } + } + }, "ListPullRequestsInput":{ "type":"structure", "required":["repositoryName"], @@ -5428,7 +5516,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

    An enumeration token that allows the operation to batch the results of the operation. Batch sizes are 1,000 for list repository operations. When the client sends the token back to AWS CodeCommit, another page of 1,000 records is retrieved.

    " + "documentation":"

    An enumeration token that allows the operation to batch the results of the operation. Batch sizes are 1,000 for list repository operations. When the client sends the token back to CodeCommit, another page of 1,000 records is retrieved.

    " }, "sortBy":{ "shape":"SortByEnum", @@ -5450,7 +5538,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

    An enumeration token that allows the operation to batch the results of the operation. Batch sizes are 1,000 for list repository operations. When the client sends the token back to AWS CodeCommit, another page of 1,000 records is retrieved.

    " + "documentation":"

    An enumeration token that allows the operation to batch the results of the operation. Batch sizes are 1,000 for list repository operations. When the client sends the token back to CodeCommit, another page of 1,000 records is retrieved.

    " } }, "documentation":"

    Represents the output of a list repositories operation.
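
    A sketch of driving the enumeration token described above with the AWS SDK for Java v2; each page holds up to 1,000 repositories, and a null nextToken signals the last page. The generated client should also offer a paginator variant (for example, listRepositoriesPaginator) that handles this loop, assuming pagination is configured for the operation.

    import software.amazon.awssdk.services.codecommit.CodeCommitClient;
    import software.amazon.awssdk.services.codecommit.model.ListRepositoriesRequest;
    import software.amazon.awssdk.services.codecommit.model.ListRepositoriesResponse;

    public class ListRepositoriesExample {
        public static void main(String[] args) {
            try (CodeCommitClient codeCommit = CodeCommitClient.create()) {
                String nextToken = null;
                do {
                    ListRepositoriesResponse page = codeCommit.listRepositories(
                            ListRepositoriesRequest.builder().nextToken(nextToken).build());
                    page.repositories().forEach(r -> System.out.println(r.repositoryName()));
                    nextToken = page.nextToken(); // null when there are no more pages
                } while (nextToken != null);
            }
        }
    }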

    " @@ -6027,7 +6115,7 @@ "type":"structure", "members":{ }, - "documentation":"

    The maximum number of approval rule templates has been exceeded for this AWS Region.

    ", + "documentation":"

    The maximum number of approval rule templates has been exceeded for this Amazon Web Services Region.

    ", "exception":true }, "NumberOfRulesExceededException":{ @@ -6644,7 +6732,7 @@ }, "reactionValue":{ "shape":"ReactionValue", - "documentation":"

    The emoji reaction you want to add or update. To remove a reaction, provide a value of blank or null. You can also provide the value of none. For information about emoji reaction values supported in AWS CodeCommit, see the AWS CodeCommit User Guide.

    " + "documentation":"

    The emoji reaction you want to add or update. To remove a reaction, provide a value of blank or null. You can also provide the value of none. For information about emoji reaction values supported in CodeCommit, see the CodeCommit User Guide.
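
    A hedged sketch of setting a reaction value with the AWS SDK for Java v2; the comment ID is a placeholder and the shortcode form of the reaction value is an assumption here, so check the CodeCommit User Guide for the supported values.

    import software.amazon.awssdk.services.codecommit.CodeCommitClient;
    import software.amazon.awssdk.services.codecommit.model.PutCommentReactionRequest;

    public class PutCommentReactionExample {
        public static void main(String[] args) {
            try (CodeCommitClient codeCommit = CodeCommitClient.create()) {
                codeCommit.putCommentReaction(PutCommentReactionRequest.builder()
                        .commentId("exampleCommentId")  // placeholder comment ID
                        .reactionValue(":thumbsup:")    // assumed shortcode form of the reaction
                        .build());
            }
        }
    }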

    " } } }, @@ -6833,7 +6921,7 @@ "documentation":"

    The Unicode codepoint for the reaction.

    " } }, - "documentation":"

    Information about the values for reactions to a comment. AWS CodeCommit supports a limited set of reactions.

    " + "documentation":"

    Information about the values for reactions to a comment. CodeCommit supports a limited set of reactions.

    " }, "ReactionValueRequiredException":{ "type":"structure", @@ -6952,7 +7040,7 @@ "members":{ "accountId":{ "shape":"AccountId", - "documentation":"

    The ID of the AWS account associated with the repository.

    " + "documentation":"

    The ID of the Amazon Web Services account associated with the repository.

    " }, "repositoryId":{ "shape":"RepositoryId", @@ -7086,7 +7174,7 @@ "documentation":"

    The repository events that cause the trigger to run actions in another service, such as sending a notification through Amazon SNS.

    The valid value \"all\" cannot be used with any other values.

    " } }, - "documentation":"

    Information about a trigger for a repository.

    " + "documentation":"

    Information about a trigger for a repository.

    If you want to receive notifications about repository events, consider using notifications instead of triggers. For more information, see Configuring notifications for repository events.

    " }, "RepositoryTriggerBranchNameListRequiredException":{ "type":"structure", @@ -7171,7 +7259,7 @@ "type":"structure", "members":{ }, - "documentation":"

    A valid Amazon Resource Name (ARN) for an AWS CodeCommit resource is required. For a list of valid resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.

    ", + "documentation":"

    A valid Amazon Resource Name (ARN) for a CodeCommit resource is required. For a list of valid resources in CodeCommit, see CodeCommit Resources and Operations in the CodeCommit User Guide.

    ", "exception":true }, "RestrictedSourceFileException":{ @@ -7181,6 +7269,14 @@ "documentation":"

    The commit cannot be created because one of the changes specifies copying or moving a .gitkeep file.

    ", "exception":true }, + "RevisionChildren":{ + "type":"list", + "member":{"shape":"RevisionId"} + }, + "RevisionDag":{ + "type":"list", + "member":{"shape":"FileVersion"} + }, "RevisionId":{"type":"string"}, "RevisionIdRequiredException":{ "type":"structure", @@ -7475,7 +7571,7 @@ "type":"structure", "members":{ }, - "documentation":"

    The maximum number of tags for an AWS CodeCommit resource has been exceeded.

    ", + "documentation":"

    The maximum number of tags for a CodeCommit resource has been exceeded.

    ", "exception":true }, "UntagResourceInput":{ @@ -7612,11 +7708,11 @@ "members":{ "repositoryName":{ "shape":"RepositoryName", - "documentation":"

    The name of the repository to set or change the default branch for.

    " + "documentation":"

    The name of the repository for which you want to set or change the default branch.

    " }, "defaultBranchName":{ "shape":"BranchName", - "documentation":"

    The name of the branch to set as the default.

    " + "documentation":"

    The name of the branch to set as the default branch.

    " } }, "documentation":"

    Represents the input of an update default branch operation.
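
    A minimal AWS SDK for Java v2 sketch of the update default branch operation; the repository and branch names are placeholders.

    import software.amazon.awssdk.services.codecommit.CodeCommitClient;
    import software.amazon.awssdk.services.codecommit.model.UpdateDefaultBranchRequest;

    public class UpdateDefaultBranchExample {
        public static void main(String[] args) {
            try (CodeCommitClient codeCommit = CodeCommitClient.create()) {
                codeCommit.updateDefaultBranch(UpdateDefaultBranchRequest.builder()
                        .repositoryName("MyDemoRepo")  // placeholder repository
                        .defaultBranchName("main")     // branch to set as the default branch
                        .build());
            }
        }
    }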

    " @@ -7643,7 +7739,7 @@ }, "newRuleContent":{ "shape":"ApprovalRuleContent", - "documentation":"

    The updated content for the approval rule.

    When you update the content of the approval rule, you can specify approvers in an approval pool in one of two ways:

    • CodeCommitApprovers: This option only requires an AWS account and a resource. It can be used for both IAM users and federated access users whose name matches the provided resource name. This is a very powerful option that offers a great deal of flexibility. For example, if you specify the AWS account 123456789012 and Mary_Major, all of the following are counted as approvals coming from that user:

      • An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major)

      • A federated user identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major)

      This option does not recognize an active session of someone assuming the role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major).

    • Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role.

    For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.

    " + "documentation":"

    The updated content for the approval rule.

    When you update the content of the approval rule, you can specify approvers in an approval pool in one of two ways:

    • CodeCommitApprovers: This option only requires an Amazon Web Services account and a resource. It can be used for both IAM users and federated access users whose name matches the provided resource name. This is a very powerful option that offers a great deal of flexibility. For example, if you specify the Amazon Web Services account 123456789012 and Mary_Major, all of the following are counted as approvals coming from that user:

      • An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major)

      • A federated user identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major)

      This option does not recognize an active session of someone assuming the role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major).

    • Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role.

    For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.

    " } } }, @@ -7813,5 +7909,5 @@ }, "blob":{"type":"blob"} }, - "documentation":"AWS CodeCommit

    This is the AWS CodeCommit API Reference. This reference provides descriptions of the operations and data types for AWS CodeCommit API along with usage examples.

    You can use the AWS CodeCommit API to work with the following objects:

    Repositories, by calling the following:

    • BatchGetRepositories, which returns information about one or more repositories associated with your AWS account.

    • CreateRepository, which creates an AWS CodeCommit repository.

    • DeleteRepository, which deletes an AWS CodeCommit repository.

    • GetRepository, which returns information about a specified repository.

    • ListRepositories, which lists all AWS CodeCommit repositories associated with your AWS account.

    • UpdateRepositoryDescription, which sets or updates the description of the repository.

    • UpdateRepositoryName, which changes the name of the repository. If you change the name of a repository, no other users of that repository can access it until you send them the new HTTPS or SSH URL to use.

    Branches, by calling the following:

    • CreateBranch, which creates a branch in a specified repository.

    • DeleteBranch, which deletes the specified branch in a repository unless it is the default branch.

    • GetBranch, which returns information about a specified branch.

    • ListBranches, which lists all branches for a specified repository.

    • UpdateDefaultBranch, which changes the default branch for a repository.

    Files, by calling the following:

    • DeleteFile, which deletes the content of a specified file from a specified branch.

    • GetBlob, which returns the base-64 encoded content of an individual Git blob object in a repository.

    • GetFile, which returns the base-64 encoded content of a specified file.

    • GetFolder, which returns the contents of a specified folder or directory.

    • PutFile, which adds or modifies a single file in a specified repository and branch.

    Commits, by calling the following:

    • BatchGetCommits, which returns information about one or more commits in a repository.

    • CreateCommit, which creates a commit for changes to a repository.

    • GetCommit, which returns information about a commit, including commit messages and author and committer information.

    • GetDifferences, which returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID, or other fully qualified reference).

    Merges, by calling the following:

    • BatchDescribeMergeConflicts, which returns information about conflicts in a merge between commits in a repository.

    • CreateUnreferencedMergeCommit, which creates an unreferenced commit between two branches or commits for the purpose of comparing them and identifying any potential conflicts.

    • DescribeMergeConflicts, which returns information about merge conflicts between the base, source, and destination versions of a file in a potential merge.

    • GetMergeCommit, which returns information about the merge between a source and destination commit.

    • GetMergeConflicts, which returns information about merge conflicts between the source and destination branch in a pull request.

    • GetMergeOptions, which returns information about the available merge options between two branches or commit specifiers.

    • MergeBranchesByFastForward, which merges two branches using the fast-forward merge option.

    • MergeBranchesBySquash, which merges two branches using the squash merge option.

    • MergeBranchesByThreeWay, which merges two branches using the three-way merge option.

    Pull requests, by calling the following:

    Approval rule templates, by calling the following:

    Comments in a repository, by calling the following:

    Tags used to tag resources in AWS CodeCommit (not Git tags), by calling the following:

    • ListTagsForResource, which gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeCommit.

    • TagResource, which adds or updates tags for a resource in AWS CodeCommit.

    • UntagResource, which removes tags for a resource in AWS CodeCommit.

    Triggers, by calling the following:

    • GetRepositoryTriggers, which returns information about triggers configured for a repository.

    • PutRepositoryTriggers, which replaces all triggers for a repository and can be used to create or delete triggers.

    • TestRepositoryTriggers, which tests the functionality of a repository trigger by sending data to the trigger target.

    For information about how to use AWS CodeCommit, see the AWS CodeCommit User Guide.

    " + "documentation":"CodeCommit

    This is the CodeCommit API Reference. This reference provides descriptions of the operations and data types for CodeCommit API along with usage examples.

    You can use the CodeCommit API to work with the following objects:

    Repositories, by calling the following:

    • BatchGetRepositories, which returns information about one or more repositories associated with your Amazon Web Services account.

    • CreateRepository, which creates a CodeCommit repository.

    • DeleteRepository, which deletes a CodeCommit repository.

    • GetRepository, which returns information about a specified repository.

    • ListRepositories, which lists all CodeCommit repositories associated with your Amazon Web Services account.

    • UpdateRepositoryDescription, which sets or updates the description of the repository.

    • UpdateRepositoryName, which changes the name of the repository. If you change the name of a repository, no other users of that repository can access it until you send them the new HTTPS or SSH URL to use.

    Branches, by calling the following:

    • CreateBranch, which creates a branch in a specified repository.

    • DeleteBranch, which deletes the specified branch in a repository unless it is the default branch.

    • GetBranch, which returns information about a specified branch.

    • ListBranches, which lists all branches for a specified repository.

    • UpdateDefaultBranch, which changes the default branch for a repository.

    Files, by calling the following:

    • DeleteFile, which deletes the content of a specified file from a specified branch.

    • GetBlob, which returns the base-64 encoded content of an individual Git blob object in a repository.

    • GetFile, which returns the base-64 encoded content of a specified file.

    • GetFolder, which returns the contents of a specified folder or directory.

    • ListFileCommitHistory, which retrieves a list of commits and changes to a specified file.

    • PutFile, which adds or modifies a single file in a specified repository and branch.

    Commits, by calling the following:

    • BatchGetCommits, which returns information about one or more commits in a repository.

    • CreateCommit, which creates a commit for changes to a repository.

    • GetCommit, which returns information about a commit, including commit messages and author and committer information.

    • GetDifferences, which returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID, or other fully qualified reference).

    Merges, by calling the following:

    • BatchDescribeMergeConflicts, which returns information about conflicts in a merge between commits in a repository.

    • CreateUnreferencedMergeCommit, which creates an unreferenced commit between two branches or commits for the purpose of comparing them and identifying any potential conflicts.

    • DescribeMergeConflicts, which returns information about merge conflicts between the base, source, and destination versions of a file in a potential merge.

    • GetMergeCommit, which returns information about the merge between a source and destination commit.

    • GetMergeConflicts, which returns information about merge conflicts between the source and destination branch in a pull request.

    • GetMergeOptions, which returns information about the available merge options between two branches or commit specifiers.

    • MergeBranchesByFastForward, which merges two branches using the fast-forward merge option.

    • MergeBranchesBySquash, which merges two branches using the squash merge option.

    • MergeBranchesByThreeWay, which merges two branches using the three-way merge option.

    Pull requests, by calling the following:

    Approval rule templates, by calling the following:

    Comments in a repository, by calling the following:

    Tags used to tag resources in CodeCommit (not Git tags), by calling the following:

    • ListTagsForResource, which gets information about Amazon Web Services tags for a specified Amazon Resource Name (ARN) in CodeCommit.

    • TagResource, which adds or updates tags for a resource in CodeCommit.

    • UntagResource, which removes tags for a resource in CodeCommit.

    Triggers, by calling the following:

    • GetRepositoryTriggers, which returns information about triggers configured for a repository.

    • PutRepositoryTriggers, which replaces all triggers for a repository and can be used to create or delete triggers.

    • TestRepositoryTriggers, which tests the functionality of a repository trigger by sending data to the trigger target.

    For information about how to use CodeCommit, see the CodeCommit User Guide.

    " } diff --git a/services/codedeploy/pom.xml b/services/codedeploy/pom.xml index f04590778f6a..d1e27b0acf24 100644 --- a/services/codedeploy/pom.xml +++ b/services/codedeploy/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT codedeploy AWS Java SDK :: Services :: AWS CodeDeploy diff --git a/services/codedeploy/src/main/resources/codegen-resources/customization.config b/services/codedeploy/src/main/resources/codegen-resources/customization.config index 9ef3b906a84e..3fa976b82eec 100644 --- a/services/codedeploy/src/main/resources/codegen-resources/customization.config +++ b/services/codedeploy/src/main/resources/codegen-resources/customization.config @@ -6,7 +6,7 @@ "listGitHubAccountTokenNames", "listOnPremisesInstances" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "continueDeployment", "skipWaitTimeForInstanceTermination", "updateApplication", diff --git a/services/codeguruprofiler/pom.xml b/services/codeguruprofiler/pom.xml index 96148f9a33ae..786afe19ab8c 100644 --- a/services/codeguruprofiler/pom.xml +++ b/services/codeguruprofiler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT codeguruprofiler AWS Java SDK :: Services :: CodeGuruProfiler diff --git a/services/codegurureviewer/pom.xml b/services/codegurureviewer/pom.xml index 9ee676d8e62c..c3f22c985941 100644 --- a/services/codegurureviewer/pom.xml +++ b/services/codegurureviewer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT codegurureviewer AWS Java SDK :: Services :: CodeGuru Reviewer diff --git a/services/codegurusecurity/pom.xml b/services/codegurusecurity/pom.xml index 11e32ba49d70..00540d78411e 100644 --- a/services/codegurusecurity/pom.xml +++ b/services/codegurusecurity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT codegurusecurity AWS Java SDK :: Services :: Code Guru Security diff --git a/services/codepipeline/pom.xml b/services/codepipeline/pom.xml index 7cd9396534ca..b8c884a9cbd2 100644 --- a/services/codepipeline/pom.xml +++ b/services/codepipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT codepipeline AWS Java SDK :: Services :: AWS CodePipeline diff --git a/services/codepipeline/src/main/resources/codegen-resources/customization.config b/services/codepipeline/src/main/resources/codegen-resources/customization.config index aeb5a89163ee..2239d96d2582 100644 --- a/services/codepipeline/src/main/resources/codegen-resources/customization.config +++ b/services/codepipeline/src/main/resources/codegen-resources/customization.config @@ -4,7 +4,7 @@ "listPipelines", "listWebhooks" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "deregisterWebhookWithThirdParty", "registerWebhookWithThirdParty" ] diff --git a/services/codestar/pom.xml b/services/codestar/pom.xml index 59c442de3026..a6f30db40b00 100644 --- a/services/codestar/pom.xml +++ b/services/codestar/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT codestar AWS Java SDK :: Services :: AWS CodeStar diff --git a/services/codestarconnections/pom.xml b/services/codestarconnections/pom.xml index 651cc62b507f..100832171dc0 100644 --- a/services/codestarconnections/pom.xml +++ b/services/codestarconnections/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT codestarconnections AWS Java SDK :: 
Services :: CodeStar connections diff --git a/services/codestarnotifications/pom.xml b/services/codestarnotifications/pom.xml index facbf5627655..fea6f31109eb 100644 --- a/services/codestarnotifications/pom.xml +++ b/services/codestarnotifications/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT codestarnotifications AWS Java SDK :: Services :: Codestar Notifications diff --git a/services/cognitoidentity/pom.xml b/services/cognitoidentity/pom.xml index da7f4390ea1e..9866dc528ded 100644 --- a/services/cognitoidentity/pom.xml +++ b/services/cognitoidentity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT cognitoidentity AWS Java SDK :: Services :: Amazon Cognito Identity diff --git a/services/cognitoidentityprovider/pom.xml b/services/cognitoidentityprovider/pom.xml index 79d41a01b634..2949dea8bb99 100644 --- a/services/cognitoidentityprovider/pom.xml +++ b/services/cognitoidentityprovider/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT cognitoidentityprovider AWS Java SDK :: Services :: Amazon Cognito Identity Provider Service diff --git a/services/cognitoidentityprovider/src/main/resources/codegen-resources/customization.config b/services/cognitoidentityprovider/src/main/resources/codegen-resources/customization.config index e8cd40c8e333..0a61574c89d1 100644 --- a/services/cognitoidentityprovider/src/main/resources/codegen-resources/customization.config +++ b/services/cognitoidentityprovider/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "associateSoftwareToken" ], "shapeModifiers" : { diff --git a/services/cognitoidentityprovider/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/cognitoidentityprovider/src/main/resources/codegen-resources/endpoint-rule-set.json index e6566d99ff99..0f514686ef34 100644 --- a/services/cognitoidentityprovider/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/cognitoidentityprovider/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + 
}, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cognito-idp-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://cognito-idp-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://cognito-idp-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://cognito-idp-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cognito-idp.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": 
"PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://cognito-idp.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://cognito-idp.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://cognito-idp.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json b/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json index e2356443cd4d..2a7f59f92404 100644 --- a/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json +++ b/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json @@ -534,7 +534,7 @@ {"shape":"InvalidEmailRoleAccessPolicyException"}, {"shape":"InvalidSmsRoleTrustRelationshipException"} ], - "documentation":"

    This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

    If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

    Updates the specified user's attributes, including developer attributes, as an administrator. Works on any user.

    For custom attributes, you must prepend the custom: prefix to the attribute name.

    In addition to updating user attributes, this API can also be used to mark phone and email as verified.

    Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

    Learn more

    " + "documentation":"

    This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

    If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

    Updates the specified user's attributes, including developer attributes, as an administrator. Works on any user. To delete an attribute from your user, submit the attribute in your API request with a blank value.

    For custom attributes, you must prepend the custom: prefix to the attribute name.

    In addition to updating user attributes, this API can also be used to mark phone and email as verified.

    Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

    Learn more
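
    A sketch of the attribute-update behavior described above using the AWS SDK for Java v2; the user pool ID, user name, and attribute values are placeholders, and the blank value on the last attribute illustrates the delete-by-blank rule.

    import software.amazon.awssdk.services.cognitoidentityprovider.CognitoIdentityProviderClient;
    import software.amazon.awssdk.services.cognitoidentityprovider.model.AdminUpdateUserAttributesRequest;
    import software.amazon.awssdk.services.cognitoidentityprovider.model.AttributeType;

    public class AdminUpdateUserAttributesExample {
        public static void main(String[] args) {
            try (CognitoIdentityProviderClient cognito = CognitoIdentityProviderClient.create()) {
                cognito.adminUpdateUserAttributes(AdminUpdateUserAttributesRequest.builder()
                        .userPoolId("us-east-1_EXAMPLE")  // placeholder user pool ID
                        .username("jane")                 // placeholder user name
                        .userAttributes(
                                // Update a standard attribute and mark it as verified.
                                AttributeType.builder().name("email").value("jane@example.com").build(),
                                AttributeType.builder().name("email_verified").value("true").build(),
                                // A blank value deletes the attribute, per the documentation above.
                                AttributeType.builder().name("custom:nickname").value("").build())
                        .build());
            }
        }
    }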

    " }, "AdminUserGlobalSignOut":{ "name":"AdminUserGlobalSignOut", @@ -571,7 +571,8 @@ {"shape":"SoftwareTokenMFANotFoundException"}, {"shape":"ForbiddenException"} ], - "documentation":"

    Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito.

    Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs. Complete setup with AssociateSoftwareToken and VerifySoftwareToken.

    After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito.

    Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs in. Complete setup with AssociateSoftwareToken and VerifySoftwareToken.

    After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
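
    A sketch of the two-step TOTP setup described above with the AWS SDK for Java v2; the access token and the one-time password are placeholders that your application would supply.

    import software.amazon.awssdk.services.cognitoidentityprovider.CognitoIdentityProviderClient;
    import software.amazon.awssdk.services.cognitoidentityprovider.model.AssociateSoftwareTokenRequest;
    import software.amazon.awssdk.services.cognitoidentityprovider.model.AssociateSoftwareTokenResponse;
    import software.amazon.awssdk.services.cognitoidentityprovider.model.VerifySoftwareTokenRequest;

    public class TotpSetupExample {
        public static void main(String[] args) {
            String accessToken = "eyJ...";  // placeholder access token for a signed-in user
            try (CognitoIdentityProviderClient cognito = CognitoIdentityProviderClient.create()) {
                // Step 1: request the private key that the authenticator app will use.
                AssociateSoftwareTokenResponse associate = cognito.associateSoftwareToken(
                        AssociateSoftwareTokenRequest.builder().accessToken(accessToken).build());
                System.out.println("Add this secret to an authenticator app: " + associate.secretCode());

                // Step 2: complete setup by verifying a code generated from that secret.
                String totpFromUser = "123456";  // placeholder one-time password entered by the user
                cognito.verifySoftwareToken(VerifySoftwareTokenRequest.builder()
                        .accessToken(accessToken)
                        .userCode(totpFromUser)
                        .build());
            }
        }
    }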

    ", + "authtype":"none" }, "ChangePassword":{ "name":"ChangePassword", @@ -620,7 +621,8 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

    Confirms tracking of the device. This API call is the call that begins device tracking.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Confirms tracking of the device. This API call is the call that begins device tracking.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    ", + "authtype":"none" }, "ConfirmForgotPassword":{ "name":"ConfirmForgotPassword", @@ -1094,7 +1096,8 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

    Forgets the specified device.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Forgets the specified device.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    ", + "authtype":"none" }, "ForgotPassword":{ "name":"ForgotPassword", @@ -1161,7 +1164,8 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

    Gets the device.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Gets the device.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    ", + "authtype":"none" }, "GetGroup":{ "name":"GetGroup", @@ -1333,7 +1337,8 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

    Signs out a user from all devices. GlobalSignOut invalidates all identity, access and refresh tokens that Amazon Cognito has issued to a user. A user can still use a hosted UI cookie to retrieve new tokens for the duration of the 1-hour cookie validity period.

    Your app isn't aware that a user's access token is revoked unless it attempts to authorize a user pools API request with an access token that contains the scope aws.cognito.signin.user.admin. Your app might otherwise accept access tokens until they expire.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Signs out a user from all devices. GlobalSignOut invalidates all identity, access and refresh tokens that Amazon Cognito has issued to a user. A user can still use a hosted UI cookie to retrieve new tokens for the duration of the 1-hour cookie validity period.

    Your app isn't aware that a user's access token is revoked unless it attempts to authorize a user pools API request with an access token that contains the scope aws.cognito.signin.user.admin. Your app might otherwise accept access tokens until they expire.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
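
    A minimal AWS SDK for Java v2 sketch of signing a user out everywhere; the access token is a placeholder.

    import software.amazon.awssdk.services.cognitoidentityprovider.CognitoIdentityProviderClient;
    import software.amazon.awssdk.services.cognitoidentityprovider.model.GlobalSignOutRequest;

    public class GlobalSignOutExample {
        public static void main(String[] args) {
            try (CognitoIdentityProviderClient cognito = CognitoIdentityProviderClient.create()) {
                // The access token identifies the user whose tokens are invalidated.
                cognito.globalSignOut(GlobalSignOutRequest.builder()
                        .accessToken("eyJ...")  // placeholder access token
                        .build());
            }
        }
    }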

    ", + "authtype":"none" }, "InitiateAuth":{ "name":"InitiateAuth", @@ -1383,7 +1388,8 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

    Lists the sign-in devices that Amazon Cognito has registered to the current user.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Lists the sign-in devices that Amazon Cognito has registered to the current user.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    ", + "authtype":"none" }, "ListGroups":{ "name":"ListGroups", @@ -1616,7 +1622,8 @@ {"shape":"UnsupportedTokenTypeException"}, {"shape":"ForbiddenException"} ], - "documentation":"

    Revokes all of the access tokens generated by, and at the same time as, the specified refresh token. After a token is revoked, you can't use the revoked token to access Amazon Cognito user APIs, or to authorize access to your resource server.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Revokes all of the access tokens generated by, and at the same time as, the specified refresh token. After a token is revoked, you can't use the revoked token to access Amazon Cognito user APIs, or to authorize access to your resource server.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
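
    A sketch of revoking a refresh token with the AWS SDK for Java v2; the token and app client ID are placeholders, and clientSecret is only needed for app clients configured with a secret.

    import software.amazon.awssdk.services.cognitoidentityprovider.CognitoIdentityProviderClient;
    import software.amazon.awssdk.services.cognitoidentityprovider.model.RevokeTokenRequest;

    public class RevokeTokenExample {
        public static void main(String[] args) {
            try (CognitoIdentityProviderClient cognito = CognitoIdentityProviderClient.create()) {
                cognito.revokeToken(RevokeTokenRequest.builder()
                        .token("eyJ...")               // placeholder refresh token to revoke
                        .clientId("1example23456789")  // placeholder app client ID
                        // .clientSecret("...")        // only for app clients that have a secret
                        .build());
            }
        }
    }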

    ", + "authtype":"none" }, "SetLogDeliveryConfiguration":{ "name":"SetLogDeliveryConfiguration", @@ -1690,7 +1697,8 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

    Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are activated and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on Adaptive Authentication for the user pool.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are activated and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on Adaptive Authentication for the user pool.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    ", + "authtype":"none" }, "SetUserPoolMfaConfig":{ "name":"SetUserPoolMfaConfig", @@ -1847,7 +1855,8 @@ {"shape":"UserPoolAddOnNotEnabledException"}, {"shape":"InternalErrorException"} ], - "documentation":"

    Provides the feedback for an authentication event, whether it was from a valid user or not. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Provides the feedback for an authentication event, whether it was from a valid user or not. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    ", + "authtype":"none" }, "UpdateDeviceStatus":{ "name":"UpdateDeviceStatus", @@ -1869,7 +1878,8 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

    Updates the device status.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Updates the device status.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    ", + "authtype":"none" }, "UpdateGroup":{ "name":"UpdateGroup", @@ -2040,7 +2050,8 @@ {"shape":"CodeMismatchException"}, {"shape":"ForbiddenException"} ], - "documentation":"

    Use this API to register a user's entered time-based one-time password (TOTP) code and mark the user's software token MFA status as \"verified\" if successful. The request takes an access token or a session string, but not both.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Use this API to register a user's entered time-based one-time password (TOTP) code and mark the user's software token MFA status as \"verified\" if successful. The request takes an access token or a session string, but not both.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
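    A minimal sketch of registering a TOTP code with VerifySoftwareToken using the generated Java v2 client; the access token and six-digit code are placeholders. Pass either an access token or a session string, not both.

```java
import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.cognitoidentityprovider.CognitoIdentityProviderClient;
import software.amazon.awssdk.services.cognitoidentityprovider.model.VerifySoftwareTokenRequest;
import software.amazon.awssdk.services.cognitoidentityprovider.model.VerifySoftwareTokenResponse;

public class VerifySoftwareTokenExample {
    public static void main(String[] args) {
        String accessToken = "<the user's access token>"; // placeholder
        String totpCode = "123456";                       // placeholder: code from the authenticator app

        try (CognitoIdentityProviderClient cognito = CognitoIdentityProviderClient.builder()
                .region(Region.US_EAST_1)
                .credentialsProvider(AnonymousCredentialsProvider.create())
                .build()) {
            VerifySoftwareTokenResponse response = cognito.verifySoftwareToken(
                    VerifySoftwareTokenRequest.builder()
                            .accessToken(accessToken)
                            .userCode(totpCode)
                            .build());
            // SUCCESS marks the user's software token MFA status as verified.
            System.out.println("Software token MFA status: " + response.statusAsString());
        }
    }
}
```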

    ", + "authtype":"none" }, "VerifyUserAttribute":{ "name":"VerifyUserAttribute", @@ -3394,7 +3405,8 @@ "ChallengeResponsesType":{ "type":"map", "key":{"shape":"StringType"}, - "value":{"shape":"StringType"} + "value":{"shape":"StringType"}, + "sensitive":true }, "ChangePasswordRequest":{ "type":"structure", @@ -3464,7 +3476,7 @@ "members":{ "LogGroupArn":{ "shape":"ArnType", - "documentation":"

    The Amazon Resource Name (arn) of a CloudWatch Logs log group where your user pool sends logs. The log group must not be encrypted with Key Management Service and must be in the same Amazon Web Services account as your user pool.

    " + "documentation":"

    The Amazon Resource Name (ARN) of a CloudWatch Logs log group where your user pool sends logs. The log group must not be encrypted with Key Management Service and must be in the same Amazon Web Services account as your user pool.

    To send logs to log groups with a resource policy of a size greater than 5120 characters, configure a log group with a path that starts with /aws/vendedlogs. For more information, see Enabling logging from certain Amazon Web Services services.

    " } }, "documentation":"

    The CloudWatch logging destination of a user pool detailed activity logging configuration.

    " @@ -3788,7 +3800,7 @@ "documentation":"

    The user pool ID.

    " }, "ProviderName":{ - "shape":"ProviderNameTypeV1", + "shape":"ProviderNameTypeV2", "documentation":"

    The IdP name.

    " }, "ProviderType":{ @@ -6189,6 +6201,7 @@ }, "PaginationKey":{ "type":"string", + "max":131072, "min":1, "pattern":"[\\S]+" }, @@ -6317,13 +6330,13 @@ "type":"string", "max":32, "min":1, - "pattern":"[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}]+" + "pattern":"[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}\\p{Z}]+" }, - "ProviderNameTypeV1":{ + "ProviderNameTypeV2":{ "type":"string", "max":32, - "min":3, - "pattern":"[^_][\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}][^_]+" + "min":1, + "pattern":"[^_\\p{Z}][\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}][^_\\p{Z}]+" }, "ProviderUserIdentifierType":{ "type":"structure", @@ -6793,7 +6806,8 @@ "SessionType":{ "type":"string", "max":2048, - "min":20 + "min":20, + "sensitive":true }, "SetLogDeliveryConfigurationRequest":{ "type":"structure", @@ -7100,7 +7114,8 @@ "type":"string", "max":6, "min":6, - "pattern":"[0-9]+" + "pattern":"[0-9]+", + "sensitive":true }, "SoftwareTokenMfaConfigType":{ "type":"structure", @@ -7875,7 +7890,8 @@ "documentation":"

    Encoded device-fingerprint details that your app collected with the Amazon Cognito context data collection library. For more information, see Adding user device and session data to API requests.

    " } }, - "documentation":"

    Contextual data, such as the user's device fingerprint, IP address, or location, used for evaluating the risk of an unexpected event by Amazon Cognito advanced security.

    " + "documentation":"

    Contextual data, such as the user's device fingerprint, IP address, or location, used for evaluating the risk of an unexpected event by Amazon Cognito advanced security.

    ", + "sensitive":true }, "UserFilterType":{ "type":"string", diff --git a/services/cognitosync/pom.xml b/services/cognitosync/pom.xml index fab179973a38..48507a1fc411 100644 --- a/services/cognitosync/pom.xml +++ b/services/cognitosync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT cognitosync AWS Java SDK :: Services :: Amazon Cognito Sync diff --git a/services/comprehend/pom.xml b/services/comprehend/pom.xml index f873a7dc5c55..aa25e1238ae4 100644 --- a/services/comprehend/pom.xml +++ b/services/comprehend/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 comprehend diff --git a/services/comprehendmedical/pom.xml b/services/comprehendmedical/pom.xml index 8881a0c37948..5ae548f1ed2f 100644 --- a/services/comprehendmedical/pom.xml +++ b/services/comprehendmedical/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT comprehendmedical AWS Java SDK :: Services :: ComprehendMedical diff --git a/services/computeoptimizer/pom.xml b/services/computeoptimizer/pom.xml index 680c77930023..57900201b415 100644 --- a/services/computeoptimizer/pom.xml +++ b/services/computeoptimizer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT computeoptimizer AWS Java SDK :: Services :: Compute Optimizer diff --git a/services/computeoptimizer/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/computeoptimizer/src/main/resources/codegen-resources/endpoint-rule-set.json index 41a58051cf6e..dfdfa8621725 100644 --- a/services/computeoptimizer/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/computeoptimizer/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": 
[ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://compute-optimizer-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://compute-optimizer-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://compute-optimizer-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://compute-optimizer-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://compute-optimizer.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://compute-optimizer.{Region}.{PartitionResult#dnsSuffix}", + 
"url": "https://compute-optimizer.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://compute-optimizer.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json b/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json index 53a841b3d833..68118cd51dc8 100644 --- a/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json +++ b/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json @@ -153,6 +153,26 @@ ], "documentation":"

    Exports optimization recommendations for Lambda functions.

    Recommendations are exported in a comma-separated values (.csv) file, and its metadata in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting Recommendations in the Compute Optimizer User Guide.

    You can have only one Lambda function export job in progress per Amazon Web Services Region.

    " }, + "ExportLicenseRecommendations":{ + "name":"ExportLicenseRecommendations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExportLicenseRecommendationsRequest"}, + "output":{"shape":"ExportLicenseRecommendationsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Export optimization recommendations for your licenses.

    Recommendations are exported in a comma-separated values (CSV) file, and its metadata in a JavaScript Object Notation (JSON) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting Recommendations in the Compute Optimizer User Guide.

    You can have only one license export job in progress per Amazon Web Services Region.
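    Because this operation is introduced by this model change, the Java method and shape names below are the ones the SDK's codegen would derive from it; the bucket and key prefix are placeholders, and the bucket is assumed to already grant Compute Optimizer permission to write the export files.

```java
import software.amazon.awssdk.services.computeoptimizer.ComputeOptimizerClient;
import software.amazon.awssdk.services.computeoptimizer.model.ExportLicenseRecommendationsRequest;
import software.amazon.awssdk.services.computeoptimizer.model.ExportLicenseRecommendationsResponse;
import software.amazon.awssdk.services.computeoptimizer.model.FileFormat;
import software.amazon.awssdk.services.computeoptimizer.model.S3DestinationConfig;

public class ExportLicenseRecommendationsExample {
    public static void main(String[] args) {
        try (ComputeOptimizerClient computeOptimizer = ComputeOptimizerClient.create()) {
            ExportLicenseRecommendationsResponse response = computeOptimizer.exportLicenseRecommendations(
                    ExportLicenseRecommendationsRequest.builder()
                            .s3DestinationConfig(S3DestinationConfig.builder()
                                    .bucket("amzn-s3-demo-bucket")          // placeholder bucket
                                    .keyPrefix("compute-optimizer/license") // placeholder prefix
                                    .build())
                            .fileFormat(FileFormat.CSV) // CSV is the only supported export format
                            .build());
            // Only one license export job can be in progress per Region; keep the job ID to poll its status.
            System.out.println("Export job ID: " + response.jobId());
        }
    }
}
```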

    " + }, "GetAutoScalingGroupRecommendations":{ "name":"GetAutoScalingGroupRecommendations", "http":{ @@ -349,6 +369,26 @@ ], "documentation":"

    Returns Lambda function recommendations.

    Compute Optimizer generates recommendations for functions that meet a specific set of requirements. For more information, see the Supported resources and requirements in the Compute Optimizer User Guide.

    " }, + "GetLicenseRecommendations":{ + "name":"GetLicenseRecommendations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetLicenseRecommendationsRequest"}, + "output":{"shape":"GetLicenseRecommendationsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns license recommendations for Amazon EC2 instances that run on a specific license.

    Compute Optimizer generates recommendations for licenses that meet a specific set of requirements. For more information, see the Supported resources and requirements in the Compute Optimizer User Guide.
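    A sketch of retrieving license recommendations with the client generated from this model, filtered to the NotOptimized finding; the filter name and value come from the LicenseRecommendationFilterName and LicenseFinding enums added in this patch.

```java
import software.amazon.awssdk.services.computeoptimizer.ComputeOptimizerClient;
import software.amazon.awssdk.services.computeoptimizer.model.GetLicenseRecommendationsRequest;
import software.amazon.awssdk.services.computeoptimizer.model.GetLicenseRecommendationsResponse;
import software.amazon.awssdk.services.computeoptimizer.model.LicenseRecommendation;
import software.amazon.awssdk.services.computeoptimizer.model.LicenseRecommendationFilter;

public class GetLicenseRecommendationsExample {
    public static void main(String[] args) {
        try (ComputeOptimizerClient computeOptimizer = ComputeOptimizerClient.create()) {
            GetLicenseRecommendationsResponse response = computeOptimizer.getLicenseRecommendations(
                    GetLicenseRecommendationsRequest.builder()
                            // Return only instances whose license finding is NotOptimized.
                            .filters(LicenseRecommendationFilter.builder()
                                    .name("Finding")
                                    .values("NotOptimized")
                                    .build())
                            .build());
            for (LicenseRecommendation recommendation : response.licenseRecommendations()) {
                System.out.println(recommendation.resourceArn() + " -> " + recommendation.findingAsString());
            }
        }
    }
}
```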

    " + }, "GetRecommendationPreferences":{ "name":"GetRecommendationPreferences", "http":{ @@ -553,6 +593,10 @@ "inferredWorkloadTypes":{ "shape":"InferredWorkloadTypes", "documentation":"

    The applications that might be running on the instances in the Auto Scaling group as inferred by Compute Optimizer.

    Compute Optimizer can infer if one of the following applications might be running on the instances:

    • AmazonEmr - Infers that Amazon EMR might be running on the instances.

    • ApacheCassandra - Infers that Apache Cassandra might be running on the instances.

    • ApacheHadoop - Infers that Apache Hadoop might be running on the instances.

    • Memcached - Infers that Memcached might be running on the instances.

    • NGINX - Infers that NGINX might be running on the instances.

    • PostgreSql - Infers that PostgreSQL might be running on the instances.

    • Redis - Infers that Redis might be running on the instances.

    • Kafka - Infers that Kafka might be running on the instance.

    • SQLServer - Infers that SQLServer might be running on the instance.

    " + }, + "currentInstanceGpuInfo":{ + "shape":"GpuInfo", + "documentation":"

    Describes the GPU accelerator settings for the current instance type of the Auto Scaling group.

    " } }, "documentation":"

    Describes an Auto Scaling group recommendation.

    " @@ -583,6 +627,10 @@ "migrationEffort":{ "shape":"MigrationEffort", "documentation":"

    The level of effort required to migrate from the current instance type to the recommended instance type.

    For example, the migration effort is Low if Amazon EMR is the inferred workload type and an Amazon Web Services Graviton instance type is recommended. The migration effort is Medium if a workload type couldn't be inferred but an Amazon Web Services Graviton instance type is recommended. The migration effort is VeryLow if both the current and recommended instance types are of the same CPU architecture.

    " + }, + "instanceGpuInfo":{ + "shape":"GpuInfo", + "documentation":"

    Describes the GPU accelerator settings for the recommended instance type of the Auto Scaling group.

    " } }, "documentation":"

    Describes a recommendation option for an Auto Scaling group.

    " @@ -1346,6 +1394,43 @@ "s3Destination":{"shape":"S3Destination"} } }, + "ExportLicenseRecommendationsRequest":{ + "type":"structure", + "required":["s3DestinationConfig"], + "members":{ + "accountIds":{ + "shape":"AccountIds", + "documentation":"

    The IDs of the Amazon Web Services accounts for which to export license recommendations.

    If your account is the management account of an organization, use this parameter to specify the member account for which you want to export recommendations.

    This parameter can't be specified together with the include member accounts parameter. The parameters are mutually exclusive.

    If this parameter is omitted, recommendations for member accounts aren't included in the export.

    You can specify multiple account IDs per request.

    " + }, + "filters":{ + "shape":"LicenseRecommendationFilters", + "documentation":"

    An array of objects to specify a filter that exports a more specific set of license recommendations.

    " + }, + "fieldsToExport":{ + "shape":"ExportableLicenseFields", + "documentation":"

    The recommendations data to include in the export file. For more information about the fields that can be exported, see Exported files in the Compute Optimizer User Guide.

    " + }, + "s3DestinationConfig":{"shape":"S3DestinationConfig"}, + "fileFormat":{ + "shape":"FileFormat", + "documentation":"

    The format of the export file.

    A CSV file is the only export format currently supported.

    " + }, + "includeMemberAccounts":{ + "shape":"IncludeMemberAccounts", + "documentation":"

    Indicates whether to include recommendations for resources in all member accounts of the organization if your account is the management account of an organization.

    The member accounts must also be opted in to Compute Optimizer, and trusted access for Compute Optimizer must be enabled in the organization account. For more information, see Compute Optimizer and Amazon Web Services Organizations trusted access in the Compute Optimizer User Guide.

    If this parameter is omitted, recommendations for member accounts of the organization aren't included in the export file.

    This parameter cannot be specified together with the account IDs parameter. The parameters are mutually exclusive.

    " + } + } + }, + "ExportLicenseRecommendationsResponse":{ + "type":"structure", + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

    The identification number of the export job.

    To view the status of an export job, use the DescribeRecommendationExportJobs action and specify the job ID.
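    A sketch of checking an export job's status with the existing DescribeRecommendationExportJobs operation; the job ID is a placeholder for the value returned by an export request.

```java
import software.amazon.awssdk.services.computeoptimizer.ComputeOptimizerClient;
import software.amazon.awssdk.services.computeoptimizer.model.DescribeRecommendationExportJobsRequest;

public class DescribeExportJobExample {
    public static void main(String[] args) {
        String jobId = "<job ID returned by the export request>"; // placeholder

        try (ComputeOptimizerClient computeOptimizer = ComputeOptimizerClient.create()) {
            computeOptimizer.describeRecommendationExportJobs(
                            DescribeRecommendationExportJobsRequest.builder()
                                    .jobIds(jobId)
                                    .build())
                    .recommendationExportJobs()
                    // Prints, for example, "<job ID>: InProgress" or "<job ID>: Complete".
                    .forEach(job -> System.out.println(job.jobId() + ": " + job.statusAsString()));
        }
    }
}
```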

    " + }, + "s3Destination":{"shape":"S3Destination"} + } + }, "ExportableAutoScalingGroupField":{ "type":"string", "enum":[ @@ -1402,7 +1487,13 @@ "EffectiveRecommendationPreferencesEnhancedInfrastructureMetrics", "EffectiveRecommendationPreferencesInferredWorkloadTypes", "InferredWorkloadTypes", - "RecommendationOptionsMigrationEffort" + "RecommendationOptionsMigrationEffort", + "CurrentInstanceGpuInfo", + "RecommendationOptionsInstanceGpuInfo", + "UtilizationMetricsGpuPercentageMaximum", + "UtilizationMetricsGpuMemoryPercentageMaximum", + "RecommendationOptionsProjectedUtilizationMetricsGpuPercentageMaximum", + "RecommendationOptionsProjectedUtilizationMetricsGpuMemoryPercentageMaximum" ] }, "ExportableAutoScalingGroupFields":{ @@ -1501,7 +1592,14 @@ "InstanceState", "Tags", "ExternalMetricStatusCode", - "ExternalMetricStatusReason" + "ExternalMetricStatusReason", + "CurrentInstanceGpuInfo", + "RecommendationOptionsInstanceGpuInfo", + "UtilizationMetricsGpuPercentageMaximum", + "UtilizationMetricsGpuMemoryPercentageMaximum", + "RecommendationOptionsProjectedUtilizationMetricsGpuPercentageMaximum", + "RecommendationOptionsProjectedUtilizationMetricsGpuMemoryPercentageMaximum", + "Idle" ] }, "ExportableInstanceFields":{ @@ -1544,6 +1642,36 @@ "type":"list", "member":{"shape":"ExportableLambdaFunctionField"} }, + "ExportableLicenseField":{ + "type":"string", + "enum":[ + "AccountId", + "ResourceArn", + "LookbackPeriodInDays", + "LastRefreshTimestamp", + "Finding", + "FindingReasonCodes", + "CurrentLicenseConfigurationNumberOfCores", + "CurrentLicenseConfigurationInstanceType", + "CurrentLicenseConfigurationOperatingSystem", + "CurrentLicenseConfigurationLicenseName", + "CurrentLicenseConfigurationLicenseEdition", + "CurrentLicenseConfigurationLicenseModel", + "CurrentLicenseConfigurationLicenseVersion", + "CurrentLicenseConfigurationMetricsSource", + "RecommendationOptionsOperatingSystem", + "RecommendationOptionsLicenseEdition", + "RecommendationOptionsLicenseModel", + "RecommendationOptionsSavingsOpportunityPercentage", + "RecommendationOptionsEstimatedMonthlySavingsCurrency", + "RecommendationOptionsEstimatedMonthlySavingsValue", + "Tags" + ] + }, + "ExportableLicenseFields":{ + "type":"list", + "member":{"shape":"ExportableLicenseField"} + }, "ExportableVolumeField":{ "type":"string", "enum":[ @@ -1576,7 +1704,8 @@ "RecommendationOptionsEstimatedMonthlySavingsCurrency", "RecommendationOptionsEstimatedMonthlySavingsValue", "RootVolume", - "Tags" + "Tags", + "CurrentConfigurationRootVolume" ] }, "ExportableVolumeFields":{ @@ -2074,6 +2203,48 @@ } } }, + "GetLicenseRecommendationsRequest":{ + "type":"structure", + "members":{ + "resourceArns":{ + "shape":"ResourceArns", + "documentation":"

    The ARN that identifies the Amazon EC2 instance.

    The following is the format of the ARN:

    arn:aws:ec2:region:aws_account_id:instance/instance-id

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to advance to the next page of license recommendations.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of license recommendations to return with a single request.

    To retrieve the remaining results, make another request with the returned nextToken value.

    " + }, + "filters":{ + "shape":"LicenseRecommendationFilters", + "documentation":"

    An array of objects to specify a filter that returns a more specific list of license recommendations.

    " + }, + "accountIds":{ + "shape":"AccountIds", + "documentation":"

    The ID of the Amazon Web Services account for which to return license recommendations.

    If your account is the management account of an organization, use this parameter to specify the member account for which you want to return license recommendations.

    Only one account ID can be specified per request.

    " + } + } + }, + "GetLicenseRecommendationsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to advance to the next page of license recommendations.

    " + }, + "licenseRecommendations":{ + "shape":"LicenseRecommendations", + "documentation":"

    An array of objects that describe license recommendations.

    " + }, + "errors":{ + "shape":"GetRecommendationErrors", + "documentation":"

    An array of objects that describe errors of the request.

    " + } + } + }, "GetRecommendationError":{ "type":"structure", "members":{ @@ -2161,6 +2332,36 @@ } } }, + "Gpu":{ + "type":"structure", + "members":{ + "gpuCount":{ + "shape":"GpuCount", + "documentation":"

    The number of GPUs for the instance type.

    " + }, + "gpuMemorySizeInMiB":{ + "shape":"GpuMemorySizeInMiB", + "documentation":"

    The total size of the memory for the GPU accelerators for the instance type, in MiB.

    " + } + }, + "documentation":"

    Describes the GPU accelerators for the instance type.

    " + }, + "GpuCount":{"type":"integer"}, + "GpuInfo":{ + "type":"structure", + "members":{ + "gpus":{ + "shape":"Gpus", + "documentation":"

    Describes the GPU accelerators for the instance type.

    " + } + }, + "documentation":"

    Describes the GPU accelerator settings for the instance type.

    " + }, + "GpuMemorySizeInMiB":{"type":"integer"}, + "Gpus":{ + "type":"list", + "member":{"shape":"Gpu"} + }, "High":{"type":"long"}, "Identifier":{"type":"string"}, "IncludeMemberAccounts":{"type":"boolean"}, @@ -2212,6 +2413,13 @@ "type":"list", "member":{"shape":"InstanceArn"} }, + "InstanceIdle":{ + "type":"string", + "enum":[ + "True", + "False" + ] + }, "InstanceName":{"type":"string"}, "InstanceRecommendation":{ "type":"structure", @@ -2283,6 +2491,14 @@ "externalMetricStatus":{ "shape":"ExternalMetricStatus", "documentation":"

    An object that describes Compute Optimizer's integration status with your external metrics provider.

    " + }, + "currentInstanceGpuInfo":{ + "shape":"GpuInfo", + "documentation":"

    Describes the GPU accelerator settings for the current instance type.

    " + }, + "idle":{ + "shape":"InstanceIdle", + "documentation":"

    Describes if an Amazon EC2 instance is idle.

    " } }, "documentation":"

    Describes an Amazon EC2 instance recommendation.

    " @@ -2305,7 +2521,11 @@ "DiskIOPSOverprovisioned", "DiskIOPSUnderprovisioned", "DiskThroughputOverprovisioned", - "DiskThroughputUnderprovisioned" + "DiskThroughputUnderprovisioned", + "GPUUnderprovisioned", + "GPUOverprovisioned", + "GPUMemoryUnderprovisioned", + "GPUMemoryOverprovisioned" ] }, "InstanceRecommendationFindingReasonCodes":{ @@ -2342,6 +2562,10 @@ "migrationEffort":{ "shape":"MigrationEffort", "documentation":"

    The level of effort required to migrate from the current instance type to the recommended instance type.

    For example, the migration effort is Low if Amazon EMR is the inferred workload type and an Amazon Web Services Graviton instance type is recommended. The migration effort is Medium if a workload type couldn't be inferred but an Amazon Web Services Graviton instance type is recommended. The migration effort is VeryLow if both the current and recommended instance types are of the same CPU architecture.

    " + }, + "instanceGpuInfo":{ + "shape":"GpuInfo", + "documentation":"

    Describes the GPU accelerator settings for the recommended instance type.

    " } }, "documentation":"

    Describes a recommendation option for an Amazon EC2 instance.

    " @@ -2625,6 +2849,185 @@ }, "LastRefreshTimestamp":{"type":"timestamp"}, "LastUpdatedTimestamp":{"type":"timestamp"}, + "LicenseConfiguration":{ + "type":"structure", + "members":{ + "numberOfCores":{ + "shape":"NumberOfCores", + "documentation":"

    The current number of cores associated with the instance.

    " + }, + "instanceType":{ + "shape":"InstanceType", + "documentation":"

    The instance type used in the license.

    " + }, + "operatingSystem":{ + "shape":"OperatingSystem", + "documentation":"

    The operating system of the instance.

    " + }, + "licenseEdition":{ + "shape":"LicenseEdition", + "documentation":"

    The edition of the license for the application that runs on the instance.

    " + }, + "licenseName":{ + "shape":"LicenseName", + "documentation":"

    The name of the license for the application that runs on the instance.

    " + }, + "licenseModel":{ + "shape":"LicenseModel", + "documentation":"

    The license type associated with the instance.

    " + }, + "licenseVersion":{ + "shape":"LicenseVersion", + "documentation":"

    The version of the license for the application that runs on the instance.

    " + }, + "metricsSource":{ + "shape":"MetricsSource", + "documentation":"

    The list of metric sources required to generate recommendations for commercial software licenses.

    " + } + }, + "documentation":"

    Describes the configuration of a license for an Amazon EC2 instance.

    " + }, + "LicenseEdition":{ + "type":"string", + "enum":[ + "Enterprise", + "Standard", + "Free", + "NoLicenseEditionFound" + ] + }, + "LicenseFinding":{ + "type":"string", + "enum":[ + "InsufficientMetrics", + "Optimized", + "NotOptimized" + ] + }, + "LicenseFindingReasonCode":{ + "type":"string", + "enum":[ + "InvalidCloudWatchApplicationInsightsSetup", + "CloudWatchApplicationInsightsError", + "LicenseOverprovisioned", + "Optimized" + ] + }, + "LicenseFindingReasonCodes":{ + "type":"list", + "member":{"shape":"LicenseFindingReasonCode"} + }, + "LicenseModel":{ + "type":"string", + "enum":[ + "LicenseIncluded", + "BringYourOwnLicense" + ] + }, + "LicenseName":{ + "type":"string", + "enum":["SQLServer"] + }, + "LicenseRecommendation":{ + "type":"structure", + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The ARN that identifies the Amazon EC2 instance.

    " + }, + "accountId":{ + "shape":"AccountId", + "documentation":"

    The Amazon Web Services account ID of the license.

    " + }, + "currentLicenseConfiguration":{ + "shape":"LicenseConfiguration", + "documentation":"

    An object that describes the current configuration of an instance that runs on a license.

    " + }, + "lookbackPeriodInDays":{ + "shape":"LookBackPeriodInDays", + "documentation":"

    The number of days for which utilization metrics were analyzed for an instance that runs on a license.

    " + }, + "lastRefreshTimestamp":{ + "shape":"LastRefreshTimestamp", + "documentation":"

    The timestamp of when the license recommendation was last generated.

    " + }, + "finding":{ + "shape":"LicenseFinding", + "documentation":"

    The finding classification for an instance that runs on a license.

    Findings include:

    • InsufficientMetrics — When Compute Optimizer detects that your CloudWatch Application Insights isn't enabled or is enabled with insufficient permissions.

    • NotOptimized — When Compute Optimizer detects that your EC2 infrastructure isn't using any of the SQL server license features you're paying for, a license is considered not optimized.

    • Optimized — When Compute Optimizer detects that all specifications of your license meet the performance requirements of your workload.

    " + }, + "findingReasonCodes":{ + "shape":"LicenseFindingReasonCodes", + "documentation":"

    The reason for the finding classification for an instance that runs on a license.

    Finding reason codes include:

    • Optimized — All specifications of your license meet the performance requirements of your workload.

    • LicenseOverprovisioned — A license is considered over-provisioned when your license can be downgraded while still meeting the performance requirements of your workload.

    • InvalidCloudWatchApplicationInsightsSetup — CloudWatch Application Insights isn't configured properly.

    • CloudWatchApplicationInsightsError — There is a CloudWatch Application Insights error.

    " + }, + "licenseRecommendationOptions":{ + "shape":"LicenseRecommendationOptions", + "documentation":"

    An array of objects that describe the license recommendation options.

    " + }, + "tags":{ + "shape":"Tags", + "documentation":"

    A list of tags assigned to an EC2 instance.

    " + } + }, + "documentation":"

    Describes a license recommendation for an EC2 instance.

    " + }, + "LicenseRecommendationFilter":{ + "type":"structure", + "members":{ + "name":{ + "shape":"LicenseRecommendationFilterName", + "documentation":"

    The name of the filter.

    Specify Finding to return recommendations with a specific finding classification.

    Specify FindingReasonCode to return recommendations with a specific finding reason code.

    You can filter your license recommendations by tag:key and tag-key tags.

    A tag:key is a key and value combination of a tag assigned to your license recommendations. Use the tag key in the filter name and the tag value as the filter value. For example, to find all license recommendations that have a tag with the key of Owner and the value of TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    A tag-key is the key of a tag assigned to your license recommendations. Use this filter to find all of your license recommendations that have a tag with a specific key. This doesn’t consider the tag value. For example, you can find your license recommendations with a tag key value of Owner or without any tag keys assigned.

    " + }, + "values":{ + "shape":"FilterValues", + "documentation":"

    The value of the filter.

    The valid values for this parameter are as follows, depending on what you specify for the name parameter:

    • If you specify the name parameter as Finding, then specify Optimized, NotOptimized, or InsufficientMetrics.

    • If you specify the name parameter as FindingReasonCode, then specify Optimized, LicenseOverprovisioned, InvalidCloudWatchApplicationInsightsSetup, or CloudWatchApplicationInsightsError.

    " + } + }, + "documentation":"

    Describes a filter that returns a more specific list of license recommendations. Use this filter with the GetLicenseRecommendations action.

    " + }, + "LicenseRecommendationFilterName":{ + "type":"string", + "enum":[ + "Finding", + "FindingReasonCode", + "LicenseName" + ] + }, + "LicenseRecommendationFilters":{ + "type":"list", + "member":{"shape":"LicenseRecommendationFilter"} + }, + "LicenseRecommendationOption":{ + "type":"structure", + "members":{ + "rank":{ + "shape":"Rank", + "documentation":"

    The rank of the license recommendation option.

    The top recommendation option is ranked as 1.

    " + }, + "operatingSystem":{ + "shape":"OperatingSystem", + "documentation":"

    The operating system of a license recommendation option.

    " + }, + "licenseEdition":{ + "shape":"LicenseEdition", + "documentation":"

    The recommended edition of the license for the application that runs on the instance.

    " + }, + "licenseModel":{ + "shape":"LicenseModel", + "documentation":"

    The recommended license type associated with the instance.

    " + }, + "savingsOpportunity":{"shape":"SavingsOpportunity"} + }, + "documentation":"

    Describes the recommendation options for licenses.

    " + }, + "LicenseRecommendationOptions":{ + "type":"list", + "member":{"shape":"LicenseRecommendationOption"} + }, + "LicenseRecommendations":{ + "type":"list", + "member":{"shape":"LicenseRecommendation"} + }, + "LicenseVersion":{"type":"string"}, "LimitExceededException":{ "type":"structure", "members":{ @@ -2679,9 +3082,30 @@ "NETWORK_IN_BYTES_PER_SECOND", "NETWORK_OUT_BYTES_PER_SECOND", "NETWORK_PACKETS_IN_PER_SECOND", - "NETWORK_PACKETS_OUT_PER_SECOND" + "NETWORK_PACKETS_OUT_PER_SECOND", + "GPU_PERCENTAGE", + "GPU_MEMORY_PERCENTAGE" ] }, + "MetricProviderArn":{"type":"string"}, + "MetricSource":{ + "type":"structure", + "members":{ + "provider":{ + "shape":"MetricSourceProvider", + "documentation":"

    The name of the metric source provider.

    " + }, + "providerArn":{ + "shape":"MetricProviderArn", + "documentation":"

    The ARN of the metric source provider.

    " + } + }, + "documentation":"

    The list of metric sources required to generate recommendations for commercial software licenses.

    " + }, + "MetricSourceProvider":{ + "type":"string", + "enum":["CloudWatchApplicationInsights"] + }, "MetricStatistic":{ "type":"string", "enum":[ @@ -2694,6 +3118,10 @@ "type":"list", "member":{"shape":"MetricValue"} }, + "MetricsSource":{ + "type":"list", + "member":{"shape":"MetricSource"} + }, "MigrationEffort":{ "type":"string", "enum":[ @@ -2717,8 +3145,10 @@ "NullableCpu":{"type":"integer"}, "NullableMemory":{"type":"integer"}, "NullableMemoryReservation":{"type":"integer"}, + "NumberOfCores":{"type":"integer"}, "NumberOfInvocations":{"type":"long"}, "NumberOfMemberAccountsOptedIn":{"type":"integer"}, + "OperatingSystem":{"type":"string"}, "OptInRequiredException":{ "type":"structure", "members":{ @@ -2754,7 +3184,7 @@ "members":{ "name":{ "shape":"MetricName", - "documentation":"

    The name of the projected utilization metric.

    The following projected utilization metrics are returned:

    • Cpu - The projected percentage of allocated EC2 compute units that would be in use on the recommendation option had you used that resource during the analyzed period. This metric identifies the processing power required to run an application on the recommendation option.

      Depending on the instance type, tools in your operating system can show a lower percentage than CloudWatch when the instance is not allocated a full processor core.

      Units: Percent

    • Memory - The percentage of memory that would be in use on the recommendation option had you used that resource during the analyzed period. This metric identifies the amount of memory required to run an application on the recommendation option.

      Units: Percent

      The Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.

    " + "documentation":"

    The name of the projected utilization metric.

    The following projected utilization metrics are returned:

    • Cpu - The projected percentage of allocated EC2 compute units that would be in use on the recommendation option had you used that resource during the analyzed period. This metric identifies the processing power required to run an application on the recommendation option.

      Depending on the instance type, tools in your operating system can show a lower percentage than CloudWatch when the instance is not allocated a full processor core.

    • Memory - The percentage of memory that would be in use on the recommendation option had you used that resource during the analyzed period. This metric identifies the amount of memory required to run an application on the recommendation option.

      Units: Percent

      The Memory metric is only returned for resources with the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.

    • GPU - The projected percentage of allocated GPUs if you adjust your configurations to Compute Optimizer's recommendation option.

    • GPU_MEMORY - The projected percentage of total GPU memory if you adjust your configurations to Compute Optimizer's recommendation option.

      The GPU and GPU_MEMORY metrics are only returned for resources with the unified CloudWatch Agent installed on them. For more information, see Enabling NVIDIA GPU utilization with the CloudWatch Agent.

    " }, "timestamps":{ "shape":"Timestamps", @@ -2765,7 +3195,7 @@ "documentation":"

    The values of the projected utilization metrics.

    " } }, - "documentation":"

    Describes a projected utilization metric of a recommendation option, such as an Amazon EC2 instance. This represents the projected utilization of a recommendation option had you used that resource during the analyzed period.

    Compare the utilization metric data of your resource against its projected utilization metric data to determine the performance difference between your current resource and the recommended option.

    The Cpu and Memory metrics are the only projected utilization metrics returned when you run the GetEC2RecommendationProjectedMetrics action. Additionally, the Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.

    " + "documentation":"

    Describes a projected utilization metric of a recommendation option, such as an Amazon EC2 instance. This represents the projected utilization of a recommendation option had you used that resource during the analyzed period.

    Compare the utilization metric data of your resource against its projected utilization metric data to determine the performance difference between your current resource and the recommended option.

    The Cpu, Memory, GPU, and GPU_MEMORY metrics are the only projected utilization metrics returned when you run the GetEC2RecommendationProjectedMetrics action. Additionally, these metrics are only returned for resources with the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent and Enabling NVIDIA GPU utilization with the CloudWatch Agent.

    " }, "ProjectedMetrics":{ "type":"list", @@ -2941,7 +3371,8 @@ "AutoScalingGroup", "EbsVolume", "LambdaFunction", - "EcsService" + "EcsService", + "License" ] }, "RecommendationSources":{ @@ -2977,7 +3408,7 @@ }, "inferredWorkloadSavings":{ "shape":"InferredWorkloadSavings", - "documentation":"

    An array of objects that describes the estimated monthly saving amounts for the instances running on the specified inferredWorkloadTypes. The array contains the top three savings opportunites for the instances running inferred workload types.

    " + "documentation":"

    An array of objects that describe the estimated monthly saving amounts for the instances running on the specified inferredWorkloadTypes. The array contains the top five savings opportunities for the instances that run inferred workload types.

    " } }, "documentation":"

    A summary of a recommendation.
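    A sketch of reading these summaries with the existing GetRecommendationSummaries operation; the accessor names follow the SDK's usual enum-as-string conventions, and no filters or account IDs are assumed.

```java
import software.amazon.awssdk.services.computeoptimizer.ComputeOptimizerClient;
import software.amazon.awssdk.services.computeoptimizer.model.GetRecommendationSummariesRequest;
import software.amazon.awssdk.services.computeoptimizer.model.RecommendationSummary;

public class RecommendationSummariesExample {
    public static void main(String[] args) {
        try (ComputeOptimizerClient computeOptimizer = ComputeOptimizerClient.create()) {
            for (RecommendationSummary summary : computeOptimizer
                    .getRecommendationSummaries(GetRecommendationSummariesRequest.builder().build())
                    .recommendationSummaries()) {
                System.out.println(summary.recommendationResourceTypeAsString()
                        + " (account " + summary.accountId() + ")");
                // Per-finding counts for this resource type, for example Optimized vs. NotOptimized.
                summary.summaries().forEach(s ->
                        System.out.println("  " + s.nameAsString() + " = " + s.value()));
            }
        }
    }
}
```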

    " @@ -3006,6 +3437,10 @@ "member":{"shape":"RecommendedOptionProjectedMetric"} }, "ResourceArn":{"type":"string"}, + "ResourceArns":{ + "type":"list", + "member":{"shape":"ResourceArn"} + }, "ResourceNotFoundException":{ "type":"structure", "members":{ @@ -3023,7 +3458,8 @@ "EbsVolume", "LambdaFunction", "NotApplicable", - "EcsService" + "EcsService", + "License" ] }, "RootVolume":{"type":"boolean"}, @@ -3239,7 +3675,7 @@ "members":{ "name":{ "shape":"MetricName", - "documentation":"

    The name of the utilization metric.

    The following utilization metrics are available:

    • Cpu - The percentage of allocated EC2 compute units that are currently in use on the instance. This metric identifies the processing power required to run an application on the instance.

      Depending on the instance type, tools in your operating system can show a lower percentage than CloudWatch when the instance is not allocated a full processor core.

      Units: Percent

    • Memory - The percentage of memory that is currently in use on the instance. This metric identifies the amount of memory required to run an application on the instance.

      Units: Percent

      The Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.

    • EBS_READ_OPS_PER_SECOND - The completed read operations from all EBS volumes attached to the instance in a specified period of time.

      Unit: Count

    • EBS_WRITE_OPS_PER_SECOND - The completed write operations to all EBS volumes attached to the instance in a specified period of time.

      Unit: Count

    • EBS_READ_BYTES_PER_SECOND - The bytes read from all EBS volumes attached to the instance in a specified period of time.

      Unit: Bytes

    • EBS_WRITE_BYTES_PER_SECOND - The bytes written to all EBS volumes attached to the instance in a specified period of time.

      Unit: Bytes

    • DISK_READ_OPS_PER_SECOND - The completed read operations from all instance store volumes available to the instance in a specified period of time.

      If there are no instance store volumes, either the value is 0 or the metric is not reported.

    • DISK_WRITE_OPS_PER_SECOND - The completed write operations from all instance store volumes available to the instance in a specified period of time.

      If there are no instance store volumes, either the value is 0 or the metric is not reported.

    • DISK_READ_BYTES_PER_SECOND - The bytes read from all instance store volumes available to the instance. This metric is used to determine the volume of the data the application reads from the disk of the instance. This can be used to determine the speed of the application.

      If there are no instance store volumes, either the value is 0 or the metric is not reported.

    • DISK_WRITE_BYTES_PER_SECOND - The bytes written to all instance store volumes available to the instance. This metric is used to determine the volume of the data the application writes onto the disk of the instance. This can be used to determine the speed of the application.

      If there are no instance store volumes, either the value is 0 or the metric is not reported.

    • NETWORK_IN_BYTES_PER_SECOND - The number of bytes received by the instance on all network interfaces. This metric identifies the volume of incoming network traffic to a single instance.

    • NETWORK_OUT_BYTES_PER_SECOND - The number of bytes sent out by the instance on all network interfaces. This metric identifies the volume of outgoing network traffic from a single instance.

    • NETWORK_PACKETS_IN_PER_SECOND - The number of packets received by the instance on all network interfaces. This metric identifies the volume of incoming traffic in terms of the number of packets on a single instance.

    • NETWORK_PACKETS_OUT_PER_SECOND - The number of packets sent out by the instance on all network interfaces. This metric identifies the volume of outgoing traffic in terms of the number of packets on a single instance.

    " + "documentation":"

    The name of the utilization metric.

    The following utilization metrics are available:

    • Cpu - The percentage of allocated EC2 compute units that are currently in use on the instance. This metric identifies the processing power required to run an application on the instance.

      Depending on the instance type, tools in your operating system can show a lower percentage than CloudWatch when the instance is not allocated a full processor core.

      Units: Percent

    • Memory - The percentage of memory that is currently in use on the instance. This metric identifies the amount of memory required to run an application on the instance.

      Units: Percent

      The Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.

    • GPU - The percentage of allocated GPUs that currently run on the instance.

    • GPU_MEMORY - The percentage of total GPU memory that currently runs on the instance.

      The GPU and GPU_MEMORY metrics are only returned for resources with the unified CloudWatch Agent installed on them. For more information, see Enabling NVIDIA GPU utilization with the CloudWatch Agent.

    • EBS_READ_OPS_PER_SECOND - The completed read operations from all EBS volumes attached to the instance in a specified period of time.

      Unit: Count

    • EBS_WRITE_OPS_PER_SECOND - The completed write operations to all EBS volumes attached to the instance in a specified period of time.

      Unit: Count

    • EBS_READ_BYTES_PER_SECOND - The bytes read from all EBS volumes attached to the instance in a specified period of time.

      Unit: Bytes

    • EBS_WRITE_BYTES_PER_SECOND - The bytes written to all EBS volumes attached to the instance in a specified period of time.

      Unit: Bytes

    • DISK_READ_OPS_PER_SECOND - The completed read operations from all instance store volumes available to the instance in a specified period of time.

      If there are no instance store volumes, either the value is 0 or the metric is not reported.

    • DISK_WRITE_OPS_PER_SECOND - The completed write operations from all instance store volumes available to the instance in a specified period of time.

      If there are no instance store volumes, either the value is 0 or the metric is not reported.

    • DISK_READ_BYTES_PER_SECOND - The bytes read from all instance store volumes available to the instance. This metric is used to determine the volume of the data the application reads from the disk of the instance. This can be used to determine the speed of the application.

      If there are no instance store volumes, either the value is 0 or the metric is not reported.

    • DISK_WRITE_BYTES_PER_SECOND - The bytes written to all instance store volumes available to the instance. This metric is used to determine the volume of the data the application writes onto the disk of the instance. This can be used to determine the speed of the application.

      If there are no instance store volumes, either the value is 0 or the metric is not reported.

    • NETWORK_IN_BYTES_PER_SECOND - The number of bytes received by the instance on all network interfaces. This metric identifies the volume of incoming network traffic to a single instance.

    • NETWORK_OUT_BYTES_PER_SECOND - The number of bytes sent out by the instance on all network interfaces. This metric identifies the volume of outgoing network traffic from a single instance.

    • NETWORK_PACKETS_IN_PER_SECOND - The number of packets received by the instance on all network interfaces. This metric identifies the volume of incoming traffic in terms of the number of packets on a single instance.

    • NETWORK_PACKETS_OUT_PER_SECOND - The number of packets sent out by the instance on all network interfaces. This metric identifies the volume of outgoing traffic in terms of the number of packets on a single instance.

    " }, "statistic":{ "shape":"MetricStatistic", diff --git a/services/config/pom.xml b/services/config/pom.xml index 650239e3d8aa..d90e950e69fb 100644 --- a/services/config/pom.xml +++ b/services/config/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT config AWS Java SDK :: Services :: AWS Config diff --git a/services/config/src/main/resources/codegen-resources/customization.config b/services/config/src/main/resources/codegen-resources/customization.config index 9a537e0cc596..d113522aaf3f 100644 --- a/services/config/src/main/resources/codegen-resources/customization.config +++ b/services/config/src/main/resources/codegen-resources/customization.config @@ -16,7 +16,7 @@ "getComplianceSummaryByResourceType", "getDiscoveredResourceCounts" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "startConfigRulesEvaluation" ] } diff --git a/services/connect/pom.xml b/services/connect/pom.xml index 1470a73a512e..9822a66a7a16 100644 --- a/services/connect/pom.xml +++ b/services/connect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT connect AWS Java SDK :: Services :: Connect diff --git a/services/connect/src/main/resources/codegen-resources/paginators-1.json b/services/connect/src/main/resources/codegen-resources/paginators-1.json index a4012e4c41c3..2b1d93b7baf6 100644 --- a/services/connect/src/main/resources/codegen-resources/paginators-1.json +++ b/services/connect/src/main/resources/codegen-resources/paginators-1.json @@ -228,6 +228,18 @@ "output_token": "NextToken", "result_key": "UserSummaryList" }, + "ListViewVersions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ViewVersionSummaryList" + }, + "ListViews": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ViewsSummaryList" + }, "SearchAvailablePhoneNumbers": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/services/connect/src/main/resources/codegen-resources/service-2.json b/services/connect/src/main/resources/codegen-resources/service-2.json index a3a1d08c02a2..b289c90e50a7 100644 --- a/services/connect/src/main/resources/codegen-resources/service-2.json +++ b/services/connect/src/main/resources/codegen-resources/service-2.json @@ -427,7 +427,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    This API is in preview release for Amazon Connect and is subject to change.

    Creates a new queue for the specified Amazon Connect instance.

    If the number being used in the input is claimed to a traffic distribution group, and you are calling this API using an instance in the Amazon Web Services Region where the traffic distribution group was created, you can use either a full phone number ARN or UUID value for the OutboundCallerIdNumberId value of the OutboundCallerConfig request body parameter. However, if the number is claimed to a traffic distribution group and you are calling this API using an instance in the alternate Amazon Web Services Region associated with the traffic distribution group, you must provide a full phone number ARN. If a UUID is provided in this scenario, you will receive a ResourceNotFoundException.

    Only use the phone number ARN format that doesn't contain instance in the path, for example, arn:aws:connect:us-east-1:1234567890:phone-number/uuid. This is the same ARN format that is returned when you call the ListPhoneNumbersV2 API.

    " + "documentation":"

    This API is in preview release for Amazon Connect and is subject to change.

    Creates a new queue for the specified Amazon Connect instance.

    • If the phone number is claimed to a traffic distribution group that was created in the same Region as the Amazon Connect instance where you are calling this API, then you can use a full phone number ARN or a UUID for OutboundCallerIdNumberId. However, if the phone number is claimed to a traffic distribution group that is in one Region, and you are calling this API from an instance in another Amazon Web Services Region that is associated with the traffic distribution group, you must provide a full phone number ARN. If a UUID is provided in this scenario, you will receive a ResourceNotFoundException.

    • Only use the phone number ARN format that doesn't contain instance in the path, for example, arn:aws:connect:us-east-1:1234567890:phone-number/uuid. This is the same ARN format that is returned when you call the ListPhoneNumbersV2 API.

    • If you plan to use IAM policies to allow/deny access to this API for phone number resources claimed to a traffic distribution group, see Allow or Deny queue API actions for phone numbers in a replica Region.

    " }, "CreateQuickConnect":{ "name":"CreateQuickConnect", @@ -541,7 +541,7 @@ {"shape":"ResourceConflictException"}, {"shape":"ResourceNotReadyException"} ], - "documentation":"

    Creates a traffic distribution group given an Amazon Connect instance that has been replicated.

    For more information about creating traffic distribution groups, see Set up traffic distribution groups in the Amazon Connect Administrator Guide.

    " + "documentation":"

    Creates a traffic distribution group given an Amazon Connect instance that has been replicated.

    You can change the SignInConfig distribution only for a default TrafficDistributionGroup (see the IsDefault parameter in the TrafficDistributionGroup data type). If you call UpdateTrafficDistribution with a modified SignInConfig and a non-default TrafficDistributionGroup, an InvalidRequestException is returned.

    For more information about creating traffic distribution groups, see Set up traffic distribution groups in the Amazon Connect Administrator Guide.

    " }, "CreateUseCase":{ "name":"CreateUseCase", @@ -598,6 +598,49 @@ ], "documentation":"

    Creates a new user hierarchy group.

    " }, + "CreateView":{ + "name":"CreateView", + "http":{ + "method":"PUT", + "requestUri":"/views/{InstanceId}" + }, + "input":{"shape":"CreateViewRequest"}, + "output":{"shape":"CreateViewResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"DuplicateResourceException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

    Creates a new view with the possible status of SAVED or PUBLISHED.

    Each view must have a unique name within an Amazon Connect instance.

    It performs basic content validation if the status is SAVED or full content validation if the status is set to PUBLISHED. An error is returned if validation fails. It associates either the $SAVED qualifier or both of the $SAVED and $LATEST qualifiers with the provided view content based on the status. The view is idempotent if ClientToken is provided.
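
    A minimal sketch of calling this operation from the AWS SDK for Java v2. The client and model class names are assumed to follow the SDK's code generation conventions for the shapes added in this model; the instance ID, view name, template, and client token below are hypothetical placeholders.

        import software.amazon.awssdk.services.connect.ConnectClient;
        import software.amazon.awssdk.services.connect.model.CreateViewRequest;
        import software.amazon.awssdk.services.connect.model.CreateViewResponse;
        import software.amazon.awssdk.services.connect.model.ViewInputContent;
        import software.amazon.awssdk.services.connect.model.ViewStatus;

        public class CreateViewSketch {
            public static void main(String[] args) {
                try (ConnectClient connect = ConnectClient.create()) {
                    // SAVED triggers basic content validation; PUBLISHED triggers full validation.
                    CreateViewResponse response = connect.createView(CreateViewRequest.builder()
                            .instanceId("11111111-2222-3333-4444-555555555555") // hypothetical instance ID
                            .name("AgentOnboardingView")                        // must be unique per instance
                            .status(ViewStatus.SAVED)
                            .clientToken("create-view-2023-09-01")              // makes the request idempotent
                            .content(ViewInputContent.builder()
                                    .template("{\"Head\":{},\"Body\":[]}")      // hypothetical view template
                                    .actions("Submit")                          // possible actions from the view
                                    .build())
                            .build());
                    System.out.println("Created view " + response.view().id()
                            + " with status " + response.view().statusAsString());
                }
            }
        }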

    ", + "idempotent":true + }, + "CreateViewVersion":{ + "name":"CreateViewVersion", + "http":{ + "method":"PUT", + "requestUri":"/views/{InstanceId}/{ViewId}/versions" + }, + "input":{"shape":"CreateViewVersionRequest"}, + "output":{"shape":"CreateViewVersionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

    Publishes a new version of the specified view.

    Versions are immutable and monotonically increasing.

    It returns the highest version if there is no change in content compared to that version. An error is displayed if the supplied ViewContentSha256 is different from the ViewContentSha256 of the $LATEST alias.
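
    A hedged sketch (class and method names assumed from the SDK's codegen for these shapes) of publishing a version while passing the checksum of the latest content, so the call fails if the content changed underneath you. The instance ID, view ID, and checksum are placeholders.

        import software.amazon.awssdk.services.connect.ConnectClient;
        import software.amazon.awssdk.services.connect.model.CreateViewVersionRequest;
        import software.amazon.awssdk.services.connect.model.CreateViewVersionResponse;

        public class CreateViewVersionSketch {
            public static void main(String[] args) {
                String instanceId = "11111111-2222-3333-4444-555555555555"; // hypothetical
                String viewId = "my-view-id";                               // ViewId or view ARN
                String latestSha256 = "abc123";                             // from the view's ViewContentSha256
                try (ConnectClient connect = ConnectClient.create()) {
                    CreateViewVersionResponse response = connect.createViewVersion(
                            CreateViewVersionRequest.builder()
                                    .instanceId(instanceId)
                                    .viewId(viewId)
                                    .versionDescription("First published version")
                                    .viewContentSha256(latestSha256)
                                    .build());
                    // Versions are immutable and monotonically increasing.
                    System.out.println("Published version " + response.view().version());
                }
            }
        }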

    ", + "idempotent":true + }, "CreateVocabulary":{ "name":"CreateVocabulary", "http":{ @@ -931,6 +974,44 @@ ], "documentation":"

    Deletes an existing user hierarchy group. It must not be associated with any agents or have any active child groups.

    " }, + "DeleteView":{ + "name":"DeleteView", + "http":{ + "method":"DELETE", + "requestUri":"/views/{InstanceId}/{ViewId}" + }, + "input":{"shape":"DeleteViewRequest"}, + "output":{"shape":"DeleteViewResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

    Deletes the view entirely. It deletes the view and all associated qualifiers (versions and aliases).

    " + }, + "DeleteViewVersion":{ + "name":"DeleteViewVersion", + "http":{ + "method":"DELETE", + "requestUri":"/views/{InstanceId}/{ViewId}/versions/{ViewVersion}" + }, + "input":{"shape":"DeleteViewVersionRequest"}, + "output":{"shape":"DeleteViewVersionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

    Deletes the particular version specified in the ViewVersion identifier.

    " + }, "DeleteVocabulary":{ "name":"DeleteVocabulary", "http":{ @@ -1304,6 +1385,24 @@ ], "documentation":"

    Describes the hierarchy structure of the specified Amazon Connect instance.

    " }, + "DescribeView":{ + "name":"DescribeView", + "http":{ + "method":"GET", + "requestUri":"/views/{InstanceId}/{ViewId}" + }, + "input":{"shape":"DescribeViewRequest"}, + "output":{"shape":"DescribeViewResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Retrieves the view for the specified Amazon Connect instance and view identifier.

    The view identifier can be supplied as a ViewId or ARN.

    $SAVED needs to be supplied if a view is unpublished.

    The view identifier can contain an optional qualifier, for example, <view-id>:$SAVED, which is either an actual version number or an Amazon Connect managed qualifier $SAVED | $LATEST. If it is not supplied, then $LATEST is assumed for customer managed views and an error is returned if there is no published content available. Version 1 is assumed for Amazon Web Services managed views.
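
    An illustrative sketch (assumed generated class names, hypothetical IDs) of reading unpublished content by appending the $SAVED qualifier to the view identifier; omitting the qualifier returns $LATEST for customer managed views.

        import software.amazon.awssdk.services.connect.ConnectClient;
        import software.amazon.awssdk.services.connect.model.DescribeViewRequest;
        import software.amazon.awssdk.services.connect.model.View;

        public class DescribeViewSketch {
            public static void main(String[] args) {
                String instanceId = "11111111-2222-3333-4444-555555555555"; // hypothetical
                String viewId = "my-view-id";                               // hypothetical
                try (ConnectClient connect = ConnectClient.create()) {
                    // ":$SAVED" returns the saved (unpublished) content; no qualifier means $LATEST.
                    View saved = connect.describeView(DescribeViewRequest.builder()
                                    .instanceId(instanceId)
                                    .viewId(viewId + ":$SAVED")
                                    .build())
                            .view();
                    System.out.println(saved.name() + " -> " + saved.statusAsString());
                }
            }
        }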

    " + }, "DescribeVocabulary":{ "name":"DescribeVocabulary", "http":{ @@ -1949,7 +2048,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Provides information about the phone numbers for the specified Amazon Connect instance.

    For more information about phone numbers, see Set Up Phone Numbers for Your Contact Center in the Amazon Connect Administrator Guide.

    The phone number Arn value that is returned from each of the items in the PhoneNumberSummaryList cannot be used to tag phone number resources. It will fail with a ResourceNotFoundException. Instead, use the ListPhoneNumbersV2 API. It returns the new phone number ARN that can be used to tag phone number resources.

    " + "documentation":"

    Provides information about the phone numbers for the specified Amazon Connect instance.

    For more information about phone numbers, see Set Up Phone Numbers for Your Contact Center in the Amazon Connect Administrator Guide.

    • We recommend using ListPhoneNumbersV2 to return phone number types. ListPhoneNumbers doesn't support number types UIFN, SHARED, THIRD_PARTY_TF, and THIRD_PARTY_DID. While it returns numbers of those types, it incorrectly lists them as TOLL_FREE or DID.

    • The phone number Arn value that is returned from each of the items in the PhoneNumberSummaryList cannot be used to tag phone number resources. It will fail with a ResourceNotFoundException. Instead, use the ListPhoneNumbersV2 API. It returns the new phone number ARN that can be used to tag phone number resources.

    " }, "ListPhoneNumbersV2":{ "name":"ListPhoneNumbersV2", @@ -2255,6 +2354,42 @@ ], "documentation":"

    Provides summary information about the users for the specified Amazon Connect instance.

    " }, + "ListViewVersions":{ + "name":"ListViewVersions", + "http":{ + "method":"GET", + "requestUri":"/views/{InstanceId}/{ViewId}/versions" + }, + "input":{"shape":"ListViewVersionsRequest"}, + "output":{"shape":"ListViewVersionsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Returns all the available versions for the specified Amazon Connect instance and view identifier.

    Results will be sorted from highest to lowest.

    " + }, + "ListViews":{ + "name":"ListViews", + "http":{ + "method":"GET", + "requestUri":"/views/{InstanceId}" + }, + "input":{"shape":"ListViewsRequest"}, + "output":{"shape":"ListViewsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Returns views in the given instance.

    Results are sorted primarily by type, and secondarily by name.
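
    Because this model registers ListViews (and ListViewVersions) as paginated operations, the SDK should generate a paginator for them; the sketch below assumes that generated method and uses a hypothetical instance ID.

        import software.amazon.awssdk.services.connect.ConnectClient;
        import software.amazon.awssdk.services.connect.model.ListViewsRequest;
        import software.amazon.awssdk.services.connect.model.ListViewsResponse;
        import software.amazon.awssdk.services.connect.model.ViewSummary;
        import software.amazon.awssdk.services.connect.model.ViewType;

        public class ListViewsSketch {
            public static void main(String[] args) {
                try (ConnectClient connect = ConnectClient.create()) {
                    ListViewsRequest request = ListViewsRequest.builder()
                            .instanceId("11111111-2222-3333-4444-555555555555") // hypothetical
                            .type(ViewType.CUSTOMER_MANAGED)
                            .maxResults(100)
                            .build();
                    // The paginator follows NextToken automatically, page by page.
                    for (ListViewsResponse page : connect.listViewsPaginator(request)) {
                        for (ViewSummary view : page.viewsSummaryList()) {
                            System.out.println(view.id() + " " + view.name());
                        }
                    }
                }
            }
        }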

    " + }, "MonitorContact":{ "name":"MonitorContact", "http":{ @@ -2635,7 +2770,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Ends the specified contact. This call does not work for the following initiation methods:

    • DISCONNECT

    • TRANSFER

    • QUEUE_TRANSFER

    " + "documentation":"

    Ends the specified contact. This call does not work for voice contacts that use the following initiation methods:

    • DISCONNECT

    • TRANSFER

    • QUEUE_TRANSFER

    Chat and task contacts, however, can be terminated in any state, regardless of initiation method.
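
    A sketch of ending a contact with the existing StopContact operation. The request members used here (InstanceId, ContactId) are not part of this diff and are assumed from the current Connect model; both IDs are hypothetical placeholders.

        import software.amazon.awssdk.services.connect.ConnectClient;
        import software.amazon.awssdk.services.connect.model.StopContactRequest;

        public class StopContactSketch {
            public static void main(String[] args) {
                try (ConnectClient connect = ConnectClient.create()) {
                    // Works for chat and task contacts in any state; voice contacts only for
                    // supported initiation methods (not DISCONNECT, TRANSFER, or QUEUE_TRANSFER).
                    connect.stopContact(StopContactRequest.builder()
                            .instanceId("11111111-2222-3333-4444-555555555555") // hypothetical
                            .contactId("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")  // hypothetical
                            .build());
                }
            }
        }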

    " }, "StopContactRecording":{ "name":"StopContactRecording", @@ -3113,7 +3248,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    This API is in preview release for Amazon Connect and is subject to change.

    Updates the outbound caller ID name, number, and outbound whisper flow for a specified queue.

    If the number being used in the input is claimed to a traffic distribution group, and you are calling this API using an instance in the Amazon Web Services Region where the traffic distribution group was created, you can use either a full phone number ARN or UUID value for the OutboundCallerIdNumberId value of the OutboundCallerConfig request body parameter. However, if the number is claimed to a traffic distribution group and you are calling this API using an instance in the alternate Amazon Web Services Region associated with the traffic distribution group, you must provide a full phone number ARN. If a UUID is provided in this scenario, you will receive a ResourceNotFoundException.

    Only use the phone number ARN format that doesn't contain instance in the path, for example, arn:aws:connect:us-east-1:1234567890:phone-number/uuid. This is the same ARN format that is returned when you call the ListPhoneNumbersV2 API.

    " + "documentation":"

    This API is in preview release for Amazon Connect and is subject to change.

    Updates the outbound caller ID name, number, and outbound whisper flow for a specified queue.

    • If the phone number is claimed to a traffic distribution group that was created in the same Region as the Amazon Connect instance where you are calling this API, then you can use a full phone number ARN or a UUID for OutboundCallerIdNumberId. However, if the phone number is claimed to a traffic distribution group that is in one Region, and you are calling this API from an instance in another Amazon Web Services Region that is associated with the traffic distribution group, you must provide a full phone number ARN. If a UUID is provided in this scenario, you will receive a ResourceNotFoundException.

    • Only use the phone number ARN format that doesn't contain instance in the path, for example, arn:aws:connect:us-east-1:1234567890:phone-number/uuid. This is the same ARN format that is returned when you call the ListPhoneNumbersV2 API.

    • If you plan to use IAM policies to allow/deny access to this API for phone number resources claimed to a traffic distribution group, see Allow or Deny queue API actions for phone numbers in a replica Region.

    " }, "UpdateQueueStatus":{ "name":"UpdateQueueStatus", @@ -3311,7 +3446,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Updates the traffic distribution for a given traffic distribution group.

    You can change the SignInConfig only for a default TrafficDistributionGroup. If you call UpdateTrafficDistribution with a modified SignInConfig and a non-default TrafficDistributionGroup, an InvalidRequestException is returned.

    For more information about updating a traffic distribution group, see Update telephony traffic distribution across Amazon Web Services Regions in the Amazon Connect Administrator Guide.

    " + "documentation":"

    Updates the traffic distribution for a given traffic distribution group.

    You can change the SignInConfig distribution only for a default TrafficDistributionGroup (see the IsDefault parameter in the TrafficDistributionGroup data type). If you call UpdateTrafficDistribution with a modified SignInConfig and a non-default TrafficDistributionGroup, an InvalidRequestException is returned.

    For more information about updating a traffic distribution group, see Update telephony traffic distribution across Amazon Web Services Regions in the Amazon Connect Administrator Guide.

    " }, "UpdateUserHierarchy":{ "name":"UpdateUserHierarchy", @@ -3426,6 +3561,45 @@ {"shape":"InternalServiceException"} ], "documentation":"

    Assigns the specified security profiles to the specified user.

    " + }, + "UpdateViewContent":{ + "name":"UpdateViewContent", + "http":{ + "method":"POST", + "requestUri":"/views/{InstanceId}/{ViewId}" + }, + "input":{"shape":"UpdateViewContentRequest"}, + "output":{"shape":"UpdateViewContentResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

    Updates the view content of the given view identifier in the specified Amazon Connect instance.

    It performs content validation if Status is set to SAVED and performs full content validation if Status is PUBLISHED. Note that the $SAVED alias' content will always be updated, but the $LATEST alias' content will only be updated if Status is PUBLISHED.
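
    A sketch (assumed generated class names, hypothetical IDs and template) of publishing updated content. Setting Status to PUBLISHED runs full validation and moves both $SAVED and $LATEST, whereas SAVED updates only $SAVED.

        import software.amazon.awssdk.services.connect.ConnectClient;
        import software.amazon.awssdk.services.connect.model.UpdateViewContentRequest;
        import software.amazon.awssdk.services.connect.model.ViewInputContent;
        import software.amazon.awssdk.services.connect.model.ViewStatus;

        public class UpdateViewContentSketch {
            public static void main(String[] args) {
                try (ConnectClient connect = ConnectClient.create()) {
                    connect.updateViewContent(UpdateViewContentRequest.builder()
                            .instanceId("11111111-2222-3333-4444-555555555555") // hypothetical
                            .viewId("my-view-id")                               // hypothetical
                            .status(ViewStatus.PUBLISHED)                       // full validation; updates $SAVED and $LATEST
                            .content(ViewInputContent.builder()
                                    .template("{\"Head\":{},\"Body\":[]}")      // hypothetical updated template
                                    .actions("Submit", "Cancel")
                                    .build())
                            .build());
                }
            }
        }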

    " + }, + "UpdateViewMetadata":{ + "name":"UpdateViewMetadata", + "http":{ + "method":"POST", + "requestUri":"/views/{InstanceId}/{ViewId}/metadata" + }, + "input":{"shape":"UpdateViewMetadataRequest"}, + "output":{"shape":"UpdateViewMetadataResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"DuplicateResourceException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

    Updates the view metadata. Note that either Name or Description must be provided.

    " } }, "shapes":{ @@ -4645,7 +4819,7 @@ "documentation":"

    The message.

    " } }, - "documentation":"

    The contact with the specified ID is not active or does not exist. Applies to Voice calls only, not to Chat, Task, or Voice Callback.

    ", + "documentation":"

    The contact with the specified ID is not active or does not exist. Applies to Voice calls only, not to Chat or Task contacts.

    ", "error":{"httpStatusCode":410}, "exception":true }, @@ -5675,6 +5849,94 @@ } } }, + "CreateViewRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Status", + "Content", + "Name" + ], + "members":{ + "InstanceId":{ + "shape":"ViewsInstanceId", + "documentation":"

    The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.

    ", + "location":"uri", + "locationName":"InstanceId" + }, + "ClientToken":{ + "shape":"ViewsClientToken", + "documentation":"

    A unique Id for each create view request to avoid duplicate view creation. For example, the request is idempotent if a ClientToken is provided.

    " + }, + "Status":{ + "shape":"ViewStatus", + "documentation":"

    Indicates the view status as either SAVED or PUBLISHED. The PUBLISHED status will initiate validation on the content.

    " + }, + "Content":{ + "shape":"ViewInputContent", + "documentation":"

    View content containing all content necessary to render a view except for runtime input data.

    The total uncompressed content has a maximum file size of 400kB.

    " + }, + "Description":{ + "shape":"ViewDescription", + "documentation":"

    The description of the view.

    " + }, + "Name":{ + "shape":"ViewName", + "documentation":"

    The name of the view.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags associated with the view resource (not specific to view version). These tags can be used to organize, track, or control access for this resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

    " + } + } + }, + "CreateViewResponse":{ + "type":"structure", + "members":{ + "View":{ + "shape":"View", + "documentation":"

    A view resource object. Contains metadata and content necessary to render the view.

    " + } + } + }, + "CreateViewVersionRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "ViewId" + ], + "members":{ + "InstanceId":{ + "shape":"ViewsInstanceId", + "documentation":"

    The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.

    ", + "location":"uri", + "locationName":"InstanceId" + }, + "ViewId":{ + "shape":"ViewId", + "documentation":"

    The identifier of the view. Both ViewArn and ViewId can be used.

    ", + "location":"uri", + "locationName":"ViewId" + }, + "VersionDescription":{ + "shape":"ViewDescription", + "documentation":"

    The description for the version being published.

    " + }, + "ViewContentSha256":{ + "shape":"ViewContentSha256", + "documentation":"

    Indicates the checksum value of the latest published view content.

    " + } + } + }, + "CreateViewVersionResponse":{ + "type":"structure", + "members":{ + "View":{ + "shape":"View", + "documentation":"

    All view data is contained within the View object.

    " + } + } + }, "CreateVocabularyRequest":{ "type":"structure", "required":[ @@ -6351,6 +6613,65 @@ } } }, + "DeleteViewRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "ViewId" + ], + "members":{ + "InstanceId":{ + "shape":"ViewsInstanceId", + "documentation":"

    The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.

    ", + "location":"uri", + "locationName":"InstanceId" + }, + "ViewId":{ + "shape":"ViewId", + "documentation":"

    The identifier of the view. Both ViewArn and ViewId can be used.

    ", + "location":"uri", + "locationName":"ViewId" + } + } + }, + "DeleteViewResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteViewVersionRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "ViewId", + "ViewVersion" + ], + "members":{ + "InstanceId":{ + "shape":"ViewsInstanceId", + "documentation":"

    The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.

    ", + "location":"uri", + "locationName":"InstanceId" + }, + "ViewId":{ + "shape":"ViewId", + "documentation":"

    The identifier of the view. Both ViewArn and ViewId can be used.

    ", + "location":"uri", + "locationName":"ViewId" + }, + "ViewVersion":{ + "shape":"ViewVersion", + "documentation":"

    The version number of the view.

    ", + "location":"uri", + "locationName":"ViewVersion" + } + } + }, + "DeleteViewVersionResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteVocabularyRequest":{ "type":"structure", "required":[ @@ -7011,6 +7332,36 @@ } } }, + "DescribeViewRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "ViewId" + ], + "members":{ + "InstanceId":{ + "shape":"ViewsInstanceId", + "documentation":"

    The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.

    ", + "location":"uri", + "locationName":"InstanceId" + }, + "ViewId":{ + "shape":"ViewId", + "documentation":"

    The ViewId of the view. This must be an ARN for Amazon Web Services managed views.

    ", + "location":"uri", + "locationName":"ViewId" + } + } + }, + "DescribeViewResponse":{ + "type":"structure", + "members":{ + "View":{ + "shape":"View", + "documentation":"

    All view data is contained within the View object.

    " + } + } + }, "DescribeVocabularyRequest":{ "type":"structure", "required":[ @@ -11332,6 +11683,97 @@ } } }, + "ListViewVersionsRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "ViewId" + ], + "members":{ + "InstanceId":{ + "shape":"ViewsInstanceId", + "documentation":"

    The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.

    ", + "location":"uri", + "locationName":"InstanceId" + }, + "ViewId":{ + "shape":"ViewId", + "documentation":"

    The identifier of the view. Both ViewArn and ViewId can be used.

    ", + "location":"uri", + "locationName":"ViewId" + }, + "NextToken":{ + "shape":"ViewsNextToken", + "documentation":"

    The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return per page. The default MaxResult size is 100.

    ", + "box":true, + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListViewVersionsResponse":{ + "type":"structure", + "members":{ + "ViewVersionSummaryList":{ + "shape":"ViewVersionSummaryList", + "documentation":"

    A list of view version summaries.

    " + }, + "NextToken":{ + "shape":"ViewsNextToken", + "documentation":"

    The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

    " + } + } + }, + "ListViewsRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"ViewsInstanceId", + "documentation":"

    The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.

    ", + "location":"uri", + "locationName":"InstanceId" + }, + "Type":{ + "shape":"ViewType", + "documentation":"

    The type of the view.

    ", + "location":"querystring", + "locationName":"type" + }, + "NextToken":{ + "shape":"ViewsNextToken", + "documentation":"

    The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return per page. The default MaxResult size is 100.

    ", + "box":true, + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListViewsResponse":{ + "type":"structure", + "members":{ + "ViewsSummaryList":{ + "shape":"ViewsSummaryList", + "documentation":"

    A list of view summaries.

    " + }, + "NextToken":{ + "shape":"ViewsNextToken", + "documentation":"

    The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

    " + } + } + }, "Long":{"type":"long"}, "MaxResult10":{ "type":"integer", @@ -11368,6 +11810,11 @@ "max":7, "min":1 }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, "MaximumResultReturnedException":{ "type":"structure", "members":{ @@ -15148,6 +15595,15 @@ ] }, "Timestamp":{"type":"timestamp"}, + "TooManyRequestsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    Displayed when rate-related API limits are exceeded.

    ", + "error":{"httpStatusCode":429}, + "exception":true + }, "TrafficDistributionGroup":{ "type":"structure", "members":{ @@ -15181,7 +15637,7 @@ }, "IsDefault":{ "shape":"Boolean", - "documentation":"

    Whether this is the default traffic distribution group created during instance replication. The default traffic distribution group cannot be deleted by the DeleteTrafficDistributionGroup API. The default traffic distribution group is deleted as part of the process for deleting a replica.

    You can change the SignInConfig only for a default TrafficDistributionGroup. If you call UpdateTrafficDistribution with a modified SignInConfig and a non-default TrafficDistributionGroup, an InvalidRequestException is returned.

    " + "documentation":"

    Whether this is the default traffic distribution group created during instance replication. The default traffic distribution group cannot be deleted by the DeleteTrafficDistributionGroup API. The default traffic distribution group is deleted as part of the process for deleting a replica.

    You can change the SignInConfig distribution only for a default TrafficDistributionGroup (see the IsDefault parameter in the TrafficDistributionGroup data type). If you call UpdateTrafficDistribution with a modified SignInConfig and a non-default TrafficDistributionGroup, an InvalidRequestException is returned.

    " } }, "documentation":"

    Information about a traffic distribution group.

    " @@ -16714,6 +17170,80 @@ } } }, + "UpdateViewContentRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "ViewId", + "Status", + "Content" + ], + "members":{ + "InstanceId":{ + "shape":"ViewsInstanceId", + "documentation":"

    The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.

    ", + "location":"uri", + "locationName":"InstanceId" + }, + "ViewId":{ + "shape":"ViewId", + "documentation":"

    The identifier of the view. Both ViewArn and ViewId can be used.

    ", + "location":"uri", + "locationName":"ViewId" + }, + "Status":{ + "shape":"ViewStatus", + "documentation":"

    Indicates the view status as either SAVED or PUBLISHED. The PUBLISHED status will initiate validation on the content.

    " + }, + "Content":{ + "shape":"ViewInputContent", + "documentation":"

    View content containing all content necessary to render a view except for runtime input data and the runtime input schema, which is auto-generated by this operation.

    The total uncompressed content has a maximum file size of 400kB.

    " + } + } + }, + "UpdateViewContentResponse":{ + "type":"structure", + "members":{ + "View":{ + "shape":"View", + "documentation":"

    A view resource object. Contains metadata and content necessary to render the view.

    " + } + } + }, + "UpdateViewMetadataRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "ViewId" + ], + "members":{ + "InstanceId":{ + "shape":"ViewsInstanceId", + "documentation":"

    The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.

    ", + "location":"uri", + "locationName":"InstanceId" + }, + "ViewId":{ + "shape":"ViewId", + "documentation":"

    The identifier of the view. Both ViewArn and ViewId can be used.

    ", + "location":"uri", + "locationName":"ViewId" + }, + "Name":{ + "shape":"ViewName", + "documentation":"

    The name of the view.

    " + }, + "Description":{ + "shape":"ViewDescription", + "documentation":"

    The description of the view.

    " + } + } + }, + "UpdateViewMetadataResponse":{ + "type":"structure", + "members":{ + } + }, "Url":{"type":"string"}, "UrlReference":{ "type":"structure", @@ -17109,6 +17639,241 @@ "box":true, "min":1 }, + "View":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"ViewId", + "documentation":"

    The identifier of the view.

    " + }, + "Arn":{ + "shape":"ARN", + "documentation":"

    The Amazon Resource Name (ARN) of the view.

    " + }, + "Name":{ + "shape":"ViewName", + "documentation":"

    The name of the view.

    " + }, + "Status":{ + "shape":"ViewStatus", + "documentation":"

    Indicates the view status as either SAVED or PUBLISHED. The PUBLISHED status will initiate validation on the content.

    " + }, + "Type":{ + "shape":"ViewType", + "documentation":"

    The type of the view - CUSTOMER_MANAGED.

    " + }, + "Description":{ + "shape":"ViewDescription", + "documentation":"

    The description of the view.

    " + }, + "Version":{ + "shape":"ViewVersion", + "documentation":"

    Current version of the view.

    " + }, + "VersionDescription":{ + "shape":"ViewDescription", + "documentation":"

    The description of the version.

    " + }, + "Content":{ + "shape":"ViewContent", + "documentation":"

    View content containing all content necessary to render a view except for runtime input data.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags associated with the view resource (not specific to view version).

    " + }, + "CreatedTime":{ + "shape":"Timestamp", + "documentation":"

    The timestamp of when the view was created.

    " + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

    Latest timestamp of the UpdateViewContent or CreateViewVersion operations.

    " + }, + "ViewContentSha256":{ + "shape":"ViewContentSha256", + "documentation":"

    Indicates the checksum value of the latest published view content.

    " + } + }, + "documentation":"

    A view resource object. Contains metadata and content necessary to render the view.

    " + }, + "ViewAction":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^([\\p{L}\\p{N}_.:\\/=+\\-@()']+[\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@()']*)$", + "sensitive":true + }, + "ViewActions":{ + "type":"list", + "member":{"shape":"ViewAction"} + }, + "ViewContent":{ + "type":"structure", + "members":{ + "InputSchema":{ + "shape":"ViewInputSchema", + "documentation":"

    The schema describing the input data that must be provided for the view template to render.

    " + }, + "Template":{ + "shape":"ViewTemplate", + "documentation":"

    The view template representing the structure of the view.

    " + }, + "Actions":{ + "shape":"ViewActions", + "documentation":"

    A list of possible actions from the view.

    " + } + }, + "documentation":"

    View content containing all content necessary to render a view except for runtime input data.

    " + }, + "ViewContentSha256":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9]$" + }, + "ViewDescription":{ + "type":"string", + "max":4096, + "min":1, + "pattern":"^([\\p{L}\\p{N}_.:\\/=+\\-@,()']+[\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@,()']*)$" + }, + "ViewId":{ + "type":"string", + "max":500, + "min":1, + "pattern":"^[a-zA-Z0-9\\_\\-:\\/$]+$" + }, + "ViewInputContent":{ + "type":"structure", + "members":{ + "Template":{ + "shape":"ViewTemplate", + "documentation":"

    The view template representing the structure of the view.

    " + }, + "Actions":{ + "shape":"ViewActions", + "documentation":"

    A list of possible actions from the view.

    " + } + }, + "documentation":"

    View content containing all content necessary to render a view except for runtime input data and the runtime input schema, which is auto-generated by this operation.

    " + }, + "ViewInputSchema":{ + "type":"string", + "sensitive":true + }, + "ViewName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^([\\p{L}\\p{N}_.:\\/=+\\-@()']+[\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@()']*)$", + "sensitive":true + }, + "ViewStatus":{ + "type":"string", + "enum":[ + "PUBLISHED", + "SAVED" + ] + }, + "ViewSummary":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"ViewId", + "documentation":"

    The identifier of the view.

    " + }, + "Arn":{ + "shape":"ARN", + "documentation":"

    The Amazon Resource Name (ARN) of the view.

    " + }, + "Name":{ + "shape":"ViewName", + "documentation":"

    The name of the view.

    " + }, + "Type":{ + "shape":"ViewType", + "documentation":"

    The type of the view.

    " + }, + "Status":{ + "shape":"ViewStatus", + "documentation":"

    Indicates the view status as either SAVED or PUBLISHED. The PUBLISHED status will initiate validation on the content.

    " + }, + "Description":{ + "shape":"ViewDescription", + "documentation":"

    The description of the view.

    " + } + }, + "documentation":"

    A summary of a view's metadata.

    " + }, + "ViewTemplate":{"type":"string"}, + "ViewType":{ + "type":"string", + "enum":[ + "CUSTOMER_MANAGED", + "AWS_MANAGED" + ] + }, + "ViewVersion":{"type":"integer"}, + "ViewVersionSummary":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"ViewId", + "documentation":"

    The identifier of the view version.

    " + }, + "Arn":{ + "shape":"ARN", + "documentation":"

    The Amazon Resource Name (ARN) of the view version.

    " + }, + "Description":{ + "shape":"ViewDescription", + "documentation":"

    The description of the view version.

    " + }, + "Name":{ + "shape":"ViewName", + "documentation":"

    The name of the view version.

    " + }, + "Type":{ + "shape":"ViewType", + "documentation":"

    The type of the view version.

    " + }, + "Version":{ + "shape":"ViewVersion", + "documentation":"

    The sequentially incremented version number of the view.

    " + }, + "VersionDescription":{ + "shape":"ViewDescription", + "documentation":"

    The description of the view version.

    " + } + }, + "documentation":"

    A summary of a view version's metadata.

    " + }, + "ViewVersionSummaryList":{ + "type":"list", + "member":{"shape":"ViewVersionSummary"} + }, + "ViewsClientToken":{ + "type":"string", + "max":500, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@]*)$" + }, + "ViewsInstanceId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[a-zA-Z0-9\\_\\-:\\/]+$" + }, + "ViewsNextToken":{ + "type":"string", + "max":4096, + "min":1, + "pattern":"^[a-zA-Z0-9=\\/+_.-]+$" + }, + "ViewsSummaryList":{ + "type":"list", + "member":{"shape":"ViewSummary"} + }, "Vocabulary":{ "type":"structure", "required":[ diff --git a/services/connectcampaigns/pom.xml b/services/connectcampaigns/pom.xml index c90fe4a77fbf..b259aecf3284 100644 --- a/services/connectcampaigns/pom.xml +++ b/services/connectcampaigns/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT connectcampaigns AWS Java SDK :: Services :: Connect Campaigns diff --git a/services/connectcampaigns/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/connectcampaigns/src/main/resources/codegen-resources/endpoint-rule-set.json index 53e8ed2565b8..59d03098d77d 100644 --- a/services/connectcampaigns/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/connectcampaigns/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,21 +45,69 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ { - "fn": "parseURL", + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", "argv": [ { - "ref": "Endpoint" + "ref": "Region" } ], - "assign": "url" + "assign": "PartitionResult" } ], "type": "tree", @@ -75,13 +122,17 @@ }, true ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], "type": "tree", "rules": [ { @@ -89,143 +140,86 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, { - "fn": "getAttr", + 
"fn": "booleanEquals", "argv": [ + true, { - "ref": "PartitionResult" - }, - "supportsFIPS" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } ] } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + ], + "type": "tree", + "rules": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] + "conditions": [], + "endpoint": { + "url": "https://connect-campaigns-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": "https://connect-campaigns-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], "type": "tree", "rules": [ { @@ -238,78 +232,83 @@ "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://connect-campaigns.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://connect-campaigns.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://connect-campaigns.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://connect-campaigns.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "Invalid 
Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/connectcampaigns/src/main/resources/codegen-resources/endpoint-tests.json b/services/connectcampaigns/src/main/resources/codegen-resources/endpoint-tests.json index a1b02da028d0..724d5f23b553 100644 --- a/services/connectcampaigns/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/connectcampaigns/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,42 +1,42 @@ { "testCases": [ { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://connect-campaigns-fips.us-west-2.api.aws" + "url": "https://connect-campaigns.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": true + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://connect-campaigns-fips.us-west-2.amazonaws.com" + "url": "https://connect-campaigns.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://connect-campaigns.us-west-2.api.aws" + "url": "https://connect-campaigns.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -47,178 +47,274 @@ } }, "params": { - "UseDualStack": false, "Region": "us-west-2", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://connect-campaigns-fips.eu-west-2.api.aws" + "url": "https://connect-campaigns-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://connect-campaigns-fips.eu-west-2.amazonaws.com" + "url": "https://connect-campaigns-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://connect-campaigns.eu-west-2.api.aws" + "url": "https://connect-campaigns.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - 
"documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://connect-campaigns.eu-west-2.amazonaws.com" + "url": "https://connect-campaigns-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://connect-campaigns-fips.ap-southeast-2.api.aws" + "url": "https://connect-campaigns-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://connect-campaigns-fips.ap-southeast-2.amazonaws.com" + "url": "https://connect-campaigns.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://connect-campaigns.ap-southeast-2.api.aws" + "url": "https://connect-campaigns.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://connect-campaigns.ap-southeast-2.amazonaws.com" + "url": "https://connect-campaigns-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://connect-campaigns-fips.us-east-1.api.aws" + "url": "https://connect-campaigns-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://connect-campaigns-fips.us-east-1.amazonaws.com" + "url": "https://connect-campaigns.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and 
DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://connect-campaigns.us-east-1.api.aws" + "url": "https://connect-campaigns.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://connect-campaigns.us-east-1.amazonaws.com" + "url": "https://connect-campaigns-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://connect-campaigns.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://connect-campaigns-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://connect-campaigns.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + 
"UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -228,9 +324,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -240,11 +336,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/connectcampaigns/src/main/resources/codegen-resources/service-2.json b/services/connectcampaigns/src/main/resources/codegen-resources/service-2.json index fcabbb1d6fc4..34df4801e277 100644 --- a/services/connectcampaigns/src/main/resources/codegen-resources/service-2.json +++ b/services/connectcampaigns/src/main/resources/codegen-resources/service-2.json @@ -436,6 +436,13 @@ }, "exception":true }, + "AgentlessDialerConfig":{ + "type":"structure", + "members":{ + "dialingCapacity":{"shape":"DialingCapacity"} + }, + "documentation":"

    Agentless Dialer config

    " + }, "AnswerMachineDetectionConfig":{ "type":"structure", "required":["enableAnswerMachineDetection"], @@ -452,14 +459,14 @@ "documentation":"

    Arn

    ", "max":500, "min":20, - "pattern":"^arn:.*" + "pattern":"arn:.*" }, "AttributeName":{ "type":"string", "documentation":"

    The key of the attribute. Attribute keys can include only alphanumeric, dash, and underscore characters.

    ", "max":32767, "min":0, - "pattern":"^[a-zA-Z0-9\\-_]+$" + "pattern":"[a-zA-Z0-9\\-_]+" }, "AttributeValue":{ "type":"string", @@ -488,19 +495,19 @@ "Campaign":{ "type":"structure", "required":[ + "id", "arn", + "name", "connectInstanceId", "dialerConfig", - "id", - "name", "outboundCallConfig" ], "members":{ + "id":{"shape":"CampaignId"}, "arn":{"shape":"CampaignArn"}, + "name":{"shape":"CampaignName"}, "connectInstanceId":{"shape":"InstanceId"}, "dialerConfig":{"shape":"DialerConfig"}, - "id":{"shape":"CampaignId"}, - "name":{"shape":"CampaignName"}, "outboundCallConfig":{"shape":"OutboundCallConfig"}, "tags":{"shape":"TagMap"} }, @@ -545,16 +552,16 @@ "CampaignSummary":{ "type":"structure", "required":[ - "arn", - "connectInstanceId", "id", - "name" + "arn", + "name", + "connectInstanceId" ], "members":{ - "arn":{"shape":"CampaignArn"}, - "connectInstanceId":{"shape":"InstanceId"}, "id":{"shape":"CampaignId"}, - "name":{"shape":"CampaignName"} + "arn":{"shape":"CampaignArn"}, + "name":{"shape":"CampaignName"}, + "connectInstanceId":{"shape":"InstanceId"} }, "documentation":"

    An Amazon Connect campaign summary.

    " }, @@ -596,15 +603,15 @@ "CreateCampaignRequest":{ "type":"structure", "required":[ + "name", "connectInstanceId", "dialerConfig", - "name", "outboundCallConfig" ], "members":{ + "name":{"shape":"CampaignName"}, "connectInstanceId":{"shape":"InstanceId"}, "dialerConfig":{"shape":"DialerConfig"}, - "name":{"shape":"CampaignName"}, "outboundCallConfig":{"shape":"OutboundCallConfig"}, "tags":{"shape":"TagMap"} }, @@ -613,8 +620,8 @@ "CreateCampaignResponse":{ "type":"structure", "members":{ - "arn":{"shape":"CampaignArn"}, "id":{"shape":"CampaignId"}, + "arn":{"shape":"CampaignArn"}, "tags":{"shape":"TagMap"} }, "documentation":"

    The response for Create Campaign API

    " @@ -684,16 +691,16 @@ "DialRequest":{ "type":"structure", "required":[ - "attributes", "clientToken", + "phoneNumber", "expirationTime", - "phoneNumber" + "attributes" ], "members":{ - "attributes":{"shape":"Attributes"}, "clientToken":{"shape":"ClientToken"}, + "phoneNumber":{"shape":"DestinationPhoneNumber"}, "expirationTime":{"shape":"TimeStamp"}, - "phoneNumber":{"shape":"DestinationPhoneNumber"} + "attributes":{"shape":"Attributes"} }, "documentation":"

    A dial request for a campaign.

    " }, @@ -711,12 +718,20 @@ "DialerConfig":{ "type":"structure", "members":{ + "progressiveDialerConfig":{"shape":"ProgressiveDialerConfig"}, "predictiveDialerConfig":{"shape":"PredictiveDialerConfig"}, - "progressiveDialerConfig":{"shape":"ProgressiveDialerConfig"} + "agentlessDialerConfig":{"shape":"AgentlessDialerConfig"} }, "documentation":"

    The possible types of dialer config parameters

    ", "union":true }, + "DialingCapacity":{ + "type":"double", + "documentation":"

    Allocates dialing capacity for this campaign between multiple active campaigns.
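
    Since DialingCapacity is a new boxed double (0.01 to 1) shared by the progressive, predictive, and the new agentless dialer configs, a minimal sketch of setting it through the generated connectcampaigns client follows. The dialingCapacity builder method and the AgentlessDialerConfig class are assumptions about what the codegen will produce from this model, not something the patch shows directly.

```java
import software.amazon.awssdk.services.connectcampaigns.model.AgentlessDialerConfig;
import software.amazon.awssdk.services.connectcampaigns.model.DialerConfig;
import software.amazon.awssdk.services.connectcampaigns.model.ProgressiveDialerConfig;

public final class DialerConfigSketch {
    public static void main(String[] args) {
        // Progressive dialer: keeps the existing bandwidthAllocation member and
        // adds the new dialingCapacity (boxed double constrained to 0.01..1).
        DialerConfig progressive = DialerConfig.builder()
            .progressiveDialerConfig(ProgressiveDialerConfig.builder()
                .bandwidthAllocation(0.5)
                .dialingCapacity(0.25)
                .build())
            .build();

        // Agentless dialer: the new union branch added by this change.
        DialerConfig agentless = DialerConfig.builder()
            .agentlessDialerConfig(AgentlessDialerConfig.builder()
                .dialingCapacity(1.0)
                .build())
            .build();

        System.out.println(progressive);
        System.out.println(agentless);
    }
}
```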

    ", + "box":true, + "max":1, + "min":0.01 + }, "Enabled":{ "type":"boolean", "documentation":"

    Boolean to indicate if custom encryption has been enabled.

    " @@ -761,8 +776,8 @@ "type":"structure", "members":{ "clientToken":{"shape":"ClientToken"}, - "failureCode":{"shape":"FailureCode"}, - "id":{"shape":"DialRequestId"} + "id":{"shape":"DialRequestId"}, + "failureCode":{"shape":"FailureCode"} }, "documentation":"

    A failed request identified by the unique client token.

    " }, @@ -808,8 +823,8 @@ "GetCampaignStateBatchResponse":{ "type":"structure", "members":{ - "failedRequests":{"shape":"FailedCampaignStateResponseList"}, - "successfulRequests":{"shape":"SuccessfulCampaignStateResponseList"} + "successfulRequests":{"shape":"SuccessfulCampaignStateResponseList"}, + "failedRequests":{"shape":"FailedCampaignStateResponseList"} }, "documentation":"

    GetCampaignStateBatchResponse

    " }, @@ -874,13 +889,13 @@ "type":"structure", "required":[ "connectInstanceId", - "encryptionConfig", - "serviceLinkedRoleArn" + "serviceLinkedRoleArn", + "encryptionConfig" ], "members":{ "connectInstanceId":{"shape":"InstanceId"}, - "encryptionConfig":{"shape":"EncryptionConfig"}, - "serviceLinkedRoleArn":{"shape":"ServiceLinkedRoleArn"} + "serviceLinkedRoleArn":{"shape":"ServiceLinkedRoleArn"}, + "encryptionConfig":{"shape":"EncryptionConfig"} }, "documentation":"

    Instance config object

    " }, @@ -893,12 +908,12 @@ "InstanceIdFilter":{ "type":"structure", "required":[ - "operator", - "value" + "value", + "operator" ], "members":{ - "operator":{"shape":"InstanceIdFilterOperator"}, - "value":{"shape":"InstanceId"} + "value":{"shape":"InstanceId"}, + "operator":{"shape":"InstanceIdFilterOperator"} }, "documentation":"

    Connect instance identifier filter

    " }, @@ -927,8 +942,8 @@ ], "members":{ "connectInstanceId":{"shape":"InstanceId"}, - "failureCode":{"shape":"InstanceOnboardingJobFailureCode"}, - "status":{"shape":"InstanceOnboardingJobStatusCode"} + "status":{"shape":"InstanceOnboardingJobStatusCode"}, + "failureCode":{"shape":"InstanceOnboardingJobFailureCode"} }, "documentation":"

    Instance onboarding job status object

    " }, @@ -961,12 +976,12 @@ "InvalidCampaignStateException":{ "type":"structure", "required":[ - "message", - "state" + "state", + "message" ], "members":{ - "message":{"shape":"String"}, "state":{"shape":"CampaignState"}, + "message":{"shape":"String"}, "xAmzErrorType":{ "shape":"XAmazonErrorType", "location":"header", @@ -1001,17 +1016,17 @@ "ListCampaignsRequest":{ "type":"structure", "members":{ - "filters":{"shape":"CampaignFilters"}, "maxResults":{"shape":"MaxResults"}, - "nextToken":{"shape":"NextToken"} + "nextToken":{"shape":"NextToken"}, + "filters":{"shape":"CampaignFilters"} }, "documentation":"

    ListCampaignsRequest

    " }, "ListCampaignsResponse":{ "type":"structure", "members":{ - "campaignSummaryList":{"shape":"CampaignSummaryList"}, - "nextToken":{"shape":"NextToken"} + "nextToken":{"shape":"NextToken"}, + "campaignSummaryList":{"shape":"CampaignSummaryList"} }, "documentation":"

    ListCampaignsResponse

    " }, @@ -1049,15 +1064,12 @@ }, "OutboundCallConfig":{ "type":"structure", - "required":[ - "connectContactFlowId", - "connectQueueId" - ], + "required":["connectContactFlowId"], "members":{ - "answerMachineDetectionConfig":{"shape":"AnswerMachineDetectionConfig"}, "connectContactFlowId":{"shape":"ContactFlowId"}, + "connectSourcePhoneNumber":{"shape":"SourcePhoneNumber"}, "connectQueueId":{"shape":"QueueId"}, - "connectSourcePhoneNumber":{"shape":"SourcePhoneNumber"} + "answerMachineDetectionConfig":{"shape":"AnswerMachineDetectionConfig"} }, "documentation":"

    The configuration used for outbound calls.

    " }, @@ -1077,7 +1089,8 @@ "type":"structure", "required":["bandwidthAllocation"], "members":{ - "bandwidthAllocation":{"shape":"BandwidthAllocation"} + "bandwidthAllocation":{"shape":"BandwidthAllocation"}, + "dialingCapacity":{"shape":"DialingCapacity"} }, "documentation":"

    Predictive Dialer config

    " }, @@ -1085,31 +1098,32 @@ "type":"structure", "required":["bandwidthAllocation"], "members":{ - "bandwidthAllocation":{"shape":"BandwidthAllocation"} + "bandwidthAllocation":{"shape":"BandwidthAllocation"}, + "dialingCapacity":{"shape":"DialingCapacity"} }, "documentation":"

    Progressive Dialer config

    " }, "PutDialRequestBatchRequest":{ "type":"structure", "required":[ - "dialRequests", - "id" + "id", + "dialRequests" ], "members":{ - "dialRequests":{"shape":"DialRequestList"}, "id":{ "shape":"CampaignId", "location":"uri", "locationName":"id" - } + }, + "dialRequests":{"shape":"DialRequestList"} }, "documentation":"

    PutDialRequestBatchRequest

    " }, "PutDialRequestBatchResponse":{ "type":"structure", "members":{ - "failedRequests":{"shape":"FailedRequestList"}, - "successfulRequests":{"shape":"SuccessfulRequestList"} + "successfulRequests":{"shape":"SuccessfulRequestList"}, + "failedRequests":{"shape":"FailedRequestList"} }, "documentation":"

    PutDialRequestBatchResponse

    " }, @@ -1262,7 +1276,7 @@ "documentation":"

    Tag key.

    ", "max":128, "min":1, - "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + "pattern":"(?!aws:)[a-zA-Z+-=._:/]+" }, "TagKeyList":{ "type":"list", @@ -1346,16 +1360,16 @@ "UpdateCampaignDialerConfigRequest":{ "type":"structure", "required":[ - "dialerConfig", - "id" + "id", + "dialerConfig" ], "members":{ - "dialerConfig":{"shape":"DialerConfig"}, "id":{ "shape":"CampaignId", "location":"uri", "locationName":"id" - } + }, + "dialerConfig":{"shape":"DialerConfig"} }, "documentation":"

    UpdateCampaignDialerConfigRequest

    " }, @@ -1379,14 +1393,14 @@ "type":"structure", "required":["id"], "members":{ - "answerMachineDetectionConfig":{"shape":"AnswerMachineDetectionConfig"}, - "connectContactFlowId":{"shape":"ContactFlowId"}, - "connectSourcePhoneNumber":{"shape":"SourcePhoneNumber"}, "id":{ "shape":"CampaignId", "location":"uri", "locationName":"id" - } + }, + "connectContactFlowId":{"shape":"ContactFlowId"}, + "connectSourcePhoneNumber":{"shape":"SourcePhoneNumber"}, + "answerMachineDetectionConfig":{"shape":"AnswerMachineDetectionConfig"} }, "documentation":"

    UpdateCampaignOutboundCallConfigRequest

    " }, diff --git a/services/connectcases/pom.xml b/services/connectcases/pom.xml index afaa98d72c94..79861597b37a 100644 --- a/services/connectcases/pom.xml +++ b/services/connectcases/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT connectcases AWS Java SDK :: Services :: Connect Cases diff --git a/services/connectcontactlens/pom.xml b/services/connectcontactlens/pom.xml index bdb8ac1a52cf..c1fc341def24 100644 --- a/services/connectcontactlens/pom.xml +++ b/services/connectcontactlens/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT connectcontactlens AWS Java SDK :: Services :: Connect Contact Lens diff --git a/services/connectparticipant/pom.xml b/services/connectparticipant/pom.xml index 0db6fa4b8532..300c9cacdceb 100644 --- a/services/connectparticipant/pom.xml +++ b/services/connectparticipant/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT connectparticipant AWS Java SDK :: Services :: ConnectParticipant diff --git a/services/connectparticipant/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/connectparticipant/src/main/resources/codegen-resources/endpoint-rule-set.json index 1662572daf81..eaed7a9924e3 100644 --- a/services/connectparticipant/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/connectparticipant/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - 
} - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://participant.connect-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://participant.connect-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -221,155 +225,115 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://participant.connect.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://participant.connect-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://participant.connect.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://participant.connect-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://participant.connect.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - 
"conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://participant.connect.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://participant.connect.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://participant.connect.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/connectparticipant/src/main/resources/codegen-resources/endpoint-tests.json b/services/connectparticipant/src/main/resources/codegen-resources/endpoint-tests.json index 3811e928b32d..3a270c6ff31b 100644 --- a/services/connectparticipant/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/connectparticipant/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { - "UseDualStack": true, "Region": "us-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -21,9 +21,9 @@ } }, "params": { - "UseDualStack": false, "Region": "us-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { - "UseDualStack": true, "Region": "us-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -47,9 +47,9 @@ } }, "params": { - "UseDualStack": false, "Region": "us-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseDualStack": true, "Region": "cn-north-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseDualStack": false, "Region": "cn-north-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseDualStack": true, "Region": "cn-north-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -99,9 +99,9 @@ } }, "params": { - "UseDualStack": false, "Region": "cn-north-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -112,9 +112,9 @@ } }, "params": { - "UseDualStack": false, "Region": "us-gov-west-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -125,9 +125,9 @@ } }, "params": { - "UseDualStack": false, "Region": "us-gov-west-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -138,9 +138,9 @@ } }, "params": { - "UseDualStack": true, "Region": "us-gov-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -151,9 +151,9 @@ } }, "params": { - "UseDualStack": false, "Region": "us-gov-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -164,9 +164,9 @@ } }, "params": { - "UseDualStack": true, "Region": "us-gov-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -177,9 +177,9 @@ } }, "params": { - "UseDualStack": false, "Region": "us-gov-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -188,9 +188,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - 
"UseDualStack": true, "Region": "us-iso-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -201,9 +201,9 @@ } }, "params": { - "UseDualStack": false, "Region": "us-iso-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -212,9 +212,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, "Region": "us-iso-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -225,9 +225,9 @@ } }, "params": { - "UseDualStack": false, "Region": "us-iso-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -236,9 +236,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, "Region": "us-isob-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -249,9 +249,9 @@ } }, "params": { - "UseDualStack": false, "Region": "us-isob-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -260,9 +260,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, "Region": "us-isob-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -273,9 +273,9 @@ } }, "params": { - "UseDualStack": false, "Region": "us-isob-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -286,9 +286,9 @@ } }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -300,8 +300,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -311,9 +311,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -323,9 +323,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } }, diff --git a/services/connectparticipant/src/main/resources/codegen-resources/service-2.json b/services/connectparticipant/src/main/resources/codegen-resources/service-2.json index 97d2c2e3bb41..15131de43c3f 100644 --- a/services/connectparticipant/src/main/resources/codegen-resources/service-2.json +++ b/services/connectparticipant/src/main/resources/codegen-resources/service-2.json @@ -47,6 +47,23 @@ ], "documentation":"

    Creates the participant's connection.

    ParticipantToken is used for invoking this API instead of ConnectionToken.

    The participant token is valid for the lifetime of the participant – until they are part of a contact.

    The response URL for WEBSOCKET Type has a connect expiry timeout of 100s. Clients must manually connect to the returned websocket URL and subscribe to the desired topic.

    For chat, you need to publish the following on the established websocket connection:

    {\"topic\":\"aws/subscribe\",\"content\":{\"topics\":[\"aws/chat\"]}}

    Upon websocket URL expiry, as specified in the response ConnectionExpiry parameter, clients need to call this API again to obtain a new websocket URL and perform the same steps as before.

    Message streaming support: This API can also be used together with the StartContactStreaming API to create a participant connection for chat contacts that are not using a websocket. For more information about message streaming, see Enable real-time chat message streaming in the Amazon Connect Administrator Guide.

    Feature specifications: For information about feature specifications, such as the allowed number of open websocket connections per participant, see Feature specifications in the Amazon Connect Administrator Guide.

    The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
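
    Taken together, the steps above amount to: call CreateParticipantConnection with the participant token, open the returned websocket URL before it expires, and publish the aws/subscribe frame. A minimal Java sketch of that flow follows, using the SDK's ConnectParticipantClient and the JDK 11 websocket client; the region, the anonymous credentials (these APIs are not SigV4-signed), and the token value are placeholder assumptions, not part of this change.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.WebSocket;
import java.util.concurrent.CompletionStage;

import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.connectparticipant.ConnectParticipantClient;
import software.amazon.awssdk.services.connectparticipant.model.ConnectionType;
import software.amazon.awssdk.services.connectparticipant.model.CreateParticipantConnectionRequest;
import software.amazon.awssdk.services.connectparticipant.model.CreateParticipantConnectionResponse;

public final class ParticipantWebsocketSketch {
    public static void main(String[] args) {
        ConnectParticipantClient client = ConnectParticipantClient.builder()
            .region(Region.US_EAST_1)                              // placeholder region
            .credentialsProvider(AnonymousCredentialsProvider.create())
            .build();

        // ParticipantToken (not ConnectionToken) authorizes this call.
        CreateParticipantConnectionResponse connection = client.createParticipantConnection(
            CreateParticipantConnectionRequest.builder()
                .participantToken("participant-token-placeholder") // placeholder value
                .type(ConnectionType.WEBSOCKET, ConnectionType.CONNECTION_CREDENTIALS)
                .build());

        String websocketUrl = connection.websocket().url();

        // Connect to the returned URL within its expiry window and subscribe to
        // the chat topic, as described in the documentation above.
        WebSocket ws = HttpClient.newHttpClient()
            .newWebSocketBuilder()
            .buildAsync(URI.create(websocketUrl), new WebSocket.Listener() {
                @Override
                public CompletionStage<?> onText(WebSocket webSocket, CharSequence data, boolean last) {
                    System.out.println("received: " + data);
                    return WebSocket.Listener.super.onText(webSocket, data, last);
                }
            })
            .join();

        ws.sendText("{\"topic\":\"aws/subscribe\",\"content\":{\"topics\":[\"aws/chat\"]}}", true);
    }
}
```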

    " }, + "DescribeView":{ + "name":"DescribeView", + "http":{ + "method":"GET", + "requestUri":"/participant/views/{ViewToken}" + }, + "input":{"shape":"DescribeViewRequest"}, + "output":{"shape":"DescribeViewResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Retrieves the view for the specified view token.
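
    Because DescribeView is introduced by this change, its generated Java surface does not exist yet; the sketch below assumes the usual codegen shape (a describeView method, DescribeViewRequest/DescribeViewResponse, and the new ResourceNotFoundException) and uses placeholder token values.

```java
import software.amazon.awssdk.services.connectparticipant.ConnectParticipantClient;
import software.amazon.awssdk.services.connectparticipant.model.DescribeViewRequest;
import software.amazon.awssdk.services.connectparticipant.model.DescribeViewResponse;
import software.amazon.awssdk.services.connectparticipant.model.ResourceNotFoundException;

public final class DescribeViewSketch {
    public static void main(String[] args) {
        ConnectParticipantClient client = ConnectParticipantClient.create();
        try {
            DescribeViewResponse response = client.describeView(DescribeViewRequest.builder()
                .viewToken("view-token-placeholder")              // from a ShowView interactive message
                .connectionToken("connection-token-placeholder")  // sent as the X-Amz-Bearer header
                .build());

            // View content carries the template, input schema, and possible actions.
            System.out.println("view name: " + response.view().name());
            System.out.println("template : " + response.view().content().template());
            System.out.println("actions  : " + response.view().content().actions());
        } catch (ResourceNotFoundException e) {
            // New 404-mapped error added by this change.
            System.err.println("View not found: " + e.getMessage());
        }
    }
}
```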

    " + }, "DisconnectParticipant":{ "name":"DisconnectParticipant", "http":{ @@ -146,6 +163,7 @@ } }, "shapes":{ + "ARN":{"type":"string"}, "AccessDeniedException":{ "type":"structure", "required":["Message"], @@ -329,7 +347,7 @@ "members":{ "Type":{ "shape":"ConnectionTypeList", - "documentation":"

    Type of connection information required. This can be omitted if ConnectParticipant is true.

    " + "documentation":"

    Type of connection information required. If you need CONNECTION_CREDENTIALS along with marking the participant as connected, pass CONNECTION_CREDENTIALS in Type.

    " }, "ParticipantToken":{ "shape":"ParticipantToken", @@ -356,6 +374,36 @@ } } }, + "DescribeViewRequest":{ + "type":"structure", + "required":[ + "ViewToken", + "ConnectionToken" + ], + "members":{ + "ViewToken":{ + "shape":"ViewToken", + "documentation":"

    An encrypted token originating from the interactive message of a ShowView block operation. Represents the desired view.

    ", + "location":"uri", + "locationName":"ViewToken" + }, + "ConnectionToken":{ + "shape":"ParticipantToken", + "documentation":"

    The connection token.

    ", + "location":"header", + "locationName":"X-Amz-Bearer" + } + } + }, + "DescribeViewResponse":{ + "type":"structure", + "members":{ + "View":{ + "shape":"View", + "documentation":"

    A view resource object. Contains metadata and content necessary to render the view.

    " + } + } + }, "DisconnectParticipantRequest":{ "type":"structure", "required":["ConnectionToken"], @@ -585,7 +633,8 @@ "enum":[ "AGENT", "CUSTOMER", - "SYSTEM" + "SYSTEM", + "CUSTOM_BOT" ] }, "ParticipantToken":{ @@ -630,6 +679,36 @@ "type":"list", "member":{"shape":"Receipt"} }, + "ResourceId":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"}, + "ResourceId":{ + "shape":"ResourceId", + "documentation":"

    The identifier of the resource.

    " + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of Amazon Connect resource.

    " + } + }, + "documentation":"

    The resource was not found.

    ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourceType":{ + "type":"string", + "enum":[ + "CONTACT", + "CONTACT_FLOW", + "INSTANCE", + "PARTICIPANT", + "HIERARCHY_LEVEL", + "HIERARCHY_GROUP", + "USER" + ] + }, "ScanDirection":{ "type":"string", "enum":[ @@ -865,6 +944,88 @@ "error":{"httpStatusCode":400}, "exception":true }, + "View":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"ViewId", + "documentation":"

    The identifier of the view.

    " + }, + "Arn":{ + "shape":"ARN", + "documentation":"

    The Amazon Resource Name (ARN) of the view.

    " + }, + "Name":{ + "shape":"ViewName", + "documentation":"

    The name of the view.

    " + }, + "Version":{ + "shape":"ViewVersion", + "documentation":"

    The current version of the view.

    " + }, + "Content":{ + "shape":"ViewContent", + "documentation":"

    View content containing all content necessary to render a view except for runtime input data.

    " + } + }, + "documentation":"

    A view resource object. Contains metadata and content necessary to render the view.

    " + }, + "ViewAction":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^([\\p{L}\\p{N}_.:\\/=+\\-@()']+[\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@()']*)$", + "sensitive":true + }, + "ViewActions":{ + "type":"list", + "member":{"shape":"ViewAction"} + }, + "ViewContent":{ + "type":"structure", + "members":{ + "InputSchema":{ + "shape":"ViewInputSchema", + "documentation":"

    The schema representing the input data that must be supplied to the view template in order to render it.

    " + }, + "Template":{ + "shape":"ViewTemplate", + "documentation":"

    The view template representing the structure of the view.

    " + }, + "Actions":{ + "shape":"ViewActions", + "documentation":"

    A list of actions possible from the view.

    " + } + }, + "documentation":"

    View content containing all content necessary to render a view except for runtime input data.

    " + }, + "ViewId":{ + "type":"string", + "max":500, + "min":1, + "pattern":"^[a-zA-Z0-9\\_\\-:\\/$]+$" + }, + "ViewInputSchema":{ + "type":"string", + "sensitive":true + }, + "ViewName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^([\\p{L}\\p{N}_.:\\/=+\\-@()']+[\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@()']*)$", + "sensitive":true + }, + "ViewTemplate":{ + "type":"string", + "sensitive":true + }, + "ViewToken":{ + "type":"string", + "max":1000, + "min":1 + }, + "ViewVersion":{"type":"integer"}, "Websocket":{ "type":"structure", "members":{ diff --git a/services/controltower/pom.xml b/services/controltower/pom.xml index 99f800c9bb0b..1d117f974e01 100644 --- a/services/controltower/pom.xml +++ b/services/controltower/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT controltower AWS Java SDK :: Services :: Control Tower diff --git a/services/costandusagereport/pom.xml b/services/costandusagereport/pom.xml index 07a3642acc5a..fb0608ab5dfa 100644 --- a/services/costandusagereport/pom.xml +++ b/services/costandusagereport/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT costandusagereport AWS Java SDK :: Services :: AWS Cost and Usage Report diff --git a/services/costandusagereport/src/main/resources/codegen-resources/customization.config b/services/costandusagereport/src/main/resources/codegen-resources/customization.config index 6d58177dfd55..bb24e28984aa 100644 --- a/services/costandusagereport/src/main/resources/codegen-resources/customization.config +++ b/services/costandusagereport/src/main/resources/codegen-resources/customization.config @@ -2,7 +2,7 @@ "verifiedSimpleMethods": [ "describeReportDefinitions" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "deleteReportDefinition" ] } diff --git a/services/costexplorer/pom.xml b/services/costexplorer/pom.xml index d0e7f59dbd1f..b0228462c731 100644 --- a/services/costexplorer/pom.xml +++ b/services/costexplorer/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 costexplorer diff --git a/services/costexplorer/src/main/resources/codegen-resources/customization.config b/services/costexplorer/src/main/resources/codegen-resources/customization.config index a8254fa7e84f..09691b4543c1 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/customization.config +++ b/services/costexplorer/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,3 @@ { - "blacklistedSimpleMethods" : ["getCostAndUsage"] + "excludedSimpleMethods" : ["getCostAndUsage"] } diff --git a/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json index e4c99e99633a..0a970cbf0e75 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": 
"endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,342 +115,302 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "stringEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", + "fn": "getAttr", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] + "ref": "PartitionResult" }, - "aws" + "name" ] }, + "aws" + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - false - ] + "ref": "UseFIPS" }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - false - ] - } - ], - "endpoint": { - "url": "https://ce.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "ce", - "signingRegion": "us-east-1" - } - ] + "ref": "UseDualStack" }, - "headers": {} - }, - "type": "endpoint" + false + ] + } + ], + "endpoint": { + "url": "https://ce.us-east-1.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "ce", + "signingRegion": "us-east-1" + } + ] }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [ + "fn": "stringEquals", + "argv": [ { - "fn": "stringEquals", + "fn": "getAttr", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] + "ref": "PartitionResult" }, - "aws-cn" + "name" ] }, + "aws-cn" + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - false - ] + "ref": "UseFIPS" }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - false - ] + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://ce.cn-northwest-1.amazonaws.com.cn", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "ce", + "signingRegion": "cn-northwest-1" } - ], - "endpoint": { - "url": "https://ce.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "ce", - "signingRegion": "cn-northwest-1" - } - ] + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" }, - "headers": {} - }, - "type": "endpoint" + true + ] }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - 
"fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://ce-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://ce-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://ce-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://ce-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://ce.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://ce.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://ce.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + 
"conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://ce.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/costexplorer/src/main/resources/codegen-resources/service-2.json b/services/costexplorer/src/main/resources/codegen-resources/service-2.json index 37fbde371ede..3a4839af8010 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/service-2.json +++ b/services/costexplorer/src/main/resources/codegen-resources/service-2.json @@ -308,7 +308,7 @@ {"shape":"LimitExceededException"}, {"shape":"DataUnavailableException"} ], - "documentation":"

    Retrieves the details for a Savings Plan recommendation. These details include the hourly data-points that construct the new cost, coverage, and utilization charts.

    " + "documentation":"

    Retrieves the details for a Savings Plan recommendation. These details include the hourly data-points that construct the cost, coverage, and utilization charts.
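
    For context, this documentation belongs to the Savings Plans purchase recommendation details operation; a hedged sketch of calling it through the Cost Explorer client follows. The operation name (GetSavingsPlanPurchaseRecommendationDetails) and the recommendation detail id member are assumptions, since the operation signature is not visible in this hunk.

```java
import software.amazon.awssdk.services.costexplorer.CostExplorerClient;
import software.amazon.awssdk.services.costexplorer.model.GetSavingsPlanPurchaseRecommendationDetailsRequest;
import software.amazon.awssdk.services.costexplorer.model.GetSavingsPlanPurchaseRecommendationDetailsResponse;

public final class RecommendationDetailsSketch {
    public static void main(String[] args) {
        CostExplorerClient ce = CostExplorerClient.create();

        // The detail id comes from a prior Savings Plans purchase recommendation call.
        GetSavingsPlanPurchaseRecommendationDetailsResponse details =
            ce.getSavingsPlanPurchaseRecommendationDetails(
                GetSavingsPlanPurchaseRecommendationDetailsRequest.builder()
                    .recommendationDetailId("recommendation-detail-id-placeholder") // placeholder
                    .build());

        // The response carries the hourly data points behind the cost, coverage,
        // and utilization charts mentioned in the documentation above.
        System.out.println(details);
    }
}
```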

    " }, "GetSavingsPlansCoverage":{ "name":"GetSavingsPlansCoverage", @@ -828,6 +828,14 @@ "Status":{ "shape":"CostAllocationTagStatus", "documentation":"

    The status of a cost allocation tag.

    " + }, + "LastUpdatedDate":{ + "shape":"ZonedDateTime", + "documentation":"

    The last date that the tag was either activated or deactivated.

    " + }, + "LastUsedDate":{ + "shape":"ZonedDateTime", + "documentation":"

    The last month that the tag was used on an Amazon Web Services resource.

    " } }, "documentation":"

    The cost allocation tag structure. This includes detailed metadata for the CostAllocationTag object.
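
    The two new members above (LastUpdatedDate and LastUsedDate) would surface as extra getters on the generated CostAllocationTag class. A minimal sketch follows, assuming the regenerated model exposes them as string getters alongside the existing tag key and status.

```java
import software.amazon.awssdk.services.costexplorer.CostExplorerClient;
import software.amazon.awssdk.services.costexplorer.model.CostAllocationTag;
import software.amazon.awssdk.services.costexplorer.model.ListCostAllocationTagsRequest;
import software.amazon.awssdk.services.costexplorer.model.ListCostAllocationTagsResponse;

public final class CostAllocationTagDatesSketch {
    public static void main(String[] args) {
        CostExplorerClient ce = CostExplorerClient.create();

        ListCostAllocationTagsResponse response =
            ce.listCostAllocationTags(ListCostAllocationTagsRequest.builder().build());

        for (CostAllocationTag tag : response.costAllocationTags()) {
            // lastUpdatedDate() / lastUsedDate() are the members added by this change;
            // the ZonedDateTime shape is modeled as a string in this service.
            System.out.printf("%s status=%s lastUpdated=%s lastUsed=%s%n",
                tag.tagKey(), tag.statusAsString(), tag.lastUpdatedDate(), tag.lastUsedDate());
        }
    }
}
```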

    " diff --git a/services/customerprofiles/pom.xml b/services/customerprofiles/pom.xml index 3c0eef5654b0..e9804c5407c0 100644 --- a/services/customerprofiles/pom.xml +++ b/services/customerprofiles/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT customerprofiles AWS Java SDK :: Services :: Customer Profiles diff --git a/services/customerprofiles/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/customerprofiles/src/main/resources/codegen-resources/endpoint-rule-set.json index a70609892ae3..02edb0b68dbc 100644 --- a/services/customerprofiles/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/customerprofiles/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://profile-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://profile-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + 
"error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://profile-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://profile-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://profile.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://profile.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://profile.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://profile.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/customerprofiles/src/main/resources/codegen-resources/service-2.json b/services/customerprofiles/src/main/resources/codegen-resources/service-2.json index 2faa269c4324..4d16bfabf869 100644 --- a/services/customerprofiles/src/main/resources/codegen-resources/service-2.json +++ b/services/customerprofiles/src/main/resources/codegen-resources/service-2.json @@ -995,7 +995,8 @@ "documentation":"

    The postal code of a customer address.

    " } }, - "documentation":"

    A generic address associated with the customer that is not mailing, shipping, or billing.

    " + "documentation":"

    A generic address associated with the customer that is not mailing, shipping, or billing.
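
    This hunk, and many of the hunks that follow, mark Customer Profiles shapes and string members as sensitive. In the Java SDK v2 codegen this typically means the affected values are redacted in the generated toString() output; the small sketch below illustrates that, with the exact redaction behavior being an assumption about the codegen rather than something shown in the patch.

```java
import software.amazon.awssdk.services.customerprofiles.model.Address;

public final class SensitiveToStringSketch {
    public static void main(String[] args) {
        Address address = Address.builder()
            .city("Seattle")
            .postalCode("98101")
            .build();

        // For shapes carrying the sensitive trait, the generated toString()
        // typically redacts member values rather than printing them.
        System.out.println(address);
    }
}
```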

    ", + "sensitive":true }, "AddressList":{ "type":"list", @@ -1124,7 +1125,8 @@ "documentation":"

    Mathematical expression that is performed on attribute items provided in the attribute list. Each element in the expression should follow the structure of \\\"{ObjectTypeName.AttributeName}\\\".

    " } }, - "documentation":"

    Mathematical expression and a list of attribute items specified in that expression.

    " + "documentation":"

    Mathematical expression and a list of attribute items specified in that expression.

    ", + "sensitive":true }, "AttributeItem":{ "type":"structure", @@ -1181,7 +1183,8 @@ "Attributes":{ "type":"map", "key":{"shape":"string1To255"}, - "value":{"shape":"string1To255"} + "value":{"shape":"string1To255"}, + "sensitive":true }, "AutoMerging":{ "type":"structure", @@ -1272,7 +1275,8 @@ "documentation":"

    The threshold for the calculated attribute.

    " } }, - "documentation":"

    The conditions including range, object count, and threshold for the calculated attribute.

    " + "documentation":"

    The conditions including range, object count, and threshold for the calculated attribute.
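
    A calculated attribute definition combines the AttributeDetails expression described earlier (elements of the form "{ObjectTypeName.AttributeName}") with these Conditions (range, object count, threshold). A hedged sketch of wiring them into CreateCalculatedAttributeDefinition follows; the enum values (DAYS, GREATER_THAN, SUM) are assumptions based on this model, and the domain and attribute names are placeholders.

```java
import software.amazon.awssdk.services.customerprofiles.CustomerProfilesClient;
import software.amazon.awssdk.services.customerprofiles.model.AttributeDetails;
import software.amazon.awssdk.services.customerprofiles.model.AttributeItem;
import software.amazon.awssdk.services.customerprofiles.model.Conditions;
import software.amazon.awssdk.services.customerprofiles.model.CreateCalculatedAttributeDefinitionRequest;
import software.amazon.awssdk.services.customerprofiles.model.Range;
import software.amazon.awssdk.services.customerprofiles.model.Threshold;

public final class CalculatedAttributeSketch {
    public static void main(String[] args) {
        CustomerProfilesClient client = CustomerProfilesClient.create();

        // Expression elements follow the "{ObjectTypeName.AttributeName}" form
        // described in the model documentation.
        AttributeDetails details = AttributeDetails.builder()
            .attributes(AttributeItem.builder().name("PurchaseAmount").build())
            .expression("{Order.PurchaseAmount}")
            .build();

        Conditions conditions = Conditions.builder()
            .range(Range.builder().unit("DAYS").value(30).build())
            .objectCount(100)
            .threshold(Threshold.builder().value("50").operator("GREATER_THAN").build())
            .build();

        client.createCalculatedAttributeDefinition(CreateCalculatedAttributeDefinitionRequest.builder()
            .domainName("my-domain")                         // placeholder
            .calculatedAttributeName("TotalPurchaseAmount")  // placeholder
            .attributeDetails(details)
            .conditions(conditions)
            .statistic("SUM")
            .build());
    }
}
```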

    ", + "sensitive":true }, "ConflictResolution":{ "type":"structure", @@ -1622,11 +1626,11 @@ "locationName":"DomainName" }, "AccountNumber":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    A unique account number that you have given to the customer.

    " }, "AdditionalInformation":{ - "shape":"string1To1000", + "shape":"sensitiveString1To1000", "documentation":"

    Any additional information relevant to the customer’s profile.

    " }, "PartyType":{ @@ -1634,23 +1638,23 @@ "documentation":"

    The type of profile used to describe the customer.

    " }, "BusinessName":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The name of the customer’s business.

    " }, "FirstName":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer’s first name.

    " }, "MiddleName":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer’s middle name.

    " }, "LastName":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer’s last name.

    " }, "BirthDate":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer’s birth date.

    " }, "Gender":{ @@ -1658,31 +1662,31 @@ "documentation":"

    The gender with which the customer identifies.

    " }, "PhoneNumber":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer’s phone number, which has not been specified as a mobile, home, or business number.

    " }, "MobilePhoneNumber":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer’s mobile phone number.

    " }, "HomePhoneNumber":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer’s home phone number.

    " }, "BusinessPhoneNumber":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer’s business phone number.

    " }, "EmailAddress":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer’s email address, which has not been specified as a personal or business address.

    " }, "PersonalEmailAddress":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer’s personal email address.

    " }, "BusinessEmailAddress":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer’s business email address.

    " }, "Address":{ @@ -1706,11 +1710,11 @@ "documentation":"

    A key value pair of attributes of a customer profile.

    " }, "PartyTypeString":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    An alternative to PartyType which accepts any string as input.

    " }, "GenderString":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    An alternative to Gender which accepts any string as input.

    " } } @@ -2184,7 +2188,8 @@ "FieldMap":{ "type":"map", "key":{"shape":"name"}, - "value":{"shape":"ObjectTypeField"} + "value":{"shape":"ObjectTypeField"}, + "sensitive":true }, "FieldNameList":{ "type":"list", @@ -2315,7 +2320,8 @@ "documentation":"

    The trigger settings that determine how and when the flow runs.

    " } }, - "documentation":"

    The configurations that control how Customer Profiles retrieves data from the source, Amazon AppFlow. Customer Profiles uses this information to create an AppFlow flow on behalf of customers.

    " + "documentation":"

    The configurations that control how Customer Profiles retrieves data from the source, Amazon AppFlow. Customer Profiles uses this information to create an AppFlow flow on behalf of customers.

    ", + "sensitive":true }, "FlowDescription":{ "type":"string", @@ -2348,7 +2354,8 @@ "MALE", "FEMALE", "UNSPECIFIED" - ] + ], + "sensitive":true }, "GetAutoMergingPreviewRequest":{ "type":"structure", @@ -2848,7 +2855,7 @@ "documentation":"

    The name of the profile object type.

    " }, "Description":{ - "shape":"text", + "shape":"sensitiveText", "documentation":"

    The description of the profile object type.

    " }, "TemplateId":{ @@ -3265,7 +3272,8 @@ "KeyMap":{ "type":"map", "key":{"shape":"name"}, - "value":{"shape":"ObjectTypeKeyList"} + "value":{"shape":"ObjectTypeKeyList"}, + "sensitive":true }, "KmsArn":{ "type":"string", @@ -4302,7 +4310,8 @@ "INDIVIDUAL", "BUSINESS", "OTHER" - ] + ], + "sensitive":true }, "PhoneNumberList":{ "type":"list", @@ -4318,11 +4327,11 @@ "documentation":"

    The unique identifier of a customer profile.

    " }, "AccountNumber":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    A unique account number that you have given to the customer.

    " }, "AdditionalInformation":{ - "shape":"string1To1000", + "shape":"sensitiveString1To1000", "documentation":"

    Any additional information relevant to the customer’s profile.

    " }, "PartyType":{ @@ -4330,23 +4339,23 @@ "documentation":"

    The type of profile used to describe the customer.

    " }, "BusinessName":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The name of the customer’s business.

    " }, "FirstName":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer’s first name.

    " }, "MiddleName":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer’s middle name.

    " }, "LastName":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer’s last name.

    " }, "BirthDate":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer’s birth date.

    " }, "Gender":{ @@ -4354,31 +4363,31 @@ "documentation":"

    The gender with which the customer identifies.

    " }, "PhoneNumber":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer's phone number, which has not been specified as a mobile, home, or business number.

    " }, "MobilePhoneNumber":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer’s mobile phone number.

    " }, "HomePhoneNumber":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer’s home phone number.

    " }, "BusinessPhoneNumber":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer’s business phone number.

    " }, "EmailAddress":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer’s email address, which has not been specified as a personal or business address.

    " }, "PersonalEmailAddress":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer’s personal email address.

    " }, "BusinessEmailAddress":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    The customer’s business email address.

    " }, "Address":{ @@ -4406,11 +4415,11 @@ "documentation":"

    A list of items used to find a profile returned in a SearchProfiles response. An item is a key-value(s) pair that matches an attribute in the profile.

    If the optional AdditionalSearchKeys parameter was included in the SearchProfiles request, the FoundByItems list should be interpreted based on the LogicalOperator used in the request:

    • AND - The profile included in the response matched all of the search keys specified in the request. The FoundByItems will include all of the key-value(s) pairs that were specified in the request (as this is a requirement of AND search logic).

    • OR - The profile included in the response matched at least one of the search keys specified in the request. The FoundByItems will include each of the key-value(s) pairs that the profile was found by.

    The OR relationship is the default behavior if the LogicalOperator parameter is not included in the SearchProfiles request.
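
    A hedged sketch of a SearchProfiles call that exercises the AND semantics described above and then inspects FoundByItems. The domain name, search keys, and values are placeholders, and the request and response accessors are assumed from the usual codegen conventions for this model.

```java
import software.amazon.awssdk.services.customerprofiles.CustomerProfilesClient;
import software.amazon.awssdk.services.customerprofiles.model.AdditionalSearchKey;
import software.amazon.awssdk.services.customerprofiles.model.Profile;
import software.amazon.awssdk.services.customerprofiles.model.SearchProfilesRequest;
import software.amazon.awssdk.services.customerprofiles.model.SearchProfilesResponse;

public final class SearchProfilesSketch {
    public static void main(String[] args) {
        CustomerProfilesClient client = CustomerProfilesClient.create();

        SearchProfilesResponse response = client.searchProfiles(SearchProfilesRequest.builder()
            .domainName("my-domain")                         // placeholder
            .keyName("_email")
            .values("jane@example.com")
            .additionalSearchKeys(AdditionalSearchKey.builder()
                .keyName("_phone")
                .values("+12065550100")
                .build())
            .logicalOperator("AND")                          // match all keys; OR is the default
            .build());

        for (Profile profile : response.items()) {
            // FoundByItems explains which key-value pairs matched, per the AND/OR
            // semantics described above.
            System.out.println(profile.profileId() + " found by " + profile.foundByItems());
        }
    }
}
```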

    " }, "PartyTypeString":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    An alternative to PartyType which accepts any string as input.

    " }, "GenderString":{ - "shape":"string1To255", + "shape":"sensitiveString1To255", "documentation":"

    An alternative to Gender which accepts any string as input.

    " } }, @@ -4436,7 +4445,8 @@ }, "ProfileObjectTypeList":{ "type":"list", - "member":{"shape":"ListProfileObjectTypeItem"} + "member":{"shape":"ListProfileObjectTypeItem"}, + "sensitive":true }, "ProfileObjectTypeTemplateList":{ "type":"list", @@ -4580,7 +4590,7 @@ "locationName":"ObjectTypeName" }, "Description":{ - "shape":"text", + "shape":"sensitiveText", "documentation":"

    Description of the profile object type.

    " }, "TemplateId":{ @@ -4629,7 +4639,7 @@ "documentation":"

    The name of the profile object type.

    " }, "Description":{ - "shape":"text", + "shape":"sensitiveText", "documentation":"

    Description of the profile object type.

    " }, "TemplateId":{ @@ -5128,7 +5138,8 @@ "MAXIMUM", "AVERAGE", "MAX_OCCURRENCE" - ] + ], + "sensitive":true }, "Status":{ "type":"string", @@ -5384,12 +5395,14 @@ "documentation":"

    The postal code of a customer address.

    " } }, - "documentation":"

    Updates associated with the address properties of a customer profile.

    " + "documentation":"

    Updates associated with the address properties of a customer profile.

    ", + "sensitive":true }, "UpdateAttributes":{ "type":"map", "key":{"shape":"string1To255"}, - "value":{"shape":"string0To255"} + "value":{"shape":"string0To255"}, + "sensitive":true }, "UpdateCalculatedAttributeDefinitionRequest":{ "type":"structure", @@ -5565,11 +5578,11 @@ "documentation":"

    The unique identifier of a customer profile.

    " }, "AdditionalInformation":{ - "shape":"string0To1000", + "shape":"sensitiveString0To1000", "documentation":"

    Any additional information relevant to the customer’s profile.

    " }, "AccountNumber":{ - "shape":"string0To255", + "shape":"sensitiveString0To255", "documentation":"

    A unique account number that you have given to the customer.

    " }, "PartyType":{ @@ -5577,23 +5590,23 @@ "documentation":"

    The type of profile used to describe the customer.

    " }, "BusinessName":{ - "shape":"string0To255", + "shape":"sensitiveString0To255", "documentation":"

    The name of the customer’s business.

    " }, "FirstName":{ - "shape":"string0To255", + "shape":"sensitiveString0To255", "documentation":"

    The customer’s first name.

    " }, "MiddleName":{ - "shape":"string0To255", + "shape":"sensitiveString0To255", "documentation":"

    The customer’s middle name.

    " }, "LastName":{ - "shape":"string0To255", + "shape":"sensitiveString0To255", "documentation":"

    The customer’s last name.

    " }, "BirthDate":{ - "shape":"string0To255", + "shape":"sensitiveString0To255", "documentation":"

    The customer’s birth date.

    " }, "Gender":{ @@ -5601,31 +5614,31 @@ "documentation":"

    The gender with which the customer identifies.

    " }, "PhoneNumber":{ - "shape":"string0To255", + "shape":"sensitiveString0To255", "documentation":"

    The customer’s phone number, which has not been specified as a mobile, home, or business number.

    " }, "MobilePhoneNumber":{ - "shape":"string0To255", + "shape":"sensitiveString0To255", "documentation":"

    The customer’s mobile phone number.

    " }, "HomePhoneNumber":{ - "shape":"string0To255", + "shape":"sensitiveString0To255", "documentation":"

    The customer’s home phone number.

    " }, "BusinessPhoneNumber":{ - "shape":"string0To255", + "shape":"sensitiveString0To255", "documentation":"

    The customer’s business phone number.

    " }, "EmailAddress":{ - "shape":"string0To255", + "shape":"sensitiveString0To255", "documentation":"

    The customer’s email address, which has not been specified as a personal or business address.

    " }, "PersonalEmailAddress":{ - "shape":"string0To255", + "shape":"sensitiveString0To255", "documentation":"

    The customer’s personal email address.

    " }, "BusinessEmailAddress":{ - "shape":"string0To255", + "shape":"sensitiveString0To255", "documentation":"

    The customer’s business email address.

    " }, "Address":{ @@ -5649,11 +5662,11 @@ "documentation":"

    A key value pair of attributes of a customer profile.

    " }, "PartyTypeString":{ - "shape":"string0To255", + "shape":"sensitiveString0To255", "documentation":"

    An alternative to PartyType which accepts any string as input.

    " }, "GenderString":{ - "shape":"string0To255", + "shape":"sensitiveString0To255", "documentation":"

    An alternative to Gender which accepts any string as input.
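    The profile members above now resolve to string shapes that carry the sensitive trait. In the generated AWS SDK for Java v2 classes, members backed by a shape marked "sensitive":true are redacted from the object's toString() output, so values such as account numbers and email addresses are not echoed into logs. A minimal sketch of the effect at a call site (the builder and accessor names below follow the customerprofiles codegen conventions and are assumptions, and the identifiers are placeholders):

        import software.amazon.awssdk.services.customerprofiles.model.UpdateProfileRequest;

        public class SensitiveTraitExample {
            public static void main(String[] args) {
                // Members modeled with "sensitive":true are still set and sent normally...
                UpdateProfileRequest request = UpdateProfileRequest.builder()
                        .domainName("my-domain")
                        .profileId("example-profile-id")
                        .accountNumber("123456789")               // sensitiveString0To255
                        .personalEmailAddress("jane@example.com") // sensitiveString0To255
                        .build();

                // ...but the generated toString() redacts them, so logging the
                // request does not expose the raw values.
                System.out.println(request);
            }
        }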

    " } } @@ -5821,14 +5834,39 @@ "min":1, "pattern":".*" }, - "sqsQueueUrl":{ + "sensitiveString0To1000":{ + "type":"string", + "max":1000, + "min":0, + "sensitive":true + }, + "sensitiveString0To255":{ "type":"string", "max":255, - "min":0 + "min":0, + "sensitive":true }, - "string0To1000":{ + "sensitiveString1To1000":{ "type":"string", "max":1000, + "min":1, + "sensitive":true + }, + "sensitiveString1To255":{ + "type":"string", + "max":255, + "min":1, + "sensitive":true + }, + "sensitiveText":{ + "type":"string", + "max":1000, + "min":1, + "sensitive":true + }, + "sqsQueueUrl":{ + "type":"string", + "max":255, "min":0 }, "string0To255":{ @@ -5854,7 +5892,8 @@ "stringifiedJson":{ "type":"string", "max":256000, - "min":1 + "min":1, + "sensitive":true }, "text":{ "type":"string", diff --git a/services/databasemigration/pom.xml b/services/databasemigration/pom.xml index 69434417dbf2..1ade867af520 100644 --- a/services/databasemigration/pom.xml +++ b/services/databasemigration/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT databasemigration AWS Java SDK :: Services :: AWS Database Migration Service diff --git a/services/databasemigration/src/main/resources/codegen-resources/customization.config b/services/databasemigration/src/main/resources/codegen-resources/customization.config index 76e134825e24..c746548fecc0 100644 --- a/services/databasemigration/src/main/resources/codegen-resources/customization.config +++ b/services/databasemigration/src/main/resources/codegen-resources/customization.config @@ -13,7 +13,7 @@ "describeReplicationSubnetGroups", "describeReplicationTasks" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "describeReplicationTaskAssessmentResults" ] } diff --git a/services/databrew/pom.xml b/services/databrew/pom.xml index 91a9ed293120..57364176d6e9 100644 --- a/services/databrew/pom.xml +++ b/services/databrew/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT databrew AWS Java SDK :: Services :: Data Brew diff --git a/services/dataexchange/pom.xml b/services/dataexchange/pom.xml index a96df35d8c9e..fd8eaa10593f 100644 --- a/services/dataexchange/pom.xml +++ b/services/dataexchange/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT dataexchange AWS Java SDK :: Services :: DataExchange diff --git a/services/datapipeline/pom.xml b/services/datapipeline/pom.xml index f17c62eba44f..50ab1715e6e7 100644 --- a/services/datapipeline/pom.xml +++ b/services/datapipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT datapipeline AWS Java SDK :: Services :: AWS Data Pipeline diff --git a/services/datasync/pom.xml b/services/datasync/pom.xml index 8ba289c3d009..b079c43f57af 100644 --- a/services/datasync/pom.xml +++ b/services/datasync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT datasync AWS Java SDK :: Services :: DataSync diff --git a/services/datasync/src/main/resources/codegen-resources/service-2.json b/services/datasync/src/main/resources/codegen-resources/service-2.json index 1260e47922d1..0fe7d4a12b1a 100644 --- a/services/datasync/src/main/resources/codegen-resources/service-2.json +++ b/services/datasync/src/main/resources/codegen-resources/service-2.json @@ -223,7 +223,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

    Configures a task, which defines where and how DataSync transfers your data.

    A task includes a source location, a destination location, and the preferences for how and when you want to transfer your data (such as bandwidth limits, scheduling, among other options).

    If you're planning to transfer data to or from an Amazon S3 location, review how DataSync can affect your S3 request charges and the DataSync pricing page before you begin.

    " + "documentation":"

    Configures a transfer task, which defines where and how DataSync moves your data.

    A task includes a source location, destination location, and the options for how and when you want to transfer your data (such as bandwidth limits, scheduling, among other options).

    If you're planning to transfer data to or from an Amazon S3 location, review how DataSync can affect your S3 request charges and the DataSync pricing page before you begin.

    " }, "DeleteAgent":{ "name":"DeleteAgent", @@ -265,7 +265,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

    Deletes an DataSync task.

    " + "documentation":"

    Deletes a DataSync transfer task.

    " }, "DescribeAgent":{ "name":"DescribeAgent", @@ -521,7 +521,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

    Provides information about an DataSync transfer task that's running.

    " + "documentation":"

    Provides information about an execution of your DataSync task. You can use this operation to help monitor the progress of an ongoing transfer or check the results of the transfer.

    " }, "GenerateRecommendations":{ "name":"GenerateRecommendations", @@ -680,7 +680,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

    Starts an DataSync task. For each task, you can only run one task execution at a time.

    There are several phases to a task execution. For more information, see Task execution statuses.

    If you're planning to transfer data to or from an Amazon S3 location, review how DataSync can affect your S3 request charges and the DataSync pricing page before you begin.

    " + "documentation":"

    Starts a DataSync transfer task. For each task, you can only run one task execution at a time.

    There are several phases to a task execution. For more information, see Task execution statuses.

    If you're planning to transfer data to or from an Amazon S3 location, review how DataSync can affect your S3 request charges and the DataSync pricing page before you begin.

    " }, "StopDiscoveryJob":{ "name":"StopDiscoveryJob", @@ -851,7 +851,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

    Updates the metadata associated with a task.

    " + "documentation":"

    Updates the configuration of a DataSync transfer task.

    " }, "UpdateTaskExecution":{ "name":"UpdateTaskExecution", @@ -865,7 +865,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

    Modifies a running DataSync task.

    Currently, the only Option that you can modify with UpdateTaskExecution is BytesPerSecond , which throttles bandwidth for a running or queued task.

    " + "documentation":"

    Updates the configuration of a running DataSync task execution.

    Currently, the only Option that you can modify with UpdateTaskExecution is BytesPerSecond, which throttles bandwidth for a running or queued task execution.
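    A short sketch of the call described above, throttling a queued or running execution by updating BytesPerSecond (client setup is default and the execution ARN is the example ARN used elsewhere in this model; the builder names follow the datasync codegen conventions and are assumptions):

        import software.amazon.awssdk.services.datasync.DataSyncClient;
        import software.amazon.awssdk.services.datasync.model.Options;
        import software.amazon.awssdk.services.datasync.model.UpdateTaskExecutionRequest;

        public class ThrottleTaskExecution {
            public static void main(String[] args) {
                try (DataSyncClient dataSync = DataSyncClient.create()) {
                    // BytesPerSecond is currently the only Option that UpdateTaskExecution accepts.
                    dataSync.updateTaskExecution(UpdateTaskExecutionRequest.builder()
                            .taskExecutionArn("arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2/execution/exec-08ef1e88ec491019b")
                            .options(Options.builder()
                                    .bytesPerSecond(1_048_576L) // limit the transfer to roughly 1 MiB/s
                                    .build())
                            .build());
                }
            }
        }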

    " } }, "shapes":{ @@ -999,7 +999,7 @@ "members":{ "Token":{ "shape":"AzureBlobSasToken", - "documentation":"

    Specifies a SAS token that provides permissions at the Azure storage account, container, or folder level.

    The token is part of the SAS URI string that comes after the storage resource URI and a question mark. A token looks something like this:

    sp=r&st=2023-12-20T14:54:52Z&se=2023-12-20T22:54:52Z&spr=https&sv=2021-06-08&sr=c&sig=aBBKDWQvyuVcTPH9EBp%2FXTI9E%2F%2Fmq171%2BZU178wcwqU%3D

    " + "documentation":"

    Specifies a SAS token that provides permissions to access your Azure Blob Storage.

    The token is part of the SAS URI string that comes after the storage resource URI and a question mark. A token looks something like this:

    sp=r&st=2023-12-20T14:54:52Z&se=2023-12-20T22:54:52Z&spr=https&sv=2021-06-08&sr=c&sig=aBBKDWQvyuVcTPH9EBp%2FXTI9E%2F%2Fmq171%2BZU178wcwqU%3D

    " } }, "documentation":"

    The shared access signature (SAS) configuration that allows DataSync to access your Microsoft Azure Blob Storage.

    For more information, see SAS tokens for accessing your Azure Blob Storage.
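    A hedged sketch of supplying that SAS configuration when creating an Azure Blob location (the request and member names follow the datasync codegen conventions and are assumptions; the container URL, agent ARN, and token source are placeholders):

        import software.amazon.awssdk.services.datasync.DataSyncClient;
        import software.amazon.awssdk.services.datasync.model.AzureBlobAuthenticationType;
        import software.amazon.awssdk.services.datasync.model.AzureBlobSasConfiguration;
        import software.amazon.awssdk.services.datasync.model.CreateLocationAzureBlobRequest;

        public class CreateAzureBlobLocation {
            public static void main(String[] args) {
                String sasToken = System.getenv("AZURE_SAS_TOKEN"); // never hard-code the token

                try (DataSyncClient dataSync = DataSyncClient.create()) {
                    dataSync.createLocationAzureBlob(CreateLocationAzureBlobRequest.builder()
                            .containerUrl("https://myaccount.blob.core.windows.net/my-container")
                            .authenticationType(AzureBlobAuthenticationType.SAS)
                            .sasConfiguration(AzureBlobSasConfiguration.builder()
                                    .token(sasToken)
                                    .build())
                            .agentArns("arn:aws:datasync:us-east-1:111222333444:agent/agent-0b0addbeef44baca3")
                            .build());
                }
            }
        }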

    " @@ -1675,6 +1675,10 @@ "Includes":{ "shape":"FilterList", "documentation":"

    Specifies a list of filter rules that include specific data during your transfer. For more information and examples, see Filtering data transferred by DataSync.

    " + }, + "TaskReportConfig":{ + "shape":"TaskReportConfig", + "documentation":"

    Specifies how you want to configure a task report, which provides detailed information about your DataSync transfer.

    " } }, "documentation":"

    CreateTaskRequest

    " @@ -2456,7 +2460,7 @@ "members":{ "TaskExecutionArn":{ "shape":"TaskExecutionArn", - "documentation":"

    Specifies the Amazon Resource Name (ARN) of the transfer task that's running.

    " + "documentation":"

    Specifies the Amazon Resource Name (ARN) of the task execution that you want information about.

    " } }, "documentation":"

    DescribeTaskExecutionRequest

    " @@ -2466,11 +2470,11 @@ "members":{ "TaskExecutionArn":{ "shape":"TaskExecutionArn", - "documentation":"

    The Amazon Resource Name (ARN) of the task execution that was described. TaskExecutionArn is hierarchical and includes TaskArn for the task that was executed.

    For example, a TaskExecution value with the ARN arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2/execution/exec-08ef1e88ec491019b executed the task with the ARN arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2.

    " + "documentation":"

    The ARN of the task execution that you wanted information about. TaskExecutionArn is hierarchical and includes TaskArn for the task that was executed.

    For example, a TaskExecution value with the ARN arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2/execution/exec-08ef1e88ec491019b executed the task with the ARN arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2.

    " }, "Status":{ "shape":"TaskExecutionStatus", - "documentation":"

    The status of the task execution.

    For detailed information about task execution statuses, see Understanding Task Statuses in the DataSync User Guide.

    " + "documentation":"

    The status of the task execution.

    " }, "Options":{"shape":"Options"}, "Excludes":{ @@ -2483,23 +2487,23 @@ }, "StartTime":{ "shape":"Time", - "documentation":"

    The time that the task execution was started.

    " + "documentation":"

    The time when the task execution started.

    " }, "EstimatedFilesToTransfer":{ "shape":"long", - "documentation":"

    The expected number of files that is to be transferred over the network. This value is calculated during the PREPARING phase before the TRANSFERRING phase of the task execution. This value is the expected number of files to be transferred. It's calculated based on comparing the content of the source and destination locations and finding the delta that needs to be transferred.

    " + "documentation":"

    The expected number of files, objects, and directories that DataSync will transfer over the network. This value is calculated during the task execution's PREPARING phase before the TRANSFERRING phase. The calculation is based on comparing the content of the source and destination locations and finding the difference that needs to be transferred.

    " }, "EstimatedBytesToTransfer":{ "shape":"long", - "documentation":"

    The estimated physical number of bytes that is to be transferred over the network.

    " + "documentation":"

    The estimated physical number of bytes that will transfer over the network.

    " }, "FilesTransferred":{ "shape":"long", - "documentation":"

    The actual number of files that was transferred over the network. This value is calculated and updated on an ongoing basis during the TRANSFERRING phase of the task execution. It's updated periodically when each file is read from the source and sent over the network.

    If failures occur during a transfer, this value can be less than EstimatedFilesToTransfer. In some cases, this value can also be greater than EstimatedFilesToTransfer. This element is implementation-specific for some location types, so don't use it as an indicator for a correct file number or to monitor your task execution.

    " + "documentation":"

    The actual number of files, objects, and directories that DataSync transferred over the network. This value is updated periodically during the task execution's TRANSFERRING phase when something is read from the source and sent over the network.

    If DataSync fails to transfer something, this value can be less than EstimatedFilesToTransfer. In some cases, this value can also be greater than EstimatedFilesToTransfer. This element is implementation-specific for some location types, so don't use it as an exact indication of what transferred or to monitor your task execution.

    " }, "BytesWritten":{ "shape":"long", - "documentation":"

    The number of logical bytes written to the destination Amazon Web Services storage resource.

    " + "documentation":"

    The number of logical bytes written to the destination location.

    " }, "BytesTransferred":{ "shape":"long", @@ -2512,6 +2516,30 @@ "BytesCompressed":{ "shape":"long", "documentation":"

    The physical number of bytes transferred over the network after compression was applied. In most cases, this number is less than BytesTransferred unless the data isn't compressible.

    " + }, + "TaskReportConfig":{ + "shape":"TaskReportConfig", + "documentation":"

    The configuration of your task report, which provides detailed information about your DataSync transfer.

    " + }, + "FilesDeleted":{ + "shape":"long", + "documentation":"

    The number of files, objects, and directories that DataSync deleted in your destination location. If you don't configure your task to delete data in the destination that isn't in the source, the value is always 0.

    " + }, + "FilesSkipped":{ + "shape":"long", + "documentation":"

    The number of files, objects, and directories that DataSync skipped during your transfer.

    " + }, + "FilesVerified":{ + "shape":"long", + "documentation":"

    The number of files, objects, and directories that DataSync verified during your transfer.

    " + }, + "ReportResult":{ + "shape":"ReportResult", + "documentation":"

    Indicates whether DataSync generated a complete task report for your transfer.

    " + }, + "EstimatedFilesToDelete":{ + "shape":"long", + "documentation":"

    The expected number of files, objects, and directories that DataSync will delete in your destination location. If you don't configure your task to delete data in the destination that isn't in the source, the value is always 0.

    " } }, "documentation":"

    DescribeTaskExecutionResponse
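    The response above gains per-category counters and a report result. A minimal sketch of reading them after a transfer (the new accessor names are assumed from the model members and the SDK's usual codegen conventions; the execution ARN is the example ARN used elsewhere in this model):

        import software.amazon.awssdk.services.datasync.DataSyncClient;
        import software.amazon.awssdk.services.datasync.model.DescribeTaskExecutionRequest;
        import software.amazon.awssdk.services.datasync.model.DescribeTaskExecutionResponse;

        public class InspectTaskExecution {
            public static void main(String[] args) {
                try (DataSyncClient dataSync = DataSyncClient.create()) {
                    DescribeTaskExecutionResponse execution = dataSync.describeTaskExecution(
                            DescribeTaskExecutionRequest.builder()
                                    .taskExecutionArn("arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2/execution/exec-08ef1e88ec491019b")
                                    .build());

                    System.out.printf("status=%s transferred=%d skipped=%d verified=%d deleted=%d%n",
                            execution.status(),
                            execution.filesTransferred(),
                            execution.filesSkipped(),
                            execution.filesVerified(),
                            execution.filesDeleted());

                    // The report result may be absent when no task report was requested.
                    if (execution.reportResult() != null) {
                        System.out.println("task report: " + execution.reportResult().status());
                    }
                }
            }
        }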

    " @@ -2593,6 +2621,10 @@ "Includes":{ "shape":"FilterList", "documentation":"

    A list of filter rules that include specific data during your transfer. For more information and examples, see Filtering data transferred by DataSync.

    " + }, + "TaskReportConfig":{ + "shape":"TaskReportConfig", + "documentation":"

    The configuration of your task report. For more information, see Creating a task report.

    " } }, "documentation":"

    DescribeTaskResponse

    " @@ -3744,6 +3776,13 @@ "NONE" ] }, + "ObjectVersionIds":{ + "type":"string", + "enum":[ + "INCLUDE", + "NONE" + ] + }, "OnPremConfig":{ "type":"structure", "required":["AgentArns"], @@ -3834,7 +3873,7 @@ "documentation":"

    Specifies whether object tags are preserved when transferring between object storage systems. If you want your DataSync task to ignore object tags, specify the NONE value.

    Default Value: PRESERVE

    " } }, - "documentation":"

    Configures your DataSync task settings. These options include how DataSync handles files, objects, and their associated metadata. You also can specify how DataSync verifies data integrity, set bandwidth limits for your task, among other options.

    Each task setting has a default value. Unless you need to, you don't have to configure any of these Options before starting your task.

    " + "documentation":"

    Indicates how your transfer task is configured. These options include how DataSync handles files, objects, and their associated metadata during your transfer. You also can specify how to verify data integrity, set bandwidth limits for your task, among other options.

    Each option has a default value. Unless you need to, you don't have to configure any of these options before starting your task.

    " }, "OutputTagList":{ "type":"list", @@ -4017,6 +4056,102 @@ "members":{ } }, + "ReportDestination":{ + "type":"structure", + "members":{ + "S3":{ + "shape":"ReportDestinationS3", + "documentation":"

    Specifies the Amazon S3 bucket where DataSync uploads your task report.

    " + } + }, + "documentation":"

    Specifies where DataSync uploads your task report.

    " + }, + "ReportDestinationS3":{ + "type":"structure", + "required":[ + "S3BucketArn", + "BucketAccessRoleArn" + ], + "members":{ + "Subdirectory":{ + "shape":"S3Subdirectory", + "documentation":"

    Specifies a bucket prefix for your report.

    " + }, + "S3BucketArn":{ + "shape":"S3BucketArn", + "documentation":"

    Specifies the ARN of the S3 bucket where DataSync uploads your report.

    " + }, + "BucketAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

    Specifies the Amazon Resource Name (ARN) of the IAM policy that allows DataSync to upload a task report to your S3 bucket. For more information, see Allowing DataSync to upload a task report to an Amazon S3 bucket.

    " + } + }, + "documentation":"

    Specifies the Amazon S3 bucket where DataSync uploads your task report.

    " + }, + "ReportLevel":{ + "type":"string", + "enum":[ + "ERRORS_ONLY", + "SUCCESSES_AND_ERRORS" + ] + }, + "ReportOutputType":{ + "type":"string", + "enum":[ + "SUMMARY_ONLY", + "STANDARD" + ] + }, + "ReportOverride":{ + "type":"structure", + "members":{ + "ReportLevel":{ + "shape":"ReportLevel", + "documentation":"

    Specifies whether your task report includes errors only or successes and errors.

    For example, your report might mostly include only what didn't go well in your transfer (ERRORS_ONLY). At the same time, you want to verify that your task filter is working correctly. In this situation, you can get a list of the files DataSync successfully skipped and whether anything transferred that you didn't intend to transfer (SUCCESSES_AND_ERRORS).

    " + } + }, + "documentation":"

    Specifies the level of detail for a particular aspect of your DataSync task report.

    " + }, + "ReportOverrides":{ + "type":"structure", + "members":{ + "Transferred":{ + "shape":"ReportOverride", + "documentation":"

    Specifies the level of reporting for the files, objects, and directories that DataSync attempted to transfer.

    " + }, + "Verified":{ + "shape":"ReportOverride", + "documentation":"

    Specifies the level of reporting for the files, objects, and directories that DataSync attempted to verify at the end of your transfer. This only applies if you configure your task to verify data during and after the transfer (which DataSync does by default).

    " + }, + "Deleted":{ + "shape":"ReportOverride", + "documentation":"

    Specifies the level of reporting for the files, objects, and directories that DataSync attempted to delete in your destination location. This only applies if you configure your task to delete data in the destination that isn't in the source.

    " + }, + "Skipped":{ + "shape":"ReportOverride", + "documentation":"

    Specifies the level of reporting for the files, objects, and directories that DataSync attempted to skip during your transfer.

    " + } + }, + "documentation":"

    The level of detail included in each aspect of your DataSync task report.

    " + }, + "ReportResult":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"PhaseStatus", + "documentation":"

    Indicates whether DataSync is still working on your report, created a report, or can't create a complete report.

    " + }, + "ErrorCode":{ + "shape":"string", + "documentation":"

    Indicates the code associated with the error if DataSync can't create a complete report.

    " + }, + "ErrorDetail":{ + "shape":"string", + "documentation":"

    Provides details about issues creating a report.

    " + } + }, + "documentation":"

    Indicates whether DataSync created a complete task report for your transfer.

    " + }, "ResourceDetails":{ "type":"structure", "members":{ @@ -4234,6 +4369,10 @@ "Tags":{ "shape":"InputTagList", "documentation":"

    Specifies the tags that you want to apply to the Amazon Resource Name (ARN) representing the task execution.

    Tags are key-value pairs that help you manage, filter, and search for your DataSync resources.

    " + }, + "TaskReportConfig":{ + "shape":"TaskReportConfig", + "documentation":"

    Specifies how you want to configure a task report, which provides detailed information about your DataSync transfer.

    " } }, "documentation":"

    StartTaskExecutionRequest

    " @@ -4505,6 +4644,32 @@ "DISABLED" ] }, + "TaskReportConfig":{ + "type":"structure", + "members":{ + "Destination":{ + "shape":"ReportDestination", + "documentation":"

    Specifies the Amazon S3 bucket where DataSync uploads your task report. For more information, see Task reports.

    " + }, + "OutputType":{ + "shape":"ReportOutputType", + "documentation":"

    Specifies the type of task report that you want:

    • SUMMARY_ONLY: Provides necessary details about your task, including the number of files, objects, and directories transferred and transfer duration.

    • STANDARD: Provides complete details about your task, including a full list of files, objects, and directories that were transferred, skipped, verified, and more.

    " + }, + "ReportLevel":{ + "shape":"ReportLevel", + "documentation":"

    Specifies whether you want your task report to include only what went wrong with your transfer or a list of what succeeded and didn't.

    • ERRORS_ONLY: A report shows what DataSync was unable to transfer, skip, verify, and delete.

    • SUCCESSES_AND_ERRORS: A report shows what DataSync was able and unable to transfer, skip, verify, and delete.

    " + }, + "ObjectVersionIds":{ + "shape":"ObjectVersionIds", + "documentation":"

    Specifies whether your task report includes the new version of each object transferred into an S3 bucket. This only applies if you enable versioning on your bucket. Keep in mind that setting this to INCLUDE can increase the duration of your task execution.

    " + }, + "Overrides":{ + "shape":"ReportOverrides", + "documentation":"

    Customizes the reporting level for aspects of your task report. For example, your report might generally only include errors, but you could specify that you want a list of successes and errors just for the files that DataSync attempted to delete in your destination location.

    " + } + }, + "documentation":"

    Specifies how you want to configure a task report, which provides detailed information about your DataSync transfer.

    For more information, see Task reports.
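    A hedged sketch of wiring this configuration into a task execution with the generated builders (class and member names are assumed from the model above and the SDK's codegen conventions; the bucket and role ARNs are placeholders and the task ARN is the example ARN used elsewhere in this model):

        import software.amazon.awssdk.services.datasync.DataSyncClient;
        import software.amazon.awssdk.services.datasync.model.ObjectVersionIds;
        import software.amazon.awssdk.services.datasync.model.ReportDestination;
        import software.amazon.awssdk.services.datasync.model.ReportDestinationS3;
        import software.amazon.awssdk.services.datasync.model.ReportLevel;
        import software.amazon.awssdk.services.datasync.model.ReportOutputType;
        import software.amazon.awssdk.services.datasync.model.StartTaskExecutionRequest;
        import software.amazon.awssdk.services.datasync.model.TaskReportConfig;

        public class StartTaskWithReport {
            public static void main(String[] args) {
                TaskReportConfig reportConfig = TaskReportConfig.builder()
                        .destination(ReportDestination.builder()
                                .s3(ReportDestinationS3.builder()
                                        .s3BucketArn("arn:aws:s3:::my-task-report-bucket")
                                        .bucketAccessRoleArn("arn:aws:iam::111222333444:role/datasync-report-upload")
                                        .subdirectory("reports/")
                                        .build())
                                .build())
                        .outputType(ReportOutputType.STANDARD)         // full list of items, not just a summary
                        .reportLevel(ReportLevel.SUCCESSES_AND_ERRORS)
                        .objectVersionIds(ObjectVersionIds.INCLUDE)
                        .build();

                try (DataSyncClient dataSync = DataSyncClient.create()) {
                    dataSync.startTaskExecution(StartTaskExecutionRequest.builder()
                            .taskArn("arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2")
                            .taskReportConfig(reportConfig)
                            .build());
                }
            }
        }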

    " + }, "TaskSchedule":{ "type":"structure", "required":["ScheduleExpression"], @@ -4915,6 +5080,10 @@ "Includes":{ "shape":"FilterList", "documentation":"

    Specifies a list of filter rules that include specific data during your transfer. For more information and examples, see Filtering data transferred by DataSync.

    " + }, + "TaskReportConfig":{ + "shape":"TaskReportConfig", + "documentation":"

    Specifies how you want to configure a task report, which provides detailed information about your DataSync transfer.

    " } }, "documentation":"

    UpdateTaskResponse

    " diff --git a/services/dax/pom.xml b/services/dax/pom.xml index e60dd7a979e1..bf6df4b627a7 100644 --- a/services/dax/pom.xml +++ b/services/dax/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT dax AWS Java SDK :: Services :: Amazon DynamoDB Accelerator (DAX) diff --git a/services/detective/pom.xml b/services/detective/pom.xml index d0a03d0cbe54..214ca2e0b578 100644 --- a/services/detective/pom.xml +++ b/services/detective/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT detective AWS Java SDK :: Services :: Detective diff --git a/services/detective/src/main/resources/codegen-resources/service-2.json b/services/detective/src/main/resources/codegen-resources/service-2.json index 297e16c60ec9..0af98aa33613 100644 --- a/services/detective/src/main/resources/codegen-resources/service-2.json +++ b/services/detective/src/main/resources/codegen-resources/service-2.json @@ -754,12 +754,14 @@ "type":"string", "max":64, "min":1, - "pattern":"^.+@(?:(?:(?!-)[A-Za-z0-9-]{1,62})?[A-Za-z0-9]{1}\\.)+[A-Za-z]{2,63}$" + "pattern":"^.+@(?:(?:(?!-)[A-Za-z0-9-]{1,62})?[A-Za-z0-9]{1}\\.)+[A-Za-z]{2,63}$", + "sensitive":true }, "EmailMessage":{ "type":"string", "max":1000, - "min":1 + "min":1, + "sensitive":true }, "EnableOrganizationAdminAccountRequest":{ "type":"structure", diff --git a/services/devicefarm/pom.xml b/services/devicefarm/pom.xml index 1b5bebe6309c..3fb5535935e7 100644 --- a/services/devicefarm/pom.xml +++ b/services/devicefarm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT devicefarm AWS Java SDK :: Services :: AWS Device Farm diff --git a/services/devicefarm/src/main/resources/codegen-resources/customization.config b/services/devicefarm/src/main/resources/codegen-resources/customization.config index 9672f49afb18..158dab87a468 100644 --- a/services/devicefarm/src/main/resources/codegen-resources/customization.config +++ b/services/devicefarm/src/main/resources/codegen-resources/customization.config @@ -10,7 +10,7 @@ "listOfferings", "listProjects" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "purchaseOffering", "renewOffering", "listVPCEConfigurations" diff --git a/services/devopsguru/pom.xml b/services/devopsguru/pom.xml index 6b5866e4e08a..e007479520c7 100644 --- a/services/devopsguru/pom.xml +++ b/services/devopsguru/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT devopsguru AWS Java SDK :: Services :: Dev Ops Guru diff --git a/services/directconnect/pom.xml b/services/directconnect/pom.xml index 82438467b1c0..11eb28b1b0df 100644 --- a/services/directconnect/pom.xml +++ b/services/directconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT directconnect AWS Java SDK :: Services :: AWS Direct Connect diff --git a/services/directconnect/src/main/resources/codegen-resources/customization.config b/services/directconnect/src/main/resources/codegen-resources/customization.config index dcf4f21ff809..4afc46a926a1 100644 --- a/services/directconnect/src/main/resources/codegen-resources/customization.config +++ b/services/directconnect/src/main/resources/codegen-resources/customization.config @@ -8,7 +8,7 @@ "describeVirtualGateways", "describeVirtualInterfaces" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "createBGPPeer", "deleteBGPPeer", "describeDirectConnectGatewayAttachments", diff --git 
a/services/directory/pom.xml b/services/directory/pom.xml index d3f0027ee4fa..6abb375a0232 100644 --- a/services/directory/pom.xml +++ b/services/directory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT directory AWS Java SDK :: Services :: AWS Directory Service diff --git a/services/dlm/pom.xml b/services/dlm/pom.xml index fd66c0bbc4ef..d6f2bbd08ded 100644 --- a/services/dlm/pom.xml +++ b/services/dlm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT dlm AWS Java SDK :: Services :: DLM diff --git a/services/docdb/pom.xml b/services/docdb/pom.xml index ea2df4c1a78c..773e0b085353 100644 --- a/services/docdb/pom.xml +++ b/services/docdb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT docdb AWS Java SDK :: Services :: DocDB diff --git a/services/docdbelastic/pom.xml b/services/docdbelastic/pom.xml index 6a89affe5b61..b1b5bb7b4008 100644 --- a/services/docdbelastic/pom.xml +++ b/services/docdbelastic/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT docdbelastic AWS Java SDK :: Services :: Doc DB Elastic diff --git a/services/drs/pom.xml b/services/drs/pom.xml index bb1c4a81a469..7677e44e8bcd 100644 --- a/services/drs/pom.xml +++ b/services/drs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT drs AWS Java SDK :: Services :: Drs diff --git a/services/dynamodb/pom.xml b/services/dynamodb/pom.xml index e5d2051168bf..2a4f1d2219c4 100644 --- a/services/dynamodb/pom.xml +++ b/services/dynamodb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT dynamodb AWS Java SDK :: Services :: Amazon DynamoDB diff --git a/services/ebs/pom.xml b/services/ebs/pom.xml index fcc647fe8fa2..2fb1b827d752 100644 --- a/services/ebs/pom.xml +++ b/services/ebs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ebs AWS Java SDK :: Services :: EBS diff --git a/services/ec2/pom.xml b/services/ec2/pom.xml index d40cb3d9c822..52419854db66 100644 --- a/services/ec2/pom.xml +++ b/services/ec2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ec2 AWS Java SDK :: Services :: Amazon EC2 diff --git a/services/ec2/src/main/resources/codegen-resources/customization.config b/services/ec2/src/main/resources/codegen-resources/customization.config index 1c08d1cf45f3..df7bb789e2d6 100644 --- a/services/ec2/src/main/resources/codegen-resources/customization.config +++ b/services/ec2/src/main/resources/codegen-resources/customization.config @@ -81,11 +81,6 @@ "describeVpnGateways" ], "shapeModifiers": { - "*": { - "exclude": [ - "DryRun" - ] - }, "AttachClassicLinkVpcResult": { "modify": [ { @@ -330,7 +325,7 @@ ] } }, - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "acceptVpcPeeringConnection", "authorizeSecurityGroupIngress", "cancelImportTask", diff --git a/services/ec2/src/main/resources/codegen-resources/service-2.json b/services/ec2/src/main/resources/codegen-resources/service-2.json index 330d367716e0..e2c60af559ec 100644 --- a/services/ec2/src/main/resources/codegen-resources/service-2.json +++ b/services/ec2/src/main/resources/codegen-resources/service-2.json @@ -1075,7 +1075,7 @@ }, "input":{"shape":"CreateSubnetCidrReservationRequest"}, "output":{"shape":"CreateSubnetCidrReservationResult"}, - "documentation":"

    Creates a subnet CIDR reservation. For information about subnet CIDR reservations, see Subnet CIDR reservations in the Amazon Virtual Private Cloud User Guide.

    " + "documentation":"

    Creates a subnet CIDR reservation. For more information, see Subnet CIDR reservations in the Amazon Virtual Private Cloud User Guide and Assign prefixes to network interfaces in the Amazon Elastic Compute Cloud User Guide.

    " }, "CreateTags":{ "name":"CreateTags", @@ -1549,6 +1549,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteKeyPairRequest"}, + "output":{"shape":"DeleteKeyPairResult"}, "documentation":"

    Deletes the specified key pair, by removing the public key from Amazon EC2.

    " }, "DeleteLaunchTemplate":{ @@ -1569,7 +1570,7 @@ }, "input":{"shape":"DeleteLaunchTemplateVersionsRequest"}, "output":{"shape":"DeleteLaunchTemplateVersionsResult"}, - "documentation":"

    Deletes one or more versions of a launch template. You cannot delete the default version of a launch template; you must first assign a different version as the default. If the default version is the only version for the launch template, you must delete the entire launch template using DeleteLaunchTemplate.

    " + "documentation":"

    Deletes one or more versions of a launch template.

    You can't delete the default version of a launch template; you must first assign a different version as the default. If the default version is the only version for the launch template, you must delete the entire launch template using DeleteLaunchTemplate.

    You can delete up to 200 launch template versions in a single request. To delete more than 200 versions in a single request, use DeleteLaunchTemplate, which deletes the launch template and all of its versions.

    For more information, see Delete a launch template version in the EC2 User Guide.

    " }, "DeleteLocalGatewayRoute":{ "name":"DeleteLocalGatewayRoute", @@ -12288,7 +12289,7 @@ }, "LogFormat":{ "shape":"String", - "documentation":"

    The fields to include in the flow log record. List the fields in the order in which they should appear. If you omit this parameter, the flow log is created using the default format. If you specify this parameter, you must include at least one field. For more information about the available fields, see Flow log records in the Amazon VPC User Guide or Transit Gateway Flow Log records in the Amazon Web Services Transit Gateway Guide.

    Specify the fields using the ${field-id} format, separated by spaces. For the CLI, surround this parameter value with single quotes on Linux or double quotes on Windows.

    " + "documentation":"

    The fields to include in the flow log record. List the fields in the order in which they should appear. If you omit this parameter, the flow log is created using the default format. If you specify this parameter, you must include at least one field. For more information about the available fields, see Flow log records in the Amazon VPC User Guide or Transit Gateway Flow Log records in the Amazon Web Services Transit Gateway Guide.

    Specify the fields using the ${field-id} format, separated by spaces.
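    A brief sketch of passing a custom format string in that ${field-id} form (the field ids shown are standard flow-log fields; the VPC ID and bucket ARN are placeholders, and the builder names follow the ec2 codegen conventions and are assumptions):

        import software.amazon.awssdk.services.ec2.Ec2Client;
        import software.amazon.awssdk.services.ec2.model.CreateFlowLogsRequest;
        import software.amazon.awssdk.services.ec2.model.FlowLogsResourceType;
        import software.amazon.awssdk.services.ec2.model.LogDestinationType;
        import software.amazon.awssdk.services.ec2.model.TrafficType;

        public class CreateCustomFormatFlowLog {
            public static void main(String[] args) {
                // Fields are listed in the order they should appear in each record.
                String logFormat = "${version} ${interface-id} ${srcaddr} ${dstaddr} ${action} ${log-status}";

                try (Ec2Client ec2 = Ec2Client.create()) {
                    ec2.createFlowLogs(CreateFlowLogsRequest.builder()
                            .resourceIds("vpc-0123456789abcdef0")
                            .resourceType(FlowLogsResourceType.VPC)
                            .trafficType(TrafficType.ALL)
                            .logDestinationType(LogDestinationType.S3)
                            .logDestination("arn:aws:s3:::my-flow-log-bucket")
                            .logFormat(logFormat)
                            .build());
                }
            }
        }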

    " }, "TagSpecifications":{ "shape":"TagSpecificationList", @@ -14042,7 +14043,7 @@ }, "ReservationType":{ "shape":"SubnetCidrReservationType", - "documentation":"

    The type of reservation.

    The following are valid values:

    • prefix: The Amazon EC2 Prefix Delegation feature assigns the IP addresses to network interfaces that are associated with an instance. For information about Prefix Delegation, see Prefix Delegation for Amazon EC2 network interfaces in the Amazon Elastic Compute Cloud User Guide.

    • explicit: You manually assign the IP addresses to resources that reside in your subnet.

    " + "documentation":"

    The type of reservation. The reservation type determines how the reserved IP addresses are assigned to resources.

    • prefix - Amazon Web Services assigns the reserved IP addresses to network interfaces.

    • explicit - You assign the reserved IP addresses to network interfaces.

    " }, "Description":{ "shape":"String", @@ -14285,7 +14286,7 @@ }, "PacketLength":{ "shape":"Integer", - "documentation":"

    The number of bytes in each packet to mirror. These are bytes after the VXLAN header. Do not specify this parameter when you want to mirror the entire packet. To mirror a subset of the packet, set this to the length (in bytes) that you want to mirror. For example, if you set this value to 100, then the first 100 bytes that meet the filter criteria are copied to the target.

    If you do not want to mirror the entire packet, use the PacketLength parameter to specify the number of bytes in each packet to mirror.

    " + "documentation":"

    The number of bytes in each packet to mirror. These are bytes after the VXLAN header. Do not specify this parameter when you want to mirror the entire packet. To mirror a subset of the packet, set this to the length (in bytes) that you want to mirror. For example, if you set this value to 100, then the first 100 bytes that meet the filter criteria are copied to the target.

    If you do not want to mirror the entire packet, use the PacketLength parameter to specify the number of bytes in each packet to mirror.

    For sessions with Network Load Balancer (NLB) Traffic Mirror targets, the default PacketLength will be set to 8500. Valid values are 1-8500. Setting a PacketLength greater than 8500 will result in an error response.

    " }, "SessionNumber":{ "shape":"Integer", @@ -15342,12 +15343,12 @@ }, "SubnetIds":{ "shape":"VpcEndpointSubnetIdList", - "documentation":"

    (Interface and Gateway Load Balancer endpoints) The IDs of the subnets in which to create an endpoint network interface. For a Gateway Load Balancer endpoint, you can specify only one subnet.

    ", + "documentation":"

    (Interface and Gateway Load Balancer endpoints) The IDs of the subnets in which to create endpoint network interfaces. For a Gateway Load Balancer endpoint, you can specify only one subnet.

    ", "locationName":"SubnetId" }, "SecurityGroupIds":{ "shape":"VpcEndpointSecurityGroupIdList", - "documentation":"

    (Interface endpoint) The IDs of the security groups to associate with the endpoint network interface. If this parameter is not specified, we use the default security group for the VPC.

    ", + "documentation":"

    (Interface endpoint) The IDs of the security groups to associate with the endpoint network interfaces. If this parameter is not specified, we use the default security group for the VPC.

    ", "locationName":"SecurityGroupId" }, "IpAddressType":{ @@ -15370,6 +15371,11 @@ "shape":"TagSpecificationList", "documentation":"

    The tags to associate with the endpoint.

    ", "locationName":"TagSpecification" + }, + "SubnetConfigurations":{ + "shape":"SubnetConfigurationsList", + "documentation":"

    The subnet configurations for the endpoint.

    ", + "locationName":"SubnetConfiguration" } } }, @@ -16427,6 +16433,21 @@ } } }, + "DeleteKeyPairResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "documentation":"

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "locationName":"return" + }, + "KeyPairId":{ + "shape":"String", + "documentation":"

    The ID of the key pair.
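    With the new result shape, the caller can confirm which key pair was removed. A small sketch (the response accessors mirror the members above and are assumptions; the key name is a placeholder):

        import software.amazon.awssdk.services.ec2.Ec2Client;
        import software.amazon.awssdk.services.ec2.model.DeleteKeyPairRequest;
        import software.amazon.awssdk.services.ec2.model.DeleteKeyPairResponse;

        public class DeleteKeyPairExample {
            public static void main(String[] args) {
                try (Ec2Client ec2 = Ec2Client.create()) {
                    DeleteKeyPairResponse response = ec2.deleteKeyPair(
                            DeleteKeyPairRequest.builder()
                                    .keyName("my-key-pair")
                                    .build());

                    // Both accessors are new with this model change.
                    System.out.println("deleted key pair id: " + response.keyPairId());
                    System.out.println("request succeeded: " + response.returnValue());
                }
            }
        }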

    ", + "locationName":"keyPairId" + } + } + }, "DeleteLaunchTemplateRequest":{ "type":"structure", "members":{ @@ -16472,7 +16493,7 @@ }, "Versions":{ "shape":"VersionStringList", - "documentation":"

    The version numbers of one or more launch template versions to delete.

    ", + "documentation":"

    The version numbers of one or more launch template versions to delete. You can specify up to 200 launch template version numbers.

    ", "locationName":"LaunchTemplateVersion" } } @@ -20339,7 +20360,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

    The filters.

    • affinity - The affinity setting for an instance running on a Dedicated Host (default | host).

    • architecture - The instance architecture (i386 | x86_64 | arm64).

    • availability-zone - The Availability Zone of the instance.

    • block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2010-09-15T17:15:20.000Z.

    • block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination.

    • block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh).

    • block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached).

    • block-device-mapping.volume-id - The volume ID of the EBS volume.

    • capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched.

    • client-token - The idempotency token you provided when you launched the instance.

    • dns-name - The public DNS name of the instance.

    • hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation.

    • host-id - The ID of the Dedicated Host on which the instance is running, if applicable.

    • hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors.

    • iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN.

    • image-id - The ID of the image used to launch the instance.

    • instance-id - The ID of the instance.

    • instance-lifecycle - Indicates whether this is a Spot Instance or a Scheduled Instance (spot | scheduled).

    • instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

    • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

    • instance-type - The type of instance (for example, t2.micro).

    • instance.group-id - The ID of the security group for the instance.

    • instance.group-name - The name of the security group for the instance.

    • ip-address - The public IPv4 address of the instance.

    • kernel-id - The kernel ID.

    • key-name - The name of the key pair used when the instance was launched.

    • launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).

    • launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day.

    • metadata-options.http-tokens - The metadata request authorization state (optional | required)

    • metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64)

    • metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled)

    • metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled)

    • monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled).

    • network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface.

    • network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address.

    • network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface.

    • network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface.

    • network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

    • network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

    • network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

    • network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

    • network-interface.attachment.attachment-id - The ID of the interface attachment.

    • network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached.

    • network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    • network-interface.attachment.device-index - The device index to which the network interface is attached.

    • network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    • network-interface.attachment.attach-time - The time that the network interface was attached to an instance.

    • network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated.

    • network-interface.availability-zone - The Availability Zone for the network interface.

    • network-interface.description - The description of the network interface.

    • network-interface.group-id - The ID of a security group associated with the network interface.

    • network-interface.group-name - The name of a security group associated with the network interface.

    • network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface.

    • network-interface.mac-address - The MAC address of the network interface.

    • network-interface.network-interface-id - The ID of the network interface.

    • network-interface.owner-id - The ID of the owner of the network interface.

    • network-interface.private-dns-name - The private DNS name of the network interface.

    • network-interface.requester-id - The requester ID for the network interface.

    • network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services.

    • network-interface.status - The status of the network interface (available) | in-use).

    • network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    • network-interface.subnet-id - The ID of the subnet for the network interface.

    • network-interface.vpc-id - The ID of the VPC for the network interface.

    • outpost-arn - The Amazon Resource Name (ARN) of the Outpost.

    • owner-id - The Amazon Web Services account ID of the instance owner.

    • placement-group-name - The name of the placement group for the instance.

    • placement-partition-number - The partition in which the instance is located.

    • platform - The platform. To list only Windows instances, use windows.

    • private-dns-name - The private IPv4 DNS name of the instance.

    • private-ip-address - The private IPv4 address of the instance.

    • product-code - The product code associated with the AMI used to launch the instance.

    • product-code.type - The type of product code (devpay | marketplace).

    • ramdisk-id - The RAM disk ID.

    • reason - The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter.

    • requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on).

    • reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID.

    • root-device-name - The device name of the root device volume (for example, /dev/sda1).

    • root-device-type - The type of the root device volume (ebs | instance-store).

    • source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC.

    • spot-instance-request-id - The ID of the Spot Instance request.

    • state-reason-code - The reason code for the state change.

    • state-reason-message - A message that describes the state change.

    • subnet-id - The ID of the subnet for the instance.

    • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.

    • tenancy - The tenancy of an instance (dedicated | default | host).

    • virtualization-type - The virtualization type of the instance (paravirtual | hvm).

    • vpc-id - The ID of the VPC that the instance is running in.

    ", + "documentation":"

    The filters.

    • affinity - The affinity setting for an instance running on a Dedicated Host (default | host).

    • architecture - The instance architecture (i386 | x86_64 | arm64).

    • availability-zone - The Availability Zone of the instance.

    • block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z.

    • block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination.

    • block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh).

    • block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached).

    • block-device-mapping.volume-id - The volume ID of the EBS volume.

    • boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred).

    • capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched.

    • capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none).

    • capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation.

    • capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group.

    • client-token - The idempotency token you provided when you launched the instance.

    • current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi).

    • dns-name - The public DNS name of the instance.

    • ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O.

    • ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA.

    • enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.

    • hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation.

    • host-id - The ID of the Dedicated Host on which the instance is running, if applicable.

    • hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors.

    • iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN.

    • iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID.

    • iam-instance-profile.name - The instance profile associated with the instance. Specified as a name.

    • image-id - The ID of the image used to launch the instance.

    • instance-id - The ID of the instance.

    • instance-lifecycle - Indicates whether this is a Spot Instance or a Scheduled Instance (spot | scheduled).

    • instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

    • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

    • instance-type - The type of instance (for example, t2.micro).

    • instance.group-id - The ID of the security group for the instance.

    • instance.group-name - The name of the security group for the instance.

    • ip-address - The public IPv4 address of the instance.

    • ipv6-address - The IPv6 address of the instance.

    • kernel-id - The kernel ID.

    • key-name - The name of the key pair used when the instance was launched.

    • launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).

    • launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day.

    • license-pool -

    • maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default).

    • metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled).

    • metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled).

    • metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled).

    • metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64).

    • metadata-options.http-tokens - The metadata request authorization state (optional | required).

    • metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled).

    • metadata-options.state - The state of the metadata option changes (pending | applied).

    • monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled).

    • network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address.

    • network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface.

    • network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface.

    • network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface.

    • network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

    • network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

    • network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

    • network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

    • network-interface.attachment.attachment-id - The ID of the interface attachment.

    • network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached.

    • network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    • network-interface.attachment.device-index - The device index to which the network interface is attached.

    • network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    • network-interface.attachment.attach-time - The time that the network interface was attached to an instance.

    • network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated.

    • network-interface.availability-zone - The Availability Zone for the network interface.

    • network-interface.description - The description of the network interface.

    • network-interface.group-id - The ID of a security group associated with the network interface.

    • network-interface.group-name - The name of a security group associated with the network interface.

    • network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface.

    • network-interface.mac-address - The MAC address of the network interface.

    • network-interface.network-interface-id - The ID of the network interface.

    • network-interface.owner-id - The ID of the owner of the network interface.

    • network-interface.private-dns-name - The private DNS name of the network interface.

    • network-interface.requester-id - The requester ID for the network interface.

    • network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services.

    • network-interface.status - The status of the network interface (available | in-use).

    • network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    • network-interface.subnet-id - The ID of the subnet for the network interface.

    • network-interface.vpc-id - The ID of the VPC for the network interface.

    • outpost-arn - The Amazon Resource Name (ARN) of the Outpost.

    • owner-id - The Amazon Web Services account ID of the instance owner.

    • placement-group-name - The name of the placement group for the instance.

    • placement-partition-number - The partition in which the instance is located.

    • platform - The platform. To list only Windows instances, use windows.

    • platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web).

    • private-dns-name - The private IPv4 DNS name of the instance.

    • private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records.

    • private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records.

    • private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name).

    • private-ip-address - The private IPv4 address of the instance.

    • product-code - The product code associated with the AMI used to launch the instance.

    • product-code.type - The type of product code (devpay | marketplace).

    • ramdisk-id - The RAM disk ID.

    • reason - The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter.

    • requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on).

    • reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID.

    • root-device-name - The device name of the root device volume (for example, /dev/sda1).

    • root-device-type - The type of the root device volume (ebs | instance-store).

    • source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC.

    • spot-instance-request-id - The ID of the Spot Instance request.

    • state-reason-code - The reason code for the state change.

    • state-reason-message - A message that describes the state change.

    • subnet-id - The ID of the subnet for the instance.

    • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.

    • tenancy - The tenancy of an instance (dedicated | default | host).

    • tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0).

    • usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202).

    • usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z.

    • virtualization-type - The virtualization type of the instance (paravirtual | hvm).

    • vpc-id - The ID of the VPC that the instance is running in.
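    
    The filter names above correspond one-to-one to Filter entries on the request. A minimal sketch, assuming the generated Ec2Client from this SDK and placeholder values, of listing running instances of a given type that carry a specific tag:
    
    import software.amazon.awssdk.services.ec2.Ec2Client;
    import software.amazon.awssdk.services.ec2.model.DescribeInstancesRequest;
    import software.amazon.awssdk.services.ec2.model.Filter;
    
    public class DescribeRunningInstancesExample {
        public static void main(String[] args) {
            try (Ec2Client ec2 = Ec2Client.create()) {
                // Each Filter name is one of the keys documented above; the values are placeholders.
                DescribeInstancesRequest request = DescribeInstancesRequest.builder()
                    .filters(Filter.builder().name("instance-state-name").values("running").build(),
                             Filter.builder().name("instance-type").values("t2.micro").build(),
                             Filter.builder().name("tag:Owner").values("TeamA").build())
                    .build();
                // The paginator follows NextToken so every matching reservation is visited.
                ec2.describeInstancesPaginator(request).stream()
                   .flatMap(response -> response.reservations().stream())
                   .flatMap(reservation -> reservation.instances().stream())
                   .forEach(instance -> System.out.println(instance.instanceId()));
            }
        }
    }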

    ", "locationName":"Filter" }, "InstanceIds":{ @@ -30124,7 +30145,7 @@ "locationName":"instanceId" }, "PasswordData":{ - "shape":"String", + "shape":"PasswordData", "documentation":"

    The password of the instance. Returns an empty string if the password is not available.

    ", "locationName":"passwordData" }, @@ -35341,7 +35362,47 @@ "m7i-flex.xlarge", "m7i-flex.2xlarge", "m7i-flex.4xlarge", - "m7i-flex.8xlarge" + "m7i-flex.8xlarge", + "m7a.medium", + "m7a.large", + "m7a.xlarge", + "m7a.2xlarge", + "m7a.4xlarge", + "m7a.8xlarge", + "m7a.12xlarge", + "m7a.16xlarge", + "m7a.24xlarge", + "m7a.32xlarge", + "m7a.48xlarge", + "m7a.metal-48xl", + "hpc7a.12xlarge", + "hpc7a.24xlarge", + "hpc7a.48xlarge", + "hpc7a.96xlarge", + "c7gd.medium", + "c7gd.large", + "c7gd.xlarge", + "c7gd.2xlarge", + "c7gd.4xlarge", + "c7gd.8xlarge", + "c7gd.12xlarge", + "c7gd.16xlarge", + "m7gd.medium", + "m7gd.large", + "m7gd.xlarge", + "m7gd.2xlarge", + "m7gd.4xlarge", + "m7gd.8xlarge", + "m7gd.12xlarge", + "m7gd.16xlarge", + "r7gd.medium", + "r7gd.large", + "r7gd.xlarge", + "r7gd.2xlarge", + "r7gd.4xlarge", + "r7gd.8xlarge", + "r7gd.12xlarge", + "r7gd.16xlarge" ] }, "InstanceTypeHypervisor":{ @@ -39251,7 +39312,8 @@ "enum":[ "region", "availability-zone", - "availability-zone-id" + "availability-zone-id", + "outpost" ] }, "LogDestinationType":{ @@ -41138,7 +41200,7 @@ }, "PacketLength":{ "shape":"Integer", - "documentation":"

    The number of bytes in each packet to mirror. These are bytes after the VXLAN header. To mirror a subset, set this to the length (in bytes) to mirror. For example, if you set this value to 100, then the first 100 bytes that meet the filter criteria are copied to the target. Do not specify this parameter when you want to mirror the entire packet.

    " + "documentation":"

    The number of bytes in each packet to mirror. These are bytes after the VXLAN header. To mirror a subset, set this to the length (in bytes) to mirror. For example, if you set this value to 100, then the first 100 bytes that meet the filter criteria are copied to the target. Do not specify this parameter when you want to mirror the entire packet.

    For sessions with Network Load Balancer (NLB) traffic mirror targets, the default PacketLength will be set to 8500. Valid values are 1-8500. Setting a PacketLength greater than 8500 will result in an error response.
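    
    A hedged sketch of how this member is set when creating a session with the Java SDK; all resource IDs below are placeholders, and the point is only that packetLength is a plain Integer on the request builder:
    
    import software.amazon.awssdk.services.ec2.Ec2Client;
    import software.amazon.awssdk.services.ec2.model.CreateTrafficMirrorSessionRequest;
    
    class TrafficMirrorSessionExample {
        static void createSession(Ec2Client ec2) {
            // Mirror only the first 100 bytes after the VXLAN header;
            // omit packetLength entirely to mirror the whole packet.
            ec2.createTrafficMirrorSession(CreateTrafficMirrorSessionRequest.builder()
                .networkInterfaceId("eni-0123456789abcdef0")    // placeholder source ENI
                .trafficMirrorTargetId("tmt-0123456789abcdef0") // placeholder target
                .trafficMirrorFilterId("tmf-0123456789abcdef0") // placeholder filter
                .sessionNumber(1)
                .packetLength(100)
                .build());
        }
    }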

    " }, "SessionNumber":{ "shape":"Integer", @@ -41856,12 +41918,12 @@ }, "AddSecurityGroupIds":{ "shape":"VpcEndpointSecurityGroupIdList", - "documentation":"

    (Interface endpoint) The IDs of the security groups to associate with the network interface.

    ", + "documentation":"

    (Interface endpoint) The IDs of the security groups to associate with the endpoint network interfaces.

    ", "locationName":"AddSecurityGroupId" }, "RemoveSecurityGroupIds":{ "shape":"VpcEndpointSecurityGroupIdList", - "documentation":"

    (Interface endpoint) The IDs of the security groups to disassociate from the network interface.

    ", + "documentation":"

    (Interface endpoint) The IDs of the security groups to disassociate from the endpoint network interfaces.

    ", "locationName":"RemoveSecurityGroupId" }, "IpAddressType":{ @@ -41875,6 +41937,11 @@ "PrivateDnsEnabled":{ "shape":"Boolean", "documentation":"

    (Interface endpoint) Indicates whether a private hosted zone is associated with the VPC.

    " + }, + "SubnetConfigurations":{ + "shape":"SubnetConfigurationsList", + "documentation":"

    The subnet configurations for the endpoint.

    ", + "locationName":"SubnetConfiguration" } } }, @@ -44056,6 +44123,10 @@ "monthly" ] }, + "PasswordData":{ + "type":"string", + "sensitive":true + }, "PathComponent":{ "type":"structure", "members":{ @@ -49396,13 +49467,21 @@ "locationName":"uploadPolicy" }, "UploadPolicySignature":{ - "shape":"String", + "shape":"S3StorageUploadPolicySignature", "documentation":"

    The signature of the JSON document.

    ", "locationName":"uploadPolicySignature" } }, "documentation":"

    Describes the storage parameters for Amazon S3 and Amazon S3 buckets for an instance store-backed AMI.

    " }, + "S3StorageUploadPolicy":{ + "type":"string", + "sensitive":true + }, + "S3StorageUploadPolicySignature":{ + "type":"string", + "sensitive":true + }, "SSEType":{ "type":"string", "enum":[ @@ -52493,6 +52572,31 @@ "explicit" ] }, + "SubnetConfiguration":{ + "type":"structure", + "members":{ + "SubnetId":{ + "shape":"SubnetId", + "documentation":"

    The ID of the subnet.

    " + }, + "Ipv4":{ + "shape":"String", + "documentation":"

    The IPv4 address to assign to the endpoint network interface in the subnet. You must provide an IPv4 address if the VPC endpoint supports IPv4.

    If you specify an IPv4 address when modifying a VPC endpoint, we replace the existing endpoint network interface with a new endpoint network interface with this IP address. This process temporarily disconnects the subnet and the VPC endpoint.

    " + }, + "Ipv6":{ + "shape":"String", + "documentation":"

    The IPv6 address to assign to the endpoint network interface in the subnet. You must provide an IPv6 address if the VPC endpoint supports IPv6.

    If you specify an IPv6 address when modifying a VPC endpoint, we replace the existing endpoint network interface with a new endpoint network interface with this IP address. This process temporarily disconnects the subnet and the VPC endpoint.

    " + } + }, + "documentation":"

    Describes the configuration of a subnet for a VPC endpoint.
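    
    Since SubnetConfiguration is new in this change, the following is only a sketch of how the generated Java model is expected to be used with ModifyVpcEndpoint once this model ships; all IDs and the address are placeholders:
    
    import software.amazon.awssdk.services.ec2.Ec2Client;
    import software.amazon.awssdk.services.ec2.model.ModifyVpcEndpointRequest;
    import software.amazon.awssdk.services.ec2.model.SubnetConfiguration;
    
    class VpcEndpointSubnetConfigurationExample {
        static void pinEndpointAddress(Ec2Client ec2) {
            // Assigning an explicit IPv4 address replaces the endpoint network interface
            // in that subnet, which briefly disconnects the subnet from the endpoint.
            SubnetConfiguration config = SubnetConfiguration.builder()
                .subnetId("subnet-0123456789abcdef0") // placeholder subnet
                .ipv4("10.0.1.10")                    // placeholder address
                .build();
            ec2.modifyVpcEndpoint(ModifyVpcEndpointRequest.builder()
                .vpcEndpointId("vpce-0123456789abcdef0")     // placeholder endpoint
                .subnetConfigurations(config)
                .addSecurityGroupIds("sg-0123456789abcdef0") // placeholder security group
                .build());
        }
    }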

    " + }, + "SubnetConfigurationsList":{ + "type":"list", + "member":{ + "shape":"SubnetConfiguration", + "locationName":"item" + } + }, "SubnetId":{"type":"string"}, "SubnetIdStringList":{ "type":"list", diff --git a/services/ec2instanceconnect/pom.xml b/services/ec2instanceconnect/pom.xml index ef5551eed22b..876a81ae4f7e 100644 --- a/services/ec2instanceconnect/pom.xml +++ b/services/ec2instanceconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ec2instanceconnect AWS Java SDK :: Services :: EC2 Instance Connect diff --git a/services/ecr/pom.xml b/services/ecr/pom.xml index 852c1af84738..038af0132d84 100644 --- a/services/ecr/pom.xml +++ b/services/ecr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ecr AWS Java SDK :: Services :: Amazon EC2 Container Registry diff --git a/services/ecrpublic/pom.xml b/services/ecrpublic/pom.xml index 14440342a65e..4b0cb6eeaa9e 100644 --- a/services/ecrpublic/pom.xml +++ b/services/ecrpublic/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ecrpublic AWS Java SDK :: Services :: ECR PUBLIC diff --git a/services/ecs/pom.xml b/services/ecs/pom.xml index 821bac01625a..9d89c29ca405 100644 --- a/services/ecs/pom.xml +++ b/services/ecs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ecs AWS Java SDK :: Services :: Amazon EC2 Container Service diff --git a/services/ecs/src/main/resources/codegen-resources/customization.config b/services/ecs/src/main/resources/codegen-resources/customization.config index 58e305ca06b7..b0f923be73f8 100644 --- a/services/ecs/src/main/resources/codegen-resources/customization.config +++ b/services/ecs/src/main/resources/codegen-resources/customization.config @@ -10,7 +10,7 @@ "listTaskDefinitionFamilies", "listTaskDefinitions" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "discoverPollEndpoint", "registerContainerInstance", "submitContainerStateChange", diff --git a/services/ecs/src/main/resources/codegen-resources/service-2.json b/services/ecs/src/main/resources/codegen-resources/service-2.json index 6e93278b9dd5..32440d8683f1 100644 --- a/services/ecs/src/main/resources/codegen-resources/service-2.json +++ b/services/ecs/src/main/resources/codegen-resources/service-2.json @@ -572,7 +572,7 @@ {"shape":"ClientException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

    Modifies an account setting. Account settings are set on a per-Region basis.

    If you change the root user account setting, the default settings are reset for users and roles that do not have specified individual account settings. For more information, see Account Settings in the Amazon Elastic Container Service Developer Guide.

    When serviceLongArnFormat, taskLongArnFormat, or containerInstanceLongArnFormat are specified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    When awsvpcTrunking is specified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched have the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.

    When containerInsights is specified, the default setting indicating whether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed. If containerInsights is turned on, any new clusters that are created will have Container Insights turned on unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide.

    Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.

    " + "documentation":"

    Modifies an account setting. Account settings are set on a per-Region basis.

    If you change the root user account setting, the default settings are reset for users and roles that do not have specified individual account settings. For more information, see Account Settings in the Amazon Elastic Container Service Developer Guide.

    When you specify serviceLongArnFormat, taskLongArnFormat, or containerInstanceLongArnFormat, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    When you specify awsvpcTrunking, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched with the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.

    When you specify containerInsights, the default setting indicating whether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed. If containerInsights is turned on, any new clusters that are created will have Container Insights turned on unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide.

    Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.

    When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to configure the wait time to retire a Fargate task. For information about Fargate task maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.
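    
    A hedged sketch of opting into the new wait period with the generated EcsClient; the string overload of name is used rather than assuming the exact enum constant generated for fargateTaskRetirementWaitPeriod:
    
    import software.amazon.awssdk.services.ecs.EcsClient;
    import software.amazon.awssdk.services.ecs.model.PutAccountSettingRequest;
    
    class FargateRetirementWaitExample {
        static void setRetirementWait(EcsClient ecs) {
            // Wait 7 calendar days after the retirement notification before the tasks are replaced.
            // Per the documentation, this particular setting must be changed as the account's root user.
            ecs.putAccountSetting(PutAccountSettingRequest.builder()
                .name("fargateTaskRetirementWaitPeriod")
                .value("7")
                .build());
        }
    }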

    " }, "PutAccountSettingDefault":{ "name":"PutAccountSettingDefault", @@ -1084,7 +1084,7 @@ }, "managedScaling":{ "shape":"ManagedScaling", - "documentation":"

    he managed scaling settings for the Auto Scaling group capacity provider.

    " + "documentation":"

    The managed scaling settings for the Auto Scaling group capacity provider.

    " }, "managedTerminationProtection":{ "shape":"ManagedTerminationProtection", @@ -1674,7 +1674,7 @@ }, "systemControls":{ "shape":"SystemControls", - "documentation":"

    A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run.

    We don't recommended that you specify network-related systemControls parameters for multiple containers in a single task that also uses either the awsvpc or host network modes. For tasks that use the awsvpc network mode, the container that's started last determines which systemControls parameters take effect. For tasks that use the host network mode, it changes the container instance's namespaced kernel parameters as well as the containers.

    " + "documentation":"

    A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run. For example, you can configure the net.ipv4.tcp_keepalive_time setting to maintain longer-lived connections.

    We don't recommend that you specify network-related systemControls parameters for multiple containers in a single task that also uses either the awsvpc or host network mode. For tasks that use the awsvpc network mode, the container that's started last determines which systemControls parameters take effect. For tasks that use the host network mode, it changes the container instance's namespaced kernel parameters as well as the containers.

    This parameter is not supported for Windows containers.

    This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate.
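    
    A short sketch of the keep-alive example above, assuming the generated ECS model classes; the container name, image, and value are placeholders:
    
    import software.amazon.awssdk.services.ecs.model.ContainerDefinition;
    import software.amazon.awssdk.services.ecs.model.SystemControl;
    
    class SystemControlExample {
        static ContainerDefinition withKeepAlive() {
            // Lower the TCP keep-alive interval inside the container's network namespace.
            return ContainerDefinition.builder()
                .name("app")                                // placeholder container name
                .image("public.ecr.aws/example/app:latest") // placeholder image
                .systemControls(SystemControl.builder()
                    .namespace("net.ipv4.tcp_keepalive_time")
                    .value("500")
                    .build())
                .build();
        }
    }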

    " }, "resourceRequirements":{ "shape":"ResourceRequirements", @@ -1762,7 +1762,7 @@ }, "runningTasksCount":{ "shape":"Integer", - "documentation":"

    The number of tasks on the container instance that are in the RUNNING status.

    " + "documentation":"

    The number of tasks on the container instance that have a desired status (desiredStatus) of RUNNING.

    " }, "pendingTasksCount":{ "shape":"Integer", @@ -4327,11 +4327,11 @@ "members":{ "name":{ "shape":"SettingName", - "documentation":"

    The resource name for which to modify the account setting. If serviceLongArnFormat is specified, the ARN for your Amazon ECS services is affected. If taskLongArnFormat is specified, the ARN and resource ID for your Amazon ECS tasks is affected. If containerInstanceLongArnFormat is specified, the ARN and resource ID for your Amazon ECS container instances is affected. If awsvpcTrunking is specified, the ENI limit for your Amazon ECS container instances is affected. If containerInsights is specified, the default setting for Amazon Web Services CloudWatch Container Insights for your clusters is affected. If tagResourceAuthorization is specified, the opt-in option for tagging resources on creation is affected. For information about the opt-in timeline, see Tagging authorization timeline in the Amazon ECS Developer Guide.

    When you specify fargateFIPSMode for the name and enabled for the value, Fargate uses FIPS-140 compliant cryptographic algorithms on your tasks. For more information about FIPS-140 compliance with Fargate, see Amazon Web Services Fargate Federal Information Processing Standard (FIPS) 140-2 compliance in the Amazon Elastic Container Service Developer Guide.

    " + "documentation":"

    The resource name for which to modify the account setting. If you specify serviceLongArnFormat, the ARN for your Amazon ECS services is affected. If you specify taskLongArnFormat, the ARN and resource ID for your Amazon ECS tasks is affected. If you specify containerInstanceLongArnFormat, the ARN and resource ID for your Amazon ECS container instances is affected. If you specify awsvpcTrunking, the ENI limit for your Amazon ECS container instances is affected. If you specify containerInsights, the default setting for Amazon Web Services CloudWatch Container Insights for your clusters is affected. If you specify tagResourceAuthorization, the opt-in option for tagging resources on creation is affected. For information about the opt-in timeline, see Tagging authorization timeline in the Amazon ECS Developer Guide. If you specify fargateTaskRetirementWaitPeriod, the default wait time to retire a Fargate task due to required maintenance is affected.

    When you specify fargateFIPSMode for the name and enabled for the value, Fargate uses FIPS-140 compliant cryptographic algorithms on your tasks. For more information about FIPS-140 compliance with Fargate, see Amazon Web Services Fargate Federal Information Processing Standard (FIPS) 140-2 compliance in the Amazon Elastic Container Service Developer Guide.

    When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to set the wait time to retire a Fargate task to the default. For information about Fargate task maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.

    " }, "value":{ "shape":"String", - "documentation":"

    The account setting value for the specified principal ARN. Accepted values are enabled, disabled, on, and off.

    " + "documentation":"

    The account setting value for the specified principal ARN. Accepted values are enabled, disabled, on, and off.

    When you specify fargateTaskRetirementWaitPeriod for the name, the following are the valid values:

    • 0 - Amazon Web Services sends the notification, and immediately retires the affected tasks.

    • 7 - Amazon Web Services sends the notification, and waits 7 calendar days to retire the tasks.

    • 14 - Amazon Web Services sends the notification, and waits 14 calendar days to retire the tasks.

    " } } }, @@ -4353,15 +4353,15 @@ "members":{ "name":{ "shape":"SettingName", - "documentation":"

    The Amazon ECS resource name for which to modify the account setting. If serviceLongArnFormat is specified, the ARN for your Amazon ECS services is affected. If taskLongArnFormat is specified, the ARN and resource ID for your Amazon ECS tasks is affected. If containerInstanceLongArnFormat is specified, the ARN and resource ID for your Amazon ECS container instances is affected. If awsvpcTrunking is specified, the elastic network interface (ENI) limit for your Amazon ECS container instances is affected. If containerInsights is specified, the default setting for Amazon Web Services CloudWatch Container Insights for your clusters is affected. If fargateFIPSMode is specified, Fargate FIPS 140 compliance is affected. If tagResourceAuthorization is specified, the opt-in option for tagging resources on creation is affected. For information about the opt-in timeline, see Tagging authorization timeline in the Amazon ECS Developer Guide.

    " + "documentation":"

    The Amazon ECS resource name for which to modify the account setting. If you specify serviceLongArnFormat, the ARN for your Amazon ECS services is affected. If you specify taskLongArnFormat, the ARN and resource ID for your Amazon ECS tasks is affected. If you specify containerInstanceLongArnFormat, the ARN and resource ID for your Amazon ECS container instances is affected. If you specify awsvpcTrunking, the elastic network interface (ENI) limit for your Amazon ECS container instances is affected. If you specify containerInsights, the default setting for Amazon Web Services CloudWatch Container Insights for your clusters is affected. If you specify fargateFIPSMode, Fargate FIPS 140 compliance is affected. If you specify tagResourceAuthorization, the opt-in option for tagging resources on creation is affected. For information about the opt-in timeline, see Tagging authorization timeline in the Amazon ECS Developer Guide. If you specify fargateTaskRetirementWaitPeriod, the wait time to retire a Fargate task is affected.

    " }, "value":{ "shape":"String", - "documentation":"

    The account setting value for the specified principal ARN. Accepted values are enabled, disabled, on, and off.

    " + "documentation":"

    The account setting value for the specified principal ARN. Accepted values are enabled, disabled, on, and off.

    When you specify fargateTaskRetirementWaitPeriod for the name, the following are the valid values:

    • 0 - Amazon Web Services sends the notification, and immediately retires the affected tasks.

    • 7 - Amazon Web Services sends the notification, and waits 7 calendar days to retire the tasks.

    • 14 - Amazon Web Services sends the notification, and waits 14 calendar days to retire the tasks.

    " }, "principalArn":{ "shape":"String", - "documentation":"

    The ARN of the principal, which can be a user, role, or the root user. If you specify the root user, it modifies the account setting for all users, roles, and the root user of the account unless a user or role explicitly overrides these settings. If this field is omitted, the setting is changed only for the authenticated user.

    Federated users assume the account setting of the root user and can't have explicit account settings set for them.

    " + "documentation":"

    The ARN of the principal, which can be a user, role, or the root user. If you specify the root user, it modifies the account setting for all users, roles, and the root user of the account unless a user or role explicitly overrides these settings. If this field is omitted, the setting is changed only for the authenticated user.

    You must use the root user when you set the Fargate wait time (fargateTaskRetirementWaitPeriod).

    Federated users assume the account setting of the root user and can't have explicit account settings set for them.

    " } } }, @@ -4531,7 +4531,7 @@ }, "pidMode":{ "shape":"PidMode", - "documentation":"

    The process namespace to use for the containers in the task. The valid values are host or task. If host is specified, then all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace. For more information, see PID settings in the Docker run reference.

    If the host PID mode is used, be aware that there is a heightened risk of undesired process namespace expose. For more information, see Docker security.

    This parameter is not supported for Windows containers or tasks run on Fargate.

    " + "documentation":"

    The process namespace to use for the containers in the task. The valid values are host or task. On Fargate for Linux containers, the only valid value is task. For example, monitoring sidecars might need pidMode to access information about other containers running in the same task.

    If host is specified, all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.

    If task is specified, all containers within the specified task share the same process namespace.

    If no value is specified, the default is a private namespace for each container. For more information, see PID settings in the Docker run reference.

    If the host PID mode is used, there's a heightened risk of undesired process namespace exposure. For more information, see Docker security.

    This parameter is not supported for Windows containers.

    This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate.
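    
    A hedged sketch of registering a task definition that shares one process namespace across its containers; the family name and images are placeholders:
    
    import software.amazon.awssdk.services.ecs.EcsClient;
    import software.amazon.awssdk.services.ecs.model.ContainerDefinition;
    import software.amazon.awssdk.services.ecs.model.PidMode;
    import software.amazon.awssdk.services.ecs.model.RegisterTaskDefinitionRequest;
    
    class PidModeExample {
        static void registerSharedPidTask(EcsClient ecs) {
            ContainerDefinition app = ContainerDefinition.builder()
                .name("app").image("public.ecr.aws/example/app:latest").build();         // placeholder
            ContainerDefinition sidecar = ContainerDefinition.builder()
                .name("monitor").image("public.ecr.aws/example/monitor:latest").build(); // placeholder
            // pidMode(TASK) lets the monitoring sidecar observe processes in the app container.
            ecs.registerTaskDefinition(RegisterTaskDefinitionRequest.builder()
                .family("shared-pid-example") // placeholder family name
                .pidMode(PidMode.TASK)
                .containerDefinitions(app, sidecar)
                .build());
        }
    }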

    " }, "ipcMode":{ "shape":"IpcMode", @@ -5161,7 +5161,8 @@ "awsvpcTrunking", "containerInsights", "fargateFIPSMode", - "tagResourceAuthorization" + "tagResourceAuthorization", + "fargateTaskRetirementWaitPeriod" ] }, "Settings":{ @@ -5425,7 +5426,7 @@ }, "value":{ "shape":"String", - "documentation":"

    The value for the namespaced kernel parameter that's specified in namespace.

    " + "documentation":"

    The namespaced kernel parameter to set a value for.

    Valid IPC namespace values: \"kernel.msgmax\" | \"kernel.msgmnb\" | \"kernel.msgmni\" | \"kernel.sem\" | \"kernel.shmall\" | \"kernel.shmmax\" | \"kernel.shmmni\" | \"kernel.shm_rmid_forced\", and Sysctls that start with \"fs.mqueue.*\"

    Valid network namespace values: Sysctls that start with \"net.*\"

    All of these values are supported by Fargate.

    " } }, "documentation":"

    A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run.

    We don't recommend that you specify network-related systemControls parameters for multiple containers in a single task that also uses either the awsvpc or host network mode, for the following reasons:

    • For tasks that use the awsvpc network mode, if you set systemControls for any container, it applies to all containers in the task. If you set different systemControls for multiple containers in a single task, the container that's started last determines which systemControls take effect.

    • For tasks that use the host network mode, the systemControls parameter applies to the container instance's kernel parameter and that of all containers of any tasks running on that container instance.

    " @@ -5733,7 +5734,7 @@ }, "pidMode":{ "shape":"PidMode", - "documentation":"

    The process namespace to use for the containers in the task. The valid values are host or task. If host is specified, then all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace. For more information, see PID settings in the Docker run reference.

    If the host PID mode is used, be aware that there is a heightened risk of undesired process namespace expose. For more information, see Docker security.

    This parameter is not supported for Windows containers or tasks run on Fargate.

    " + "documentation":"

    The process namespace to use for the containers in the task. The valid values are host or task. On Fargate for Linux containers, the only valid value is task. For example, monitoring sidecars might need pidMode to access information about other containers running in the same task.

    If host is specified, all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.

    If task is specified, all containers within the specified task share the same process namespace.

    If no value is specified, the default is a private namespace for each container. For more information, see PID settings in the Docker run reference.

    If the host PID mode is used, there's a heightened risk of undesired process namespace exposure. For more information, see Docker security.

    This parameter is not supported for Windows containers.

    This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate.

    " }, "ipcMode":{ "shape":"IpcMode", diff --git a/services/efs/pom.xml b/services/efs/pom.xml index f55f3d96d77a..1111bf3d1249 100644 --- a/services/efs/pom.xml +++ b/services/efs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT efs AWS Java SDK :: Services :: Amazon Elastic File System diff --git a/services/efs/src/main/resources/codegen-resources/customization.config b/services/efs/src/main/resources/codegen-resources/customization.config index d2fe9be19d69..ea0753f0e300 100644 --- a/services/efs/src/main/resources/codegen-resources/customization.config +++ b/services/efs/src/main/resources/codegen-resources/customization.config @@ -2,7 +2,7 @@ "verifiedSimpleMethods": [ "describeFileSystems" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "describeMountTargets" ] } diff --git a/services/eks/pom.xml b/services/eks/pom.xml index ad9730b620fa..f3b5d9a55928 100644 --- a/services/eks/pom.xml +++ b/services/eks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT eks AWS Java SDK :: Services :: EKS diff --git a/services/elasticache/pom.xml b/services/elasticache/pom.xml index dcb5c9d4b091..e186085ae126 100644 --- a/services/elasticache/pom.xml +++ b/services/elasticache/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT elasticache AWS Java SDK :: Services :: Amazon ElastiCache diff --git a/services/elasticache/src/main/resources/codegen-resources/customization.config b/services/elasticache/src/main/resources/codegen-resources/customization.config index ebe2ffd7cdf1..8f466b4cef8e 100644 --- a/services/elasticache/src/main/resources/codegen-resources/customization.config +++ b/services/elasticache/src/main/resources/codegen-resources/customization.config @@ -10,7 +10,7 @@ "describeReservedCacheNodesOfferings", "describeSnapshots" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "describeCacheSecurityGroups", "listAllowedNodeTypeModifications" ] diff --git a/services/elasticbeanstalk/pom.xml b/services/elasticbeanstalk/pom.xml index aba0b7f911e2..42f6b715128e 100644 --- a/services/elasticbeanstalk/pom.xml +++ b/services/elasticbeanstalk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT elasticbeanstalk AWS Java SDK :: Services :: AWS Elastic Beanstalk diff --git a/services/elasticbeanstalk/src/main/resources/codegen-resources/customization.config b/services/elasticbeanstalk/src/main/resources/codegen-resources/customization.config index b2845ceed178..f0f2660d2737 100644 --- a/services/elasticbeanstalk/src/main/resources/codegen-resources/customization.config +++ b/services/elasticbeanstalk/src/main/resources/codegen-resources/customization.config @@ -17,7 +17,7 @@ ] } }, - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "abortEnvironmentUpdate", "composeEnvironments", "deletePlatformVersion", diff --git a/services/elasticinference/pom.xml b/services/elasticinference/pom.xml index a04cda02a183..74133e5ae109 100644 --- a/services/elasticinference/pom.xml +++ b/services/elasticinference/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT elasticinference AWS Java SDK :: Services :: Elastic Inference diff --git a/services/elasticloadbalancing/pom.xml b/services/elasticloadbalancing/pom.xml index 1a5e796e3d54..4de5e766b41f 100644 --- a/services/elasticloadbalancing/pom.xml +++ b/services/elasticloadbalancing/pom.xml @@ 
-21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT elasticloadbalancing AWS Java SDK :: Services :: Elastic Load Balancing diff --git a/services/elasticloadbalancingv2/pom.xml b/services/elasticloadbalancingv2/pom.xml index eb38fefa86e9..472df30c1f78 100644 --- a/services/elasticloadbalancingv2/pom.xml +++ b/services/elasticloadbalancingv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT elasticloadbalancingv2 AWS Java SDK :: Services :: Elastic Load Balancing V2 diff --git a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/customization.config b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/customization.config index 3d22a294f4b3..45f2d8ee516e 100644 --- a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/customization.config +++ b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/customization.config @@ -5,7 +5,7 @@ "describeSSLPolicies", "describeTargetGroups" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "describeRules", "describeListeners" ] diff --git a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/endpoint-rule-set.json index 78128a590f9c..ac783e2300a6 100644 --- a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": 
"getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://elasticloadbalancing-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://elasticloadbalancing-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -221,155 +225,115 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://elasticloadbalancing.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://elasticloadbalancing-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://elasticloadbalancing.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://elasticloadbalancing-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://elasticloadbalancing.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is 
enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://elasticloadbalancing.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://elasticloadbalancing.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://elasticloadbalancing.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json index df77c9e4543d..d6d07766f8b8 100644 --- a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json +++ b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json @@ -244,7 +244,7 @@ {"shape":"TargetGroupNotFoundException"}, {"shape":"InvalidTargetException"} ], - "documentation":"

    Deregisters the specified targets from the specified target group. After the targets are deregistered, they no longer receive traffic from the load balancer.

    Note: If the specified target does not exist, the action returns successfully.

    " + "documentation":"

    Deregisters the specified targets from the specified target group. After the targets are deregistered, they no longer receive traffic from the load balancer.

    The load balancer stops sending requests to targets that are deregistering, but uses connection draining to ensure that in-flight traffic completes on the existing connections. This deregistration delay is configured by default but can be updated for each target group.

    For more information, see the following:

    Note: If the specified target does not exist, the action returns successfully.
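    
    A hedged sketch of deregistering an instance target with the generated ElasticLoadBalancingV2Client; the target group ARN and instance ID are placeholders:
    
    import software.amazon.awssdk.services.elasticloadbalancingv2.ElasticLoadBalancingV2Client;
    import software.amazon.awssdk.services.elasticloadbalancingv2.model.DeregisterTargetsRequest;
    import software.amazon.awssdk.services.elasticloadbalancingv2.model.TargetDescription;
    
    class DeregisterTargetsExample {
        static void drainTarget(ElasticLoadBalancingV2Client elbv2) {
            // The target keeps serving in-flight requests for the group's
            // deregistration_delay.timeout_seconds before it is reported as unused.
            elbv2.deregisterTargets(DeregisterTargetsRequest.builder()
                .targetGroupArn("arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/example/0123456789abcdef") // placeholder
                .targets(TargetDescription.builder().id("i-0123456789abcdef0").build())                                      // placeholder instance
                .build());
        }
    }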

    " }, "DescribeAccountLimits":{ "name":"DescribeAccountLimits", @@ -3139,7 +3139,7 @@ "members":{ "Key":{ "shape":"TargetGroupAttributeKey", - "documentation":"

    The name of the attribute.

    The following attributes are supported by all load balancers:

    • deregistration_delay.timeout_seconds - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.

    • stickiness.enabled - Indicates whether target stickiness is enabled. The value is true or false. The default is false.

    • stickiness.type - Indicates the type of stickiness. The possible values are:

      • lb_cookie and app_cookie for Application Load Balancers.

      • source_ip for Network Load Balancers.

      • source_ip_dest_ip and source_ip_dest_ip_proto for Gateway Load Balancers.

    The following attributes are supported by Application Load Balancers and Network Load Balancers:

    • load_balancing.cross_zone.enabled - Indicates whether cross zone load balancing is enabled. The value is true, false or use_load_balancer_configuration. The default is use_load_balancer_configuration.

    • target_group_health.dns_failover.minimum_healthy_targets.count - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are off or an integer from 1 to the maximum number of targets. The default is off.

    • target_group_health.dns_failover.minimum_healthy_targets.percentage - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are off or an integer from 1 to 100. The default is off.

    • target_group_health.unhealthy_state_routing.minimum_healthy_targets.count - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.

    • target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are off or an integer from 1 to 100. The default is off.

    The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:

    • load_balancing.algorithm.type - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is round_robin or least_outstanding_requests. The default is round_robin.

    • slow_start.duration_seconds - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).

    • stickiness.app_cookie.cookie_name - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: AWSALB, AWSALBAPP, and AWSALBTG; they're reserved for use by the load balancer.

    • stickiness.app_cookie.duration_seconds - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).

    • stickiness.lb_cookie.duration_seconds - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).

    The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:

    • lambda.multi_value_headers.enabled - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is true or false. The default is false. If the value is false and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.

    The following attributes are supported only by Network Load Balancers:

    • deregistration_delay.connection_termination.enabled - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is true or false. The default is false.

    • preserve_client_ip.enabled - Indicates whether client IP preservation is enabled. The value is true or false. The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation cannot be disabled for UDP and TCP_UDP target groups.

    • proxy_protocol_v2.enabled - Indicates whether Proxy Protocol version 2 is enabled. The value is true or false. The default is false.

    The following attributes are supported only by Gateway Load Balancers:

    • target_failover.on_deregistration - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are rebalance and no_rebalance. The default is no_rebalance. The two attributes (target_failover.on_deregistration and target_failover.on_unhealthy) can't be set independently. The value you set for both attributes must be the same.

    • target_failover.on_unhealthy - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are rebalance and no_rebalance. The default is no_rebalance. The two attributes (target_failover.on_deregistration and target_failover.on_unhealthy) cannot be set independently. The value you set for both attributes must be the same.

    " + "documentation":"

    The name of the attribute.

    The following attributes are supported by all load balancers:

    • deregistration_delay.timeout_seconds - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.

    • stickiness.enabled - Indicates whether target stickiness is enabled. The value is true or false. The default is false.

    • stickiness.type - Indicates the type of stickiness. The possible values are:

      • lb_cookie and app_cookie for Application Load Balancers.

      • source_ip for Network Load Balancers.

      • source_ip_dest_ip and source_ip_dest_ip_proto for Gateway Load Balancers.

    The following attributes are supported by Application Load Balancers and Network Load Balancers:

    • load_balancing.cross_zone.enabled - Indicates whether cross-zone load balancing is enabled. The value is true, false, or use_load_balancer_configuration. The default is use_load_balancer_configuration.

    • target_group_health.dns_failover.minimum_healthy_targets.count - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are off or an integer from 1 to the maximum number of targets. The default is off.

    • target_group_health.dns_failover.minimum_healthy_targets.percentage - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are off or an integer from 1 to 100. The default is off.

    • target_group_health.unhealthy_state_routing.minimum_healthy_targets.count - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.

    • target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are off or an integer from 1 to 100. The default is off.

    The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:

    • load_balancing.algorithm.type - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is round_robin or least_outstanding_requests. The default is round_robin.

    • slow_start.duration_seconds - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).

    • stickiness.app_cookie.cookie_name - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: AWSALB, AWSALBAPP, and AWSALBTG; they're reserved for use by the load balancer.

    • stickiness.app_cookie.duration_seconds - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).

    • stickiness.lb_cookie.duration_seconds - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).

    The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:

    • lambda.multi_value_headers.enabled - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is true or false. The default is false. If the value is false and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.

    The following attributes are supported only by Network Load Balancers:

    • deregistration_delay.connection_termination.enabled - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is true or false. For new UDP/TCP_UDP target groups, the default is true. Otherwise, the default is false.

    • preserve_client_ip.enabled - Indicates whether client IP preservation is enabled. The value is true or false. The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation cannot be disabled for UDP and TCP_UDP target groups.

    • proxy_protocol_v2.enabled - Indicates whether Proxy Protocol version 2 is enabled. The value is true or false. The default is false.

    • target_health_state.unhealthy.connection_termination.enabled - Indicates whether the load balancer terminates connections to unhealthy targets. The value is true or false. The default is true.

    The following attributes are supported only by Gateway Load Balancers:

    • target_failover.on_deregistration - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are rebalance and no_rebalance. The default is no_rebalance. The two attributes (target_failover.on_deregistration and target_failover.on_unhealthy) can't be set independently. The value you set for both attributes must be the same.

    • target_failover.on_unhealthy - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are rebalance and no_rebalance. The default is no_rebalance. The two attributes (target_failover.on_deregistration and target_failover.on_unhealthy) cannot be set independently. The value you set for both attributes must be the same.
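    As a quick illustration of how the attributes documented above are applied, the following is a minimal sketch (not part of this change) using this SDK's generated ElasticLoadBalancingV2 client; the target group ARN and the chosen attribute values are placeholders.

    import software.amazon.awssdk.services.elasticloadbalancingv2.ElasticLoadBalancingV2Client;
    import software.amazon.awssdk.services.elasticloadbalancingv2.model.ModifyTargetGroupAttributesRequest;
    import software.amazon.awssdk.services.elasticloadbalancingv2.model.TargetGroupAttribute;

    public class TargetGroupAttributesSketch {
        public static void main(String[] args) {
            try (ElasticLoadBalancingV2Client elbv2 = ElasticLoadBalancingV2Client.create()) {
                // Placeholder ARN; attributes are plain key/value strings as documented above.
                String targetGroupArn = "arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-targets/0123456789abcdef";
                elbv2.modifyTargetGroupAttributes(ModifyTargetGroupAttributesRequest.builder()
                    .targetGroupArn(targetGroupArn)
                    .attributes(
                        // Enable load balancer-generated cookie stickiness for one day.
                        TargetGroupAttribute.builder().key("stickiness.enabled").value("true").build(),
                        TargetGroupAttribute.builder().key("stickiness.type").value("lb_cookie").build(),
                        TargetGroupAttribute.builder().key("stickiness.lb_cookie.duration_seconds").value("86400").build())
                    .build());
            }
        }
    }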

    " }, "Value":{ "shape":"TargetGroupAttributeValue", diff --git a/services/elasticsearch/pom.xml b/services/elasticsearch/pom.xml index 42e690de0b23..5296508bb50a 100644 --- a/services/elasticsearch/pom.xml +++ b/services/elasticsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT elasticsearch AWS Java SDK :: Services :: Amazon Elasticsearch Service diff --git a/services/elastictranscoder/pom.xml b/services/elastictranscoder/pom.xml index adc295ead0a3..a8684153eb3c 100644 --- a/services/elastictranscoder/pom.xml +++ b/services/elastictranscoder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT elastictranscoder AWS Java SDK :: Services :: Amazon Elastic Transcoder diff --git a/services/emr/pom.xml b/services/emr/pom.xml index e6179ac876c5..9bb58c8392ee 100644 --- a/services/emr/pom.xml +++ b/services/emr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT emr AWS Java SDK :: Services :: Amazon EMR diff --git a/services/emr/src/main/resources/codegen-resources/customization.config b/services/emr/src/main/resources/codegen-resources/customization.config index 1bc9a6a3427c..1e066e00e14d 100644 --- a/services/emr/src/main/resources/codegen-resources/customization.config +++ b/services/emr/src/main/resources/codegen-resources/customization.config @@ -14,7 +14,7 @@ ] } }, - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "cancelSteps", "modifyInstanceGroups", "describeJobFlows" diff --git a/services/emrcontainers/pom.xml b/services/emrcontainers/pom.xml index 6b3b3e6bda00..5d257d267448 100644 --- a/services/emrcontainers/pom.xml +++ b/services/emrcontainers/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT emrcontainers AWS Java SDK :: Services :: EMR Containers diff --git a/services/emrserverless/pom.xml b/services/emrserverless/pom.xml index 7e2c2ba2bf39..2ee41e6dbbc9 100644 --- a/services/emrserverless/pom.xml +++ b/services/emrserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT emrserverless AWS Java SDK :: Services :: EMR Serverless diff --git a/services/entityresolution/pom.xml b/services/entityresolution/pom.xml index 716d634e7660..fb51faedfb81 100644 --- a/services/entityresolution/pom.xml +++ b/services/entityresolution/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT entityresolution AWS Java SDK :: Services :: Entity Resolution diff --git a/services/eventbridge/pom.xml b/services/eventbridge/pom.xml index a45f5847df6d..f94579e6558b 100644 --- a/services/eventbridge/pom.xml +++ b/services/eventbridge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT eventbridge AWS Java SDK :: Services :: EventBridge diff --git a/services/eventbridge/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/eventbridge/src/main/resources/codegen-resources/endpoint-rule-set.json index 9eaccd9c66e6..c40edab9d49e 100644 --- a/services/eventbridge/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/eventbridge/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -79,123 +79,87 @@ "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + } + ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "isSet", + 
"argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": { + "authSchemes": [ + { + "name": "sigv4a", + "signingName": "events", + "signingRegionSet": [ + "*" + ] + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ { "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, - false + true ] } ], "type": "tree", "rules": [ { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - } - ], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": { - "authSchemes": [ - { - "name": "sigv4a", - "signingName": "events", - "signingRegionSet": [ - "*" - ] - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ + true, { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ { - "ref": "UseDualStack" + "ref": "PartitionResult" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://{EndpointId}.endpoint.events.{PartitionResult#dualStackDnsSuffix}", - "properties": { - "authSchemes": [ - { - "name": "sigv4a", - "signingName": "events", - "signingRegionSet": [ - "*" - ] - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } + "supportsDualStack" ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], "endpoint": { - "url": "https://{EndpointId}.endpoint.events.{PartitionResult#dnsSuffix}", + "url": "https://{EndpointId}.endpoint.events.{PartitionResult#dualStackDnsSuffix}", "properties": { "authSchemes": [ { @@ -212,15 +176,39 @@ "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] }, { "conditions": [], - "error": "Invalid Configuration: FIPS is not supported with EventBridge multi-region endpoints.", - "type": "error" + "endpoint": { + "url": "https://{EndpointId}.endpoint.events.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4a", + "signingName": "events", + "signingRegionSet": [ + "*" + ] + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: FIPS is not supported with EventBridge multi-region endpoints.", + "type": "error" } ] }, @@ -260,52 +248,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": 
"endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -313,13 +305,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -329,92 +330,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://events-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://events-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -423,168 +415,128 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "Region" + }, + "us-gov-east-1" ] } ], - "type": "tree", - "rules": [ + "endpoint": { + "url": "https://events.us-gov-east-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "us-gov-east-1" - ] - } - ], - "endpoint": { - "url": "https://events.us-gov-east-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, + "fn": "stringEquals", + "argv": [ { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "us-gov-west-1" - ] - } - ], - "endpoint": { - "url": "https://events.us-gov-west-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "ref": "Region" }, - { - "conditions": [], - "endpoint": { - 
"url": "https://events-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "us-gov-west-1" ] } - ] + ], + "endpoint": { + "url": "https://events.us-gov-west-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://events-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://events.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://events.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://events.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://events.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/eventbridge/src/main/resources/codegen-resources/endpoint-tests.json b/services/eventbridge/src/main/resources/codegen-resources/endpoint-tests.json index 6a5080439f73..3655193a0a97 100644 --- a/services/eventbridge/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/eventbridge/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,8 +8,8 @@ } }, "params": { - "UseFIPS": false, "Region": "af-south-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -21,8 +21,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -34,8 +34,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-northeast-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -47,8 +47,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-northeast-2", + "UseFIPS": false, "UseDualStack": false } }, @@ -60,8 +60,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-northeast-3", + "UseFIPS": false, "UseDualStack": false } }, @@ -73,8 +73,8 @@ } }, "params": { - "UseFIPS": false, "Region": 
"ap-south-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -86,8 +86,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-southeast-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -99,8 +99,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-southeast-2", + "UseFIPS": false, "UseDualStack": false } }, @@ -112,8 +112,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-southeast-3", + "UseFIPS": false, "UseDualStack": false } }, @@ -125,8 +125,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ca-central-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -138,8 +138,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-central-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -151,8 +151,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-north-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -164,8 +164,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-south-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -177,8 +177,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-west-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -190,8 +190,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-west-2", + "UseFIPS": false, "UseDualStack": false } }, @@ -203,8 +203,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-west-3", + "UseFIPS": false, "UseDualStack": false } }, @@ -216,8 +216,8 @@ } }, "params": { - "UseFIPS": false, "Region": "me-south-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -229,8 +229,8 @@ } }, "params": { - "UseFIPS": false, "Region": "sa-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -242,8 +242,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -255,8 +255,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -268,8 +268,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-2", + "UseFIPS": false, "UseDualStack": false } }, @@ -281,8 +281,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-2", + "UseFIPS": true, "UseDualStack": false } }, @@ -294,8 +294,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-west-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -307,8 +307,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-west-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -320,8 +320,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-west-2", + "UseFIPS": false, "UseDualStack": false } }, @@ -333,8 +333,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-west-2", + "UseFIPS": true, "UseDualStack": false } }, @@ -346,8 +346,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -359,8 +359,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -372,8 +372,8 @@ } }, "params": { - "UseFIPS": false, "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -385,8 +385,8 @@ } }, "params": { - "UseFIPS": false, "Region": "cn-northwest-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -398,8 +398,8 @@ } }, "params": { - "UseFIPS": true, "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -411,8 +411,8 @@ } }, "params": { - "UseFIPS": true, "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -424,8 +424,8 @@ } }, "params": { - "UseFIPS": false, "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -437,8 +437,8 @@ } }, "params": { - "UseFIPS": 
false, "Region": "us-gov-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -450,8 +450,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-gov-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -463,8 +463,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-gov-west-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -476,8 +476,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-gov-west-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -489,8 +489,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-gov-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -502,8 +502,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-gov-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -515,8 +515,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-iso-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -528,8 +528,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-iso-west-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -539,8 +539,8 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "Region": "us-iso-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -552,8 +552,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-iso-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -563,8 +563,8 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "Region": "us-iso-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -576,8 +576,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -587,8 +587,8 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -600,8 +600,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -611,8 +611,8 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -624,8 +624,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -649,8 +649,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -661,8 +661,8 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } @@ -814,6 +814,18 @@ "Region": "us-east-1", "Endpoint": "https://example.com" } + }, + { + "documentation": "Valid EndpointId with DualStack enabled and partition does not support DualStack", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "EndpointId": "abc123.456def", + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-isob-east-1" + } } ], "version": "1.0" diff --git a/services/evidently/pom.xml b/services/evidently/pom.xml index acc25afde893..b5c383b06c67 100644 --- a/services/evidently/pom.xml +++ b/services/evidently/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 
2.20.144-SNAPSHOT evidently AWS Java SDK :: Services :: Evidently diff --git a/services/finspace/pom.xml b/services/finspace/pom.xml index ec71cb652e99..b10a29ba1eb8 100644 --- a/services/finspace/pom.xml +++ b/services/finspace/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT finspace AWS Java SDK :: Services :: Finspace diff --git a/services/finspace/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/finspace/src/main/resources/codegen-resources/endpoint-rule-set.json index 5299e3d75a86..2ae22d4be88e 100644 --- a/services/finspace/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/finspace/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://finspace-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://finspace-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and 
DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://finspace-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://finspace-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://finspace.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://finspace.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://finspace.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://finspace.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/finspace/src/main/resources/codegen-resources/service-2.json b/services/finspace/src/main/resources/codegen-resources/service-2.json index e0d63a365a5e..79383669015b 100644 --- a/services/finspace/src/main/resources/codegen-resources/service-2.json +++ b/services/finspace/src/main/resources/codegen-resources/service-2.json @@ -528,6 +528,7 @@ {"shape":"AccessDeniedException"}, 
{"shape":"LimitExceededException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"} ], "documentation":"

    Updates the databases mounted on a kdb cluster, which includes the changesetId and all the dbPaths to be cached. This API does not allow you to change a database name or add a database if you created a cluster without one.

    Using this API, you can point a cluster to a different changeset and modify a list of partitions being cached.
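    A minimal sketch of calling this operation through the SDK's generated FinSpace client follows; it is not part of this change, the environment ID, cluster name, database name, and changeset ID are placeholders, and the builder method names are assumed to follow the shapes in this model.

    import software.amazon.awssdk.services.finspace.FinspaceClient;
    import software.amazon.awssdk.services.finspace.model.KxDatabaseConfiguration;
    import software.amazon.awssdk.services.finspace.model.UpdateKxClusterDatabasesRequest;

    public class UpdateKxClusterDatabasesSketch {
        public static void main(String[] args) {
            try (FinspaceClient finspace = FinspaceClient.create()) {
                finspace.updateKxClusterDatabases(UpdateKxClusterDatabasesRequest.builder()
                    .environmentId("my-kx-environment")    // placeholder environment ID
                    .clusterName("my-kx-cluster")          // placeholder cluster name
                    .databases(KxDatabaseConfiguration.builder()
                        .databaseName("welcomedb")         // placeholder database name
                        .changesetId("123AbCdEfGhIjKl")    // point the cluster at a different changeset
                        .build())
                    .build());
            }
        }
    }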

    " @@ -610,6 +611,7 @@ "AccessDeniedException":{ "type":"structure", "members":{ + "message":{"shape":"errorMessage"} }, "documentation":"

    You do not have sufficient access to perform this action.

    ", "error":{"httpStatusCode":403}, @@ -680,7 +682,7 @@ "documentation":"

    The number of instances running in a cluster.

    " } }, - "documentation":"

    A structure for the metadata of a cluster. It includes information like the CPUs needed, memory of instances, number of instances, and the port used while establishing a connection.

    " + "documentation":"

    A structure for the metadata of a cluster. It includes information like the CPUs needed, memory of instances, and number of instances.

    " }, "ChangeRequest":{ "type":"structure", @@ -951,7 +953,7 @@ }, "capacityConfiguration":{ "shape":"CapacityConfiguration", - "documentation":"

    A structure for the metadata of a cluster. It includes information about like the CPUs needed, memory of instances, number of instances, and the port used while establishing a connection.

    " + "documentation":"

    A structure for the metadata of a cluster. It includes information like the CPUs needed, memory of instances, and number of instances.

    " }, "releaseLabel":{ "shape":"ReleaseLabel", @@ -1036,7 +1038,7 @@ }, "capacityConfiguration":{ "shape":"CapacityConfiguration", - "documentation":"

    A structure for the metadata of a cluster. It includes information like the CPUs needed, memory of instances, number of instances, and the port used while establishing a connection.

    " + "documentation":"

    A structure for the metadata of a cluster. It includes information like the CPUs needed, memory of instances, and number of instances.

    " }, "releaseLabel":{ "shape":"ReleaseLabel", @@ -1249,7 +1251,7 @@ }, "userArn":{ "shape":"KxUserArn", - "documentation":"

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

    " + "documentation":"

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

    " }, "environmentId":{ "shape":"IdType", @@ -1797,7 +1799,7 @@ }, "capacityConfiguration":{ "shape":"CapacityConfiguration", - "documentation":"

    A structure for the metadata of a cluster. It includes information like the CPUs needed, memory of instances, number of instances, and the port used while establishing a connection.

    " + "documentation":"

    A structure for the metadata of a cluster. It includes information like the CPUs needed, memory of instances, and number of instances.

    " }, "releaseLabel":{ "shape":"ReleaseLabel", @@ -1855,7 +1857,7 @@ "members":{ "userArn":{ "shape":"KxUserArn", - "documentation":"

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

    ", + "documentation":"

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

    ", "location":"querystring", "locationName":"userArn" }, @@ -2060,7 +2062,7 @@ }, "userArn":{ "shape":"KxUserArn", - "documentation":"

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

    " + "documentation":"

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

    " }, "environmentId":{ "shape":"IdType", @@ -2076,6 +2078,25 @@ "type":"string", "enum":["IP_V4"] }, + "IcmpTypeCode":{ + "type":"structure", + "required":[ + "type", + "code" + ], + "members":{ + "type":{ + "shape":"IcmpTypeOrCode", + "documentation":"

    The ICMP type. A value of -1 means all types.

    " + }, + "code":{ + "shape":"IcmpTypeOrCode", + "documentation":"

    The ICMP code. A value of -1 means all codes for the specified ICMP type.

    " + } + }, + "documentation":"

    Defines the ICMP protocol that consists of the ICMP type and code.

    " + }, + "IcmpTypeOrCode":{"type":"integer"}, "IdType":{ "type":"string", "max":26, @@ -2224,7 +2245,7 @@ }, "azMode":{ "shape":"KxAzMode", - "documentation":"

    The number of availability zones assigned per cluster. This can be one of the following

    • SINGLE – Assigns one availability zone per cluster.

    • MULTI – Assigns all the availability zones per cluster.

    " + "documentation":"

    The number of availability zones assigned per cluster. This can be one of the following:

    • SINGLE – Assigns one availability zone per cluster.

    • MULTI – Assigns all the availability zones per cluster.

    " }, "availabilityZoneId":{ "shape":"AvailabilityZoneId", @@ -2313,7 +2334,7 @@ "type":"string", "max":50, "min":1, - "pattern":"^[a-zA-Z0-9][a-zA-Z0-9_:.]*" + "pattern":"^[a-zA-Z0-9_:./]+$" }, "KxCommandLineArguments":{ "type":"list", @@ -2386,6 +2407,24 @@ "type":"list", "member":{"shape":"KxDatabaseListEntry"} }, + "KxDeploymentConfiguration":{ + "type":"structure", + "required":["deploymentStrategy"], + "members":{ + "deploymentStrategy":{ + "shape":"KxDeploymentStrategy", + "documentation":"

    The type of deployment that you want on a cluster.

    • ROLLING – This option loads the updated database by stopping the existing q process and starting a new q process with the updated configuration.

    • NO_RESTART – This option loads the updated database on the running q process without stopping it. This option is quicker because it reduces the turnaround time to update a kdb database changeset configuration on a cluster.

    " + } + }, + "documentation":"

    The configuration that allows you to choose how you want to update the databases on a cluster. Depending on the option you choose, you can reduce the time it takes to update the database changesets on a cluster.
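    To make the trade-off concrete, here is a hedged sketch of building this configuration, assuming the generated model classes match the shapes above; it complements the UpdateKxClusterDatabases sketch earlier in this patch.

    import software.amazon.awssdk.services.finspace.model.KxDeploymentConfiguration;
    import software.amazon.awssdk.services.finspace.model.KxDeploymentStrategy;

    public class KxDeploymentStrategySketch {
        public static void main(String[] args) {
            // NO_RESTART updates the mounted databases on the live q process (faster);
            // ROLLING stops the existing q process and starts a new one with the update.
            KxDeploymentConfiguration deployment = KxDeploymentConfiguration.builder()
                .deploymentStrategy(KxDeploymentStrategy.NO_RESTART)
                .build();
            System.out.println(deployment);
        }
    }

    In a real call this value would be supplied through the request's deploymentConfiguration member that this patch adds to UpdateKxClusterDatabasesRequest.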

    " + }, + "KxDeploymentStrategy":{ + "type":"string", + "enum":[ + "NO_RESTART", + "ROLLING" + ] + }, "KxEnvironment":{ "type":"structure", "members":{ @@ -2511,7 +2550,7 @@ }, "size":{ "shape":"KxSavedownStorageSize", - "documentation":"

    The size of temporary storage in bytes.

    " + "documentation":"

    The size of temporary storage in gibibytes.

    " } }, "documentation":"

    The size and type of temporary storage that is used to hold data during the savedown process. All the data written to this storage space is lost when the cluster node is restarted.

    " @@ -2530,7 +2569,7 @@ "members":{ "userArn":{ "shape":"KxUserArn", - "documentation":"

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

    " + "documentation":"

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

    " }, "userName":{ "shape":"KxUserNameString", @@ -2877,16 +2916,57 @@ "min":1, "pattern":"^[a-zA-Z0-9]{1,50}$" }, + "NetworkACLConfiguration":{ + "type":"list", + "member":{"shape":"NetworkACLEntry"}, + "max":100, + "min":1 + }, + "NetworkACLEntry":{ + "type":"structure", + "required":[ + "ruleNumber", + "protocol", + "ruleAction", + "cidrBlock" + ], + "members":{ + "ruleNumber":{ + "shape":"RuleNumber", + "documentation":"

    The rule number for the entry, for example 100. All the network ACL entries are processed in ascending order by rule number.

    " + }, + "protocol":{ + "shape":"Protocol", + "documentation":"

    The protocol number. A value of -1 means all the protocols.

    " + }, + "ruleAction":{ + "shape":"RuleAction", + "documentation":"

    Indicates whether to allow or deny the traffic that matches the rule.

    " + }, + "portRange":{ + "shape":"PortRange", + "documentation":"

    The range of ports the rule applies to.

    " + }, + "icmpTypeCode":{ + "shape":"IcmpTypeCode", + "documentation":"

    Defines the ICMP protocol that consists of the ICMP type and code.

    " + }, + "cidrBlock":{ + "shape":"ValidCIDRBlock", + "documentation":"

    The IPv4 network range to allow or deny, in CIDR notation. For example, 172.16.0.0/24. We modify the specified CIDR block to its canonical form. For example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18.

    " + } + }, + "documentation":"

    The network access control list (ACL) is an optional layer of security for your VPC that acts as a firewall for controlling traffic in and out of one or more subnets. The entry is a set of numbered ingress and egress rules that determine whether a packet should be allowed in or out of a subnet associated with the ACL. We process the entries in the ACL according to the rule numbers, in ascending order.
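    For illustration only, a hedged sketch of one such entry built with the generated model classes (class and member names are assumed to follow the shapes above; the rule number, ports, and CIDR are placeholders):

    import software.amazon.awssdk.services.finspace.model.NetworkACLEntry;
    import software.amazon.awssdk.services.finspace.model.PortRange;
    import software.amazon.awssdk.services.finspace.model.RuleAction;

    public class NetworkAclEntrySketch {
        public static void main(String[] args) {
            // Allow TCP/443 to an illustrative /24; entries are evaluated in ascending rule number order.
            NetworkACLEntry allowHttps = NetworkACLEntry.builder()
                .ruleNumber(100)
                .protocol("6")                  // protocol number; "-1" means all protocols
                .ruleAction(RuleAction.ALLOW)
                .portRange(PortRange.builder().from(443).to(443).build())
                .cidrBlock("172.16.0.0/24")
                .build();
            System.out.println(allowHttps);
        }
    }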

    " + }, "NodeCount":{ "type":"integer", - "max":5, "min":1 }, "NodeType":{ "type":"string", "max":32, "min":1, - "pattern":"^[a-zA-Z0-9._]+" + "pattern":"^[a-zA-Z0-9._]+$" }, "PaginationToken":{ "type":"string", @@ -2894,11 +2974,40 @@ "min":1, "pattern":".*" }, + "Port":{ + "type":"integer", + "max":65535, + "min":0 + }, + "PortRange":{ + "type":"structure", + "required":[ + "from", + "to" + ], + "members":{ + "from":{ + "shape":"Port", + "documentation":"

    The first port in the range.

    " + }, + "to":{ + "shape":"Port", + "documentation":"

    The last port in the range.

    " + } + }, + "documentation":"

    The range of ports the rule applies to.

    " + }, + "Protocol":{ + "type":"string", + "max":5, + "min":1, + "pattern":"^-1|[0-9]+$" + }, "ReleaseLabel":{ "type":"string", "max":16, "min":1, - "pattern":"^[a-zA-Z0-9._-]+" + "pattern":"^[a-zA-Z0-9._-]+$" }, "ResourceAlreadyExistsException":{ "type":"structure", @@ -2929,6 +3038,18 @@ "min":20, "pattern":"^arn:aws[a-z\\-]*:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$" }, + "RuleAction":{ + "type":"string", + "enum":[ + "allow", + "deny" + ] + }, + "RuleNumber":{ + "type":"integer", + "max":32766, + "min":1 + }, "S3Bucket":{ "type":"string", "max":255, @@ -3075,6 +3196,7 @@ "ThrottlingException":{ "type":"structure", "members":{ + "message":{"shape":"errorMessage"} }, "documentation":"

    The request was denied due to request throttling.

    ", "error":{"httpStatusCode":429}, @@ -3095,6 +3217,10 @@ "routableCIDRSpace":{ "shape":"ValidCIDRSpace", "documentation":"

    The routing CIDR on behalf of the kdb environment. It can be any /26 range in the 100.64.0.0 CIDR space. After it is provided, it is added to the customer's transit gateway routing table so that traffic can be routed to the kdb network.

    " + }, + "attachmentNetworkAclConfiguration":{ + "shape":"NetworkACLConfiguration", + "documentation":"

    The rules that define how you manage the outbound traffic from the kdb network to your internal network.

    " } }, "documentation":"

    The structure of the transit gateway and network configuration that is used to connect the kdb environment to an internal network.
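    A short, hedged sketch of wiring an ACL entry (such as the one built in the sketch above) into this structure; the transit gateway identifier member defined in this model is deliberately left out, and the CIDR is a placeholder.

    import software.amazon.awssdk.services.finspace.model.NetworkACLEntry;
    import software.amazon.awssdk.services.finspace.model.TransitGatewayConfiguration;

    public class TransitGatewaySketch {
        // Builds the transit gateway network configuration with the new optional ACL member.
        static TransitGatewayConfiguration withOutboundAcl(NetworkACLEntry entry) {
            return TransitGatewayConfiguration.builder()
                .routableCIDRSpace("100.64.0.0/26")          // any /26 in the 100.64.0.0 space
                .attachmentNetworkAclConfiguration(entry)    // rules for outbound traffic from the kdb network
                .build();
        }
    }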

    " @@ -3186,11 +3312,16 @@ }, "clientToken":{ "shape":"ClientTokenString", - "documentation":"

    A token that ensures idempotency. This token expires in 10 minutes.

    " + "documentation":"

    A token that ensures idempotency. This token expires in 10 minutes.

    ", + "idempotencyToken":true }, "databases":{ "shape":"KxDatabaseConfigurations", "documentation":"

    The structure of databases mounted on the cluster.

    " + }, + "deploymentConfiguration":{ + "shape":"KxDeploymentConfiguration", + "documentation":"

    The configuration that allows you to choose how you want to update the databases on a cluster.

    " } } }, @@ -3470,7 +3601,7 @@ }, "userArn":{ "shape":"KxUserArn", - "documentation":"

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

    " + "documentation":"

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

    " }, "environmentId":{ "shape":"IdType", @@ -3482,10 +3613,13 @@ } } }, - "ValidCIDRSpace":{ + "ValidCIDRBlock":{ "type":"string", - "pattern":"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\/26$" + "max":18, + "min":1, + "pattern":"^(?:\\d{1,3}\\.){3}\\d{1,3}(?:\\/(?:3[0-2]|[12]\\d|\\d))$" }, + "ValidCIDRSpace":{"type":"string"}, "ValidHostname":{ "type":"string", "max":255, diff --git a/services/finspacedata/pom.xml b/services/finspacedata/pom.xml index 28fa3c4089c1..2feb801c3a17 100644 --- a/services/finspacedata/pom.xml +++ b/services/finspacedata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT finspacedata AWS Java SDK :: Services :: Finspace Data diff --git a/services/firehose/pom.xml b/services/firehose/pom.xml index 9fc57ac1a888..440ee1bcd342 100644 --- a/services/firehose/pom.xml +++ b/services/firehose/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT firehose AWS Java SDK :: Services :: Amazon Kinesis Firehose diff --git a/services/firehose/src/it/java/software/amazon/awssdk/services/firehose/ServiceIntegrationTest.java b/services/firehose/src/it/java/software/amazon/awssdk/services/firehose/ServiceIntegrationTest.java deleted file mode 100644 index 95277110d059..000000000000 --- a/services/firehose/src/it/java/software/amazon/awssdk/services/firehose/ServiceIntegrationTest.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.firehose; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.fail; -import static software.amazon.awssdk.testutils.SdkAsserts.assertNotEmpty; - -import java.io.FileNotFoundException; -import java.io.IOException; -import java.util.List; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider; -import software.amazon.awssdk.awscore.exception.AwsServiceException; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.services.firehose.model.CreateDeliveryStreamRequest; -import software.amazon.awssdk.services.firehose.model.ListDeliveryStreamsRequest; -import software.amazon.awssdk.services.firehose.model.ListDeliveryStreamsResponse; -import software.amazon.awssdk.services.firehose.model.PutRecordBatchRequest; -import software.amazon.awssdk.services.firehose.model.PutRecordBatchResponseEntry; -import software.amazon.awssdk.services.firehose.model.PutRecordRequest; -import software.amazon.awssdk.services.firehose.model.Record; -import software.amazon.awssdk.services.firehose.model.S3DestinationConfiguration; -import software.amazon.awssdk.testutils.service.AwsTestBase; - - -public class ServiceIntegrationTest extends AwsTestBase { - - private static final String DEVLIVERY_STREAM_NAME = "java-sdk-delivery-stream-" - + System.currentTimeMillis(); - private static final String FAKE_S3_BUCKET_ARN = "arn:aws:s3:::fake-s3-bucket-arn"; - private static final String FAKE_IAM_ROLE_ARN = "arn:aws:iam:::fake-iam-role-arn"; - - private static FirehoseClient firehose; - - - @BeforeClass - public static void setup() throws FileNotFoundException, IOException { - // setUpCredentials(); - // firehose = new AmazonKinesisFirehoseClient(credentials); - // s3 = new AmazonS3Client(credentials); - - // TODO: firehose can't whitelist our shared account at this point, so - // for now we are using the test account provided by the firehose team - firehose = FirehoseClient.builder().credentialsProvider(CREDENTIALS_PROVIDER_CHAIN).build(); - } - - @AfterClass - public static void tearDown() { - // firehose.deleteDeliveryStream(new DeleteDeliveryStreamRequest() - // .withDeliveryStreamName(DEVLIVERY_STREAM_NAME)); - } - - // @Test - // Nope, can't make it work without full access to S3 and IAM - public void testOperations() { - - // create delivery stream - CreateDeliveryStreamRequest request = - CreateDeliveryStreamRequest.builder() - .deliveryStreamName(DEVLIVERY_STREAM_NAME) - .s3DestinationConfiguration(S3DestinationConfiguration.builder() - .bucketARN(FAKE_S3_BUCKET_ARN) - .roleARN(FAKE_IAM_ROLE_ARN) - .build()) - .build(); - firehose.createDeliveryStream(request); - - // put record - String recordId = firehose.putRecord(PutRecordRequest.builder() - .deliveryStreamName(DEVLIVERY_STREAM_NAME) - .record(Record.builder() - .data(SdkBytes.fromByteArray(new byte[] {0, 1, 2})) - .build()) - .build() - ).recordId(); - assertNotEmpty(recordId); - - // put record batch - List entries = firehose.putRecordBatch( - PutRecordBatchRequest.builder() - .deliveryStreamName(DEVLIVERY_STREAM_NAME) - .records(Record.builder().data(SdkBytes.fromByteArray(new byte[] {0})).build(), - Record.builder().data(SdkBytes.fromByteArray(new byte[] {1})).build()) - .build() - ).requestResponses(); - assertEquals(2, entries.size()); - for (PutRecordBatchResponseEntry entry : entries) { - if 
(entry.errorCode() == null) { - assertNotEmpty(entry.recordId()); - } else { - assertNotEmpty(entry.errorMessage()); - } - } - } - - @Test - public void testListDeliveryStreams() { - ListDeliveryStreamsResponse result = firehose - .listDeliveryStreams(ListDeliveryStreamsRequest.builder().build()); - assertNotNull(result.deliveryStreamNames()); - assertNotNull(result.hasMoreDeliveryStreams()); - } - - @Test - public void testCreateDeliveryStream_InvalidParameter() { - try { - firehose.createDeliveryStream(CreateDeliveryStreamRequest.builder().build()); - fail("ValidationException is expected."); - } catch (AwsServiceException exception) { - assertEquals("ValidationException", exception.awsErrorDetails().errorCode()); - assertNotEmpty(exception.awsErrorDetails().errorMessage()); - } - } - -} diff --git a/services/fis/pom.xml b/services/fis/pom.xml index 7d054d67e15d..cc086cc53cad 100644 --- a/services/fis/pom.xml +++ b/services/fis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT fis AWS Java SDK :: Services :: Fis diff --git a/services/fms/pom.xml b/services/fms/pom.xml index a6860a565aba..a471cca6e681 100644 --- a/services/fms/pom.xml +++ b/services/fms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT fms AWS Java SDK :: Services :: FMS diff --git a/services/fms/src/main/resources/codegen-resources/customization.config b/services/fms/src/main/resources/codegen-resources/customization.config index f19e48d42874..f5fe1526e9c4 100644 --- a/services/fms/src/main/resources/codegen-resources/customization.config +++ b/services/fms/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "getAdminAccount", "getNotificationChannel", "listMemberAccounts", diff --git a/services/forecast/pom.xml b/services/forecast/pom.xml index b2af7ebd8da1..e1b4b76db1e1 100644 --- a/services/forecast/pom.xml +++ b/services/forecast/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT forecast AWS Java SDK :: Services :: Forecast diff --git a/services/forecastquery/pom.xml b/services/forecastquery/pom.xml index 206ab67690ba..3225a9571cd7 100644 --- a/services/forecastquery/pom.xml +++ b/services/forecastquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT forecastquery AWS Java SDK :: Services :: Forecastquery diff --git a/services/frauddetector/pom.xml b/services/frauddetector/pom.xml index 78c54254c90d..a7e4ec81b063 100644 --- a/services/frauddetector/pom.xml +++ b/services/frauddetector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT frauddetector AWS Java SDK :: Services :: FraudDetector diff --git a/services/fsx/pom.xml b/services/fsx/pom.xml index eb97d843d2d7..690c7c8306f7 100644 --- a/services/fsx/pom.xml +++ b/services/fsx/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT fsx AWS Java SDK :: Services :: FSx diff --git a/services/fsx/src/main/resources/codegen-resources/service-2.json b/services/fsx/src/main/resources/codegen-resources/service-2.json index 131e139cb34c..18d5f518af63 100644 --- a/services/fsx/src/main/resources/codegen-resources/service-2.json +++ b/services/fsx/src/main/resources/codegen-resources/service-2.json @@ -107,7 +107,7 @@ {"shape":"ServiceLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

    Creates an Amazon FSx for Lustre data repository association (DRA). A data repository association is a link between a directory on the file system and an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository associations on a file system. Data repository associations are supported on all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.

    Each data repository association must have a unique Amazon FSx file system directory and a unique S3 bucket or prefix associated with it. You can configure a data repository association for automatic import only, for automatic export only, or for both. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

    CreateDataRepositoryAssociation isn't supported on Amazon File Cache resources. To create a DRA on Amazon File Cache, use the CreateFileCache operation.

    ", + "documentation":"

    Creates an Amazon FSx for Lustre data repository association (DRA). A data repository association is a link between a directory on the file system and an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository associations on a file system. Data repository associations are supported on all FSx for Lustre 2.12 and 2.15 file systems, excluding scratch_1 deployment type.

    Each data repository association must have a unique Amazon FSx file system directory and a unique S3 bucket or prefix associated with it. You can configure a data repository association for automatic import only, for automatic export only, or for both. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

    CreateDataRepositoryAssociation isn't supported on Amazon File Cache resources. To create a DRA on Amazon File Cache, use the CreateFileCache operation.
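    (Illustration only, not part of the service model: a minimal sketch of calling this operation with the AWS SDK for Java v2. The client and model classes come from the SDK's generated fsx module; the file system ID, directory path, and S3 prefix are placeholder values.)

        import software.amazon.awssdk.services.fsx.FSxClient;
        import software.amazon.awssdk.services.fsx.model.CreateDataRepositoryAssociationRequest;
        import software.amazon.awssdk.services.fsx.model.CreateDataRepositoryAssociationResponse;

        public class CreateDraSketch {
            public static void main(String[] args) {
                FSxClient fsx = FSxClient.create();
                // Link the /ns1 directory of the file system to an S3 prefix (placeholder values).
                CreateDataRepositoryAssociationResponse response = fsx.createDataRepositoryAssociation(
                        CreateDataRepositoryAssociationRequest.builder()
                                .fileSystemId("fs-0123456789abcdef0")
                                .fileSystemPath("/ns1")
                                .dataRepositoryPath("s3://my-bucket/my-prefix")
                                .build());
                System.out.println(response.association().associationId());
            }
        }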

    ", "idempotent":true }, "CreateDataRepositoryTask":{ @@ -127,7 +127,7 @@ {"shape":"InternalServerError"}, {"shape":"DataRepositoryTaskExecuting"} ], - "documentation":"

    Creates an Amazon FSx for Lustre data repository task. A CreateDataRepositoryTask operation will fail if a data repository is not linked to the FSx file system.

    You use import and export data repository tasks to perform bulk operations between your FSx for Lustre file system and its linked data repositories. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to a linked data repository.

    You use release data repository tasks to release data from your file system for files that are archived to S3. The metadata of released files remains on the file system so users or applications can still access released files by reading the files again, which will restore data from Amazon S3 to the FSx for Lustre file system.

    To learn more about data repository tasks, see Data Repository Tasks. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

    ", + "documentation":"

    Creates an Amazon FSx for Lustre data repository task. A CreateDataRepositoryTask operation will fail if a data repository is not linked to the FSx file system.

    You use import and export data repository tasks to perform bulk operations between your FSx for Lustre file system and its linked data repositories. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to a linked data repository.

    You use release data repository tasks to release data from your file system for files that are exported to S3. The metadata of released files remains on the file system so users or applications can still access released files by reading the files again, which will restore data from Amazon S3 to the FSx for Lustre file system.

    To learn more about data repository tasks, see Data Repository Tasks. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.
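    (Illustration only: a hedged sketch of starting an export data repository task with the AWS SDK for Java v2. The file system ID and path are placeholders; the Report member is required by the model, so it is set here with reporting disabled.)

        import software.amazon.awssdk.services.fsx.FSxClient;
        import software.amazon.awssdk.services.fsx.model.CompletionReport;
        import software.amazon.awssdk.services.fsx.model.CreateDataRepositoryTaskRequest;
        import software.amazon.awssdk.services.fsx.model.DataRepositoryTaskType;

        public class ExportTaskSketch {
            public static void main(String[] args) {
                FSxClient fsx = FSxClient.create();
                // Export changes under "path1" (relative to the mount point) to the linked repository.
                fsx.createDataRepositoryTask(CreateDataRepositoryTaskRequest.builder()
                        .fileSystemId("fs-0123456789abcdef0")
                        .type(DataRepositoryTaskType.EXPORT_TO_REPOSITORY)
                        .paths("path1")
                        .report(CompletionReport.builder().enabled(false).build())
                        .build());
            }
        }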

    ", "idempotent":true }, "CreateFileCache":{ @@ -304,7 +304,7 @@ {"shape":"ServiceLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

    Deletes a data repository association on an Amazon FSx for Lustre file system. Deleting the data repository association unlinks the file system from the Amazon S3 bucket. When deleting a data repository association, you have the option of deleting the data in the file system that corresponds to the data repository association. Data repository associations are supported on all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.

    ", + "documentation":"

    Deletes a data repository association on an Amazon FSx for Lustre file system. Deleting the data repository association unlinks the file system from the Amazon S3 bucket. When deleting a data repository association, you have the option of deleting the data in the file system that corresponds to the data repository association. Data repository associations are supported on all FSx for Lustre 2.12 and 2.15 file systems, excluding scratch_1 deployment type.

    ", "idempotent":true }, "DeleteFileCache":{ @@ -340,7 +340,7 @@ {"shape":"ServiceLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

    Deletes a file system. After deletion, the file system no longer exists, and its data is gone. Any existing automatic backups and snapshots are also deleted.

    To delete an Amazon FSx for NetApp ONTAP file system, first delete all the volumes and storage virtual machines (SVMs) on the file system. Then provide a FileSystemId value to the DeleFileSystem operation.

    By default, when you delete an Amazon FSx for Windows File Server file system, a final backup is created upon deletion. This final backup isn't subject to the file system's retention policy, and must be manually deleted.

    The DeleteFileSystem operation returns while the file system has the DELETING status. You can check the file system deletion status by calling the DescribeFileSystems operation, which returns a list of file systems in your account. If you pass the file system ID for a deleted file system, the DescribeFileSystems operation returns a FileSystemNotFound error.

    If a data repository task is in a PENDING or EXECUTING state, deleting an Amazon FSx for Lustre file system will fail with an HTTP status code 400 (Bad Request).

    The data in a deleted file system is also deleted and can't be recovered by any means.

    ", + "documentation":"

    Deletes a file system. After deletion, the file system no longer exists, and its data is gone. Any existing automatic backups and snapshots are also deleted.

    To delete an Amazon FSx for NetApp ONTAP file system, first delete all the volumes and storage virtual machines (SVMs) on the file system. Then provide a FileSystemId value to the DeleteFileSystem operation.

    By default, when you delete an Amazon FSx for Windows File Server file system, a final backup is created upon deletion. This final backup isn't subject to the file system's retention policy, and must be manually deleted.

    To delete an Amazon FSx for Lustre file system, first unmount it from every connected Amazon EC2 instance, then provide a FileSystemId value to the DeleteFileSystem operation. By default, Amazon FSx will not take a final backup when the DeleteFileSystem operation is invoked. On file systems not linked to an Amazon S3 bucket, set SkipFinalBackup to false to take a final backup of the file system you are deleting. Backups cannot be enabled on S3-linked file systems. To ensure all of your data is written back to S3 before deleting your file system, you can either monitor for the AgeOfOldestQueuedMessage metric to be zero (if using automatic export) or you can run an export data repository task. If you have automatic export enabled and want to use an export data repository task, you have to disable automatic export before executing the export data repository task.

    The DeleteFileSystem operation returns while the file system has the DELETING status. You can check the file system deletion status by calling the DescribeFileSystems operation, which returns a list of file systems in your account. If you pass the file system ID for a deleted file system, the DescribeFileSystems operation returns a FileSystemNotFound error.

    If a data repository task is in a PENDING or EXECUTING state, deleting an Amazon FSx for Lustre file system will fail with an HTTP status code 400 (Bad Request).

    The data in a deleted file system is also deleted and can't be recovered by any means.
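    (Illustration only: a minimal sketch of deleting an FSx for Lustre file system with the AWS SDK for Java v2, requesting a final backup as described above. The file system ID is a placeholder, and SkipFinalBackup=false is only valid for file systems that are not linked to S3.)

        import software.amazon.awssdk.services.fsx.FSxClient;
        import software.amazon.awssdk.services.fsx.model.DeleteFileSystemLustreConfiguration;
        import software.amazon.awssdk.services.fsx.model.DeleteFileSystemRequest;

        public class DeleteLustreFileSystemSketch {
            public static void main(String[] args) {
                FSxClient fsx = FSxClient.create();
                // Ask FSx to take a final backup before deleting the file system.
                fsx.deleteFileSystem(DeleteFileSystemRequest.builder()
                        .fileSystemId("fs-0123456789abcdef0")
                        .lustreConfiguration(DeleteFileSystemLustreConfiguration.builder()
                                .skipFinalBackup(false)
                                .build())
                        .build());
            }
        }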

    ", "idempotent":true }, "DeleteSnapshot":{ @@ -424,7 +424,7 @@ {"shape":"InvalidDataRepositoryType"}, {"shape":"InternalServerError"} ], - "documentation":"

    Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository associations, if one or more AssociationIds values are provided in the request, or if filters are used in the request. Data repository associations are supported on Amazon File Cache resources and all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.

    You can use filters to narrow the response to include just data repository associations for specific file systems (use the file-system-id filter with the ID of the file system) or caches (use the file-cache-id filter with the ID of the cache), or data repository associations for a specific repository type (use the data-repository-type filter with a value of S3 or NFS). If you don't use filters, the response returns all data repository associations owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.

    When retrieving all data repository associations, you can paginate the response by using the optional MaxResults parameter to limit the number of data repository associations returned in a response. If more data repository associations remain, a NextToken value is returned in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

    ", + "documentation":"

    Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository associations, if one or more AssociationIds values are provided in the request, or if filters are used in the request. Data repository associations are supported on Amazon File Cache resources and all FSx for Lustre 2.12 and 2.15 file systems, excluding scratch_1 deployment type.

    You can use filters to narrow the response to include just data repository associations for specific file systems (use the file-system-id filter with the ID of the file system) or caches (use the file-cache-id filter with the ID of the cache), or data repository associations for a specific repository type (use the data-repository-type filter with a value of S3 or NFS). If you don't use filters, the response returns all data repository associations owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.

    When retrieving all data repository associations, you can paginate the response by using the optional MaxResults parameter to limit the number of data repository associations returned in a response. If more data repository associations remain, a NextToken value is returned in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.
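    (Illustration only: a sketch of the MaxResults/NextToken pagination described above, filtered by file system ID, using the AWS SDK for Java v2. The file system ID is a placeholder.)

        import software.amazon.awssdk.services.fsx.FSxClient;
        import software.amazon.awssdk.services.fsx.model.DescribeDataRepositoryAssociationsRequest;
        import software.amazon.awssdk.services.fsx.model.DescribeDataRepositoryAssociationsResponse;
        import software.amazon.awssdk.services.fsx.model.Filter;
        import software.amazon.awssdk.services.fsx.model.FilterName;

        public class ListAssociationsSketch {
            public static void main(String[] args) {
                FSxClient fsx = FSxClient.create();
                String nextToken = null;
                do {
                    // Request one page of associations for a single file system (placeholder ID).
                    DescribeDataRepositoryAssociationsResponse page = fsx.describeDataRepositoryAssociations(
                            DescribeDataRepositoryAssociationsRequest.builder()
                                    .filters(Filter.builder()
                                            .name(FilterName.FILE_SYSTEM_ID)
                                            .values("fs-0123456789abcdef0")
                                            .build())
                                    .maxResults(10)
                                    .nextToken(nextToken)
                                    .build());
                    page.associations().forEach(a -> System.out.println(a.associationId()));
                    nextToken = page.nextToken();
                } while (nextToken != null);
            }
        }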

    ", "idempotent":true }, "DescribeDataRepositoryTasks":{ @@ -651,7 +651,7 @@ {"shape":"ServiceLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

    Updates the configuration of an existing data repository association on an Amazon FSx for Lustre file system. Data repository associations are supported on all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.

    ", + "documentation":"

    Updates the configuration of an existing data repository association on an Amazon FSx for Lustre file system. Data repository associations are supported on all FSx for Lustre 2.12 and 2.15 file systems, excluding scratch_1 deployment type.

    ", "idempotent":true }, "UpdateFileCache":{ @@ -692,7 +692,7 @@ {"shape":"MissingFileSystemConfiguration"}, {"shape":"ServiceLimitExceeded"} ], - "documentation":"

    Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.

    For FSx for Windows File Server file systems, you can update the following properties:

    • AuditLogConfiguration

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • SelfManagedActiveDirectoryConfiguration

    • StorageCapacity

    • StorageType

    • ThroughputCapacity

    • DiskIopsConfiguration

    • WeeklyMaintenanceStartTime

    For FSx for Lustre file systems, you can update the following properties:

    • AutoImportPolicy

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • DataCompressionType

    • LogConfiguration

    • LustreRootSquashConfiguration

    • StorageCapacity

    • WeeklyMaintenanceStartTime

    For FSx for ONTAP file systems, you can update the following properties:

    • AddRouteTableIds

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • DiskIopsConfiguration

    • FsxAdminPassword

    • RemoveRouteTableIds

    • StorageCapacity

    • ThroughputCapacity

    • WeeklyMaintenanceStartTime

    For FSx for OpenZFS file systems, you can update the following properties:

    • AutomaticBackupRetentionDays

    • CopyTagsToBackups

    • CopyTagsToVolumes

    • DailyAutomaticBackupStartTime

    • DiskIopsConfiguration

    • StorageCapacity

    • ThroughputCapacity

    • WeeklyMaintenanceStartTime

    " + "documentation":"

    Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.

    For FSx for Windows File Server file systems, you can update the following properties:

    • AuditLogConfiguration

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • SelfManagedActiveDirectoryConfiguration

    • StorageCapacity

    • StorageType

    • ThroughputCapacity

    • DiskIopsConfiguration

    • WeeklyMaintenanceStartTime

    For FSx for Lustre file systems, you can update the following properties:

    • AutoImportPolicy

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • DataCompressionType

    • LogConfiguration

    • LustreRootSquashConfiguration

    • StorageCapacity

    • WeeklyMaintenanceStartTime

    For FSx for ONTAP file systems, you can update the following properties:

    • AddRouteTableIds

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • DiskIopsConfiguration

    • FsxAdminPassword

    • RemoveRouteTableIds

    • StorageCapacity

    • ThroughputCapacity

    • WeeklyMaintenanceStartTime

    For FSx for OpenZFS file systems, you can update the following properties:

    • AddRouteTableIds

    • AutomaticBackupRetentionDays

    • CopyTagsToBackups

    • CopyTagsToVolumes

    • DailyAutomaticBackupStartTime

    • DiskIopsConfiguration

    • RemoveRouteTableIds

    • StorageCapacity

    • ThroughputCapacity

    • WeeklyMaintenanceStartTime

    " }, "UpdateSnapshot":{ "name":"UpdateSnapshot", @@ -1355,11 +1355,11 @@ "members":{ "Type":{ "shape":"DataRepositoryTaskType", - "documentation":"

    Specifies the type of data repository task to create.

    • EXPORT_TO_REPOSITORY tasks export from your Amazon FSx for Lustre file system to a linked data repository.

    • IMPORT_METADATA_FROM_REPOSITORY tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system.

    • RELEASE_DATA_FROM_FILESYSTEM tasks release files in your Amazon FSx for Lustre file system that are archived and that meet your specified release criteria.

    • AUTO_RELEASE_DATA tasks automatically release files from an Amazon File Cache resource.

    " + "documentation":"

    Specifies the type of data repository task to create.

    • EXPORT_TO_REPOSITORY tasks export from your Amazon FSx for Lustre file system to a linked data repository.

    • IMPORT_METADATA_FROM_REPOSITORY tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system.

    • RELEASE_DATA_FROM_FILESYSTEM tasks release files in your Amazon FSx for Lustre file system that have been exported to a linked S3 bucket and that meet your specified release criteria.

    • AUTO_RELEASE_DATA tasks automatically release files from an Amazon File Cache resource.

    " }, "Paths":{ "shape":"DataRepositoryTaskPaths", - "documentation":"

    A list of paths for the data repository task to use when the task is processed. If a path that you provide isn't valid, the task fails. If you don't provide paths, the default behavior is to export all files to S3 (for export tasks), import all files from S3 (for import tasks), or release all archived files that meet the last accessed time criteria (for release tasks).

    • For export tasks, the list contains paths on the FSx for Lustre file system from which the files are exported to the Amazon S3 bucket. The default path is the file system root directory. The paths you provide need to be relative to the mount point of the file system. If the mount point is /mnt/fsx and /mnt/fsx/path1 is a directory or file on the file system you want to export, then the path to provide is path1.

    • For import tasks, the list contains paths in the Amazon S3 bucket from which POSIX metadata changes are imported to the FSx for Lustre file system. The path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix (where myPrefix is optional).

    • For release tasks, the list contains directory or file paths on the FSx for Lustre file system from which to release archived files. If a directory is specified, files within the directory are released. If a file path is specified, only that file is released. To release all archived files in the file system, specify a forward slash (/) as the path.

      A file must also meet the last accessed time criteria specified in for the file to be released.

    " + "documentation":"

    A list of paths for the data repository task to use when the task is processed. If a path that you provide isn't valid, the task fails. If you don't provide paths, the default behavior is to export all files to S3 (for export tasks), import all files from S3 (for import tasks), or release all exported files that meet the last accessed time criteria (for release tasks).

    • For export tasks, the list contains paths on the FSx for Lustre file system from which the files are exported to the Amazon S3 bucket. The default path is the file system root directory. The paths you provide need to be relative to the mount point of the file system. If the mount point is /mnt/fsx and /mnt/fsx/path1 is a directory or file on the file system you want to export, then the path to provide is path1.

    • For import tasks, the list contains paths in the Amazon S3 bucket from which POSIX metadata changes are imported to the FSx for Lustre file system. The path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix (where myPrefix is optional).

    • For release tasks, the list contains directory or file paths on the FSx for Lustre file system from which to release exported files. If a directory is specified, files within the directory are released. If a file path is specified, only that file is released. To release all exported files in the file system, specify a forward slash (/) as the path.

      A file must also meet the last accessed time criteria specified in DurationSinceLastAccess for the file to be released.

    " }, "FileSystemId":{"shape":"FileSystemId"}, "Report":{ @@ -1515,7 +1515,7 @@ "KmsKeyId":{"shape":"KmsKeyId"}, "FileSystemTypeVersion":{ "shape":"FileSystemTypeVersion", - "documentation":"

    Sets the version for the Amazon FSx for Lustre file system that you're creating from a backup. Valid values are 2.10 and 2.12.

    You don't need to specify FileSystemTypeVersion because it will be applied using the backup's FileSystemTypeVersion setting. If you choose to specify FileSystemTypeVersion when creating from backup, the value must match the backup's FileSystemTypeVersion setting.

    " + "documentation":"

    Sets the version for the Amazon FSx for Lustre file system that you're creating from a backup. Valid values are 2.10, 2.12, and 2.15.

    You don't need to specify FileSystemTypeVersion because it will be applied using the backup's FileSystemTypeVersion setting. If you choose to specify FileSystemTypeVersion when creating from backup, the value must match the backup's FileSystemTypeVersion setting.

    " }, "OpenZFSConfiguration":{ "shape":"CreateFileSystemOpenZFSConfiguration", @@ -1628,7 +1628,7 @@ }, "RouteTableIds":{ "shape":"RouteTableIds", - "documentation":"

    (Multi-AZ only) Specifies the virtual private cloud (VPC) route tables in which your file system's endpoints will be created. You should specify all VPC route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.

    " + "documentation":"

    (Multi-AZ only) Specifies the route tables in which Amazon FSx creates the rules for routing traffic to the correct file server. You should specify all virtual private cloud (VPC) route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.

    " }, "ThroughputCapacity":{ "shape":"MegabytesPerSecond", @@ -1657,11 +1657,11 @@ "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "DeploymentType":{ "shape":"OpenZFSDeploymentType", - "documentation":"

    Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region . Valid values are the following:

    • MULTI_AZ_1- Creates file systems with high availability that are configured for Multi-AZ redundancy to tolerate temporary unavailability in Availability Zones (AZs). Multi_AZ_1 is available in the following Amazon Web Services Regions:

    • SINGLE_AZ_1- (Default) Creates file systems with throughput capacities of 64 - 4,096 MB/s. Single_AZ_1 is available in all Amazon Web Services Regions where Amazon FSx for OpenZFS is available.

    • SINGLE_AZ_2- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. Single_AZ_2 is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), and Europe (Ireland) Amazon Web Services Regions.

    For more information, see: Deployment type availability and File system performance in the Amazon FSx for OpenZFS User Guide.

    " + "documentation":"

    Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region. Valid values are the following:

    • MULTI_AZ_1- Creates file systems with high availability that are configured for Multi-AZ redundancy to tolerate temporary unavailability in Availability Zones (AZs). Multi_AZ_1 is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Tokyo), and Europe (Ireland) Amazon Web Services Regions.

    • SINGLE_AZ_1- Creates file systems with throughput capacities of 64 - 4,096 MB/s. Single_AZ_1 is available in all Amazon Web Services Regions where Amazon FSx for OpenZFS is available.

    • SINGLE_AZ_2- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. Single_AZ_2 is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Tokyo), and Europe (Ireland) Amazon Web Services Regions.

    For more information, see Deployment type availability and File system performance in the Amazon FSx for OpenZFS User Guide.

    " }, "ThroughputCapacity":{ "shape":"MegabytesPerSecond", - "documentation":"

    Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MBps). Valid values depend on the DeploymentType you choose, as follows:

    • For SINGLE_AZ_1, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MBps.

    • For SINGLE_AZ_2, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MBps.

    You pay for additional throughput capacity that you provision.

    " + "documentation":"

    Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MBps). Valid values depend on the DeploymentType you choose, as follows:

    • For MULTI_AZ_1 and SINGLE_AZ_2, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MBps.

    • For SINGLE_AZ_1, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MBps.

    You pay for additional throughput capacity that you provision.

    " }, "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"}, "DiskIopsConfiguration":{"shape":"DiskIopsConfiguration"}, @@ -1679,7 +1679,7 @@ }, "RouteTableIds":{ "shape":"RouteTableIds", - "documentation":"

    (Multi-AZ only) Specifies the virtual private cloud (VPC) route tables in which your file system's endpoints will be created. You should specify all VPC route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.

    " + "documentation":"

    (Multi-AZ only) Specifies the route tables in which Amazon FSx creates the rules for routing traffic to the correct file server. You should specify all virtual private cloud (VPC) route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.

    " } }, "documentation":"

    The Amazon FSx for OpenZFS configuration properties for the file system that you are creating.

    " @@ -1730,7 +1730,7 @@ "OntapConfiguration":{"shape":"CreateFileSystemOntapConfiguration"}, "FileSystemTypeVersion":{ "shape":"FileSystemTypeVersion", - "documentation":"

    (Optional) For FSx for Lustre file systems, sets the Lustre version for the file system that you're creating. Valid values are 2.10 and 2.12:

    • 2.10 is supported by the Scratch and Persistent_1 Lustre deployment types.

    • 2.12 is supported by all Lustre deployment types. 2.12 is required when setting FSx for Lustre DeploymentType to PERSISTENT_2.

    Default value = 2.10, except when DeploymentType is set to PERSISTENT_2, then the default is 2.12.

    If you set FileSystemTypeVersion to 2.10 for a PERSISTENT_2 Lustre deployment type, the CreateFileSystem operation fails.

    " + "documentation":"

    (Optional) For FSx for Lustre file systems, sets the Lustre version for the file system that you're creating. Valid values are 2.10, 2.12, and 2.15:

    • 2.10 is supported by the Scratch and Persistent_1 Lustre deployment types.

    • 2.12 and 2.15 are supported by all Lustre deployment types. 2.12 or 2.15 is required when setting FSx for Lustre DeploymentType to PERSISTENT_2.

    Default value = 2.10, except when DeploymentType is set to PERSISTENT_2, then the default is 2.12.

    If you set FileSystemTypeVersion to 2.10 for a PERSISTENT_2 Lustre deployment type, the CreateFileSystem operation fails.

    " }, "OpenZFSConfiguration":{ "shape":"CreateFileSystemOpenZFSConfiguration", @@ -2173,7 +2173,7 @@ "documentation":"

    The configuration for an NFS data repository linked to an Amazon File Cache resource with a data repository association.

    " } }, - "documentation":"

    The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket or an Amazon File Cache resource to an Amazon S3 bucket or an NFS file system. The data repository association configuration object is returned in the response of the following operations:

    • CreateDataRepositoryAssociation

    • UpdateDataRepositoryAssociation

    • DescribeDataRepositoryAssociations

    Data repository associations are supported on Amazon File Cache resources and all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.

    " + "documentation":"

    The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket or an Amazon File Cache resource to an Amazon S3 bucket or an NFS file system. The data repository association configuration object is returned in the response of the following operations:

    • CreateDataRepositoryAssociation

    • UpdateDataRepositoryAssociation

    • DescribeDataRepositoryAssociations

    Data repository associations are supported on Amazon File Cache resources and all FSx for Lustre 2.12 and 2.15 file systems, excluding scratch_1 deployment type.

    " }, "DataRepositoryAssociationId":{ "type":"string", @@ -2263,7 +2263,7 @@ }, "Type":{ "shape":"DataRepositoryTaskType", - "documentation":"

    The type of data repository task.

    • EXPORT_TO_REPOSITORY tasks export from your Amazon FSx for Lustre file system to a linked data repository.

    • IMPORT_METADATA_FROM_REPOSITORY tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system.

    • RELEASE_DATA_FROM_FILESYSTEM tasks release files in your Amazon FSx for Lustre file system that are archived and that meet your specified release criteria.

    • AUTO_RELEASE_DATA tasks automatically release files from an Amazon File Cache resource.

    " + "documentation":"

    The type of data repository task.

    • EXPORT_TO_REPOSITORY tasks export from your Amazon FSx for Lustre file system to a linked data repository.

    • IMPORT_METADATA_FROM_REPOSITORY tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system.

    • RELEASE_DATA_FROM_FILESYSTEM tasks release files in your Amazon FSx for Lustre file system that have been exported to a linked S3 bucket and that meet your specified release criteria.

    • AUTO_RELEASE_DATA tasks automatically release files from an Amazon File Cache resource.

    " }, "CreationTime":{"shape":"CreationTime"}, "StartTime":{ @@ -2306,7 +2306,7 @@ "documentation":"

    The configuration that specifies the last accessed time criteria for files that will be released from an Amazon FSx for Lustre file system.

    " } }, - "documentation":"

    A description of the data repository task.

    • You use import and export data repository tasks to perform bulk transfer operations between an Amazon FSx for Lustre file system and a linked data repository.

    • You use release data repository tasks to release archived files from your Amazon FSx for Lustre file system.

    • An Amazon File Cache resource uses a task to automatically release files from the cache.

    To learn more about data repository tasks, see Data Repository Tasks.

    " + "documentation":"

    A description of the data repository task.

    • You use import and export data repository tasks to perform bulk transfer operations between an Amazon FSx for Lustre file system and a linked data repository.

    • You use release data repository tasks to release files that have been exported to a linked S3 bucket from your Amazon FSx for Lustre file system.

    • An Amazon File Cache resource uses a task to automatically release files from the cache.

    To learn more about data repository tasks, see Data Repository Tasks.

    " }, "DataRepositoryTaskEnded":{ "type":"structure", @@ -3146,10 +3146,10 @@ }, "Value":{ "shape":"Value", - "documentation":"

    An integer that represents the minimum amount of time (in days) since a file was last accessed in the file system. Only archived files with a MAX(atime, ctime, mtime) timestamp that is more than this amount of time in the past (relative to the task create time) will be released. The default of Value is 0. This is a required parameter.

    If an archived file meets the last accessed time criteria, its file or directory path must also be specified in the Paths parameter of the operation in order for the file to be released.

    " + "documentation":"

    An integer that represents the minimum amount of time (in days) since a file was last accessed in the file system. Only exported files with a MAX(atime, ctime, mtime) timestamp that is more than this amount of time in the past (relative to the task create time) will be released. The default of Value is 0. This is a required parameter.

    If an exported file meets the last accessed time criteria, its file or directory path must also be specified in the Paths parameter of the operation in order for the file to be released.

    " } }, - "documentation":"

    Defines the minimum amount of time since last access for a file to be eligible for release. Only archived files that were last accessed or modified before this point-in-time are eligible to be released from the Amazon FSx for Lustre file system.

    " + "documentation":"

    Defines the minimum amount of time since last access for a file to be eligible for release. Only files that have been exported to S3 and that were last accessed or modified before this point-in-time are eligible to be released from the Amazon FSx for Lustre file system.

    " }, "EndTime":{"type":"timestamp"}, "ErrorMessage":{ @@ -3487,7 +3487,7 @@ }, "FileSystemTypeVersion":{ "shape":"FileSystemTypeVersion", - "documentation":"

    The Lustre version of the Amazon FSx for Lustre file system, either 2.10 or 2.12.

    " + "documentation":"

    The Lustre version of the Amazon FSx for Lustre file system, which is 2.10, 2.12, or 2.15.

    " }, "OpenZFSConfiguration":{ "shape":"OpenZFSFileSystemConfiguration", @@ -3903,7 +3903,7 @@ "DataRepositoryConfiguration":{"shape":"DataRepositoryConfiguration"}, "DeploymentType":{ "shape":"LustreDeploymentType", - "documentation":"

    The deployment type of the FSx for Lustre file system. Scratch deployment type is designed for temporary storage and shorter-term processing of data.

    SCRATCH_1 and SCRATCH_2 deployment types are best suited for when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

    The PERSISTENT_1 and PERSISTENT_2 deployment type is used for longer-term storage and workloads and encryption of data in transit. PERSISTENT_2 is built on Lustre v2.12 and offers higher PerUnitStorageThroughput (up to 1000 MB/s/TiB) along with a lower minimum storage capacity requirement (600 GiB). To learn more about FSx for Lustre deployment types, see FSx for Lustre deployment options.

    The default is SCRATCH_1.

    " + "documentation":"

    The deployment type of the FSx for Lustre file system. Scratch deployment type is designed for temporary storage and shorter-term processing of data.

    SCRATCH_1 and SCRATCH_2 deployment types are best suited for when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

    The PERSISTENT_1 and PERSISTENT_2 deployment types are used for longer-term storage and workloads, and encrypt data in transit. PERSISTENT_2 offers higher PerUnitStorageThroughput (up to 1000 MB/s/TiB) along with a lower minimum storage capacity requirement (600 GiB). To learn more about FSx for Lustre deployment types, see FSx for Lustre deployment options.

    The default is SCRATCH_1.

    " }, "PerUnitStorageThroughput":{ "shape":"PerUnitStorageThroughput", @@ -4539,10 +4539,10 @@ "members":{ "DurationSinceLastAccess":{ "shape":"DurationSinceLastAccess", - "documentation":"

    Defines the point-in-time since an archived file was last accessed, in order for that file to be eligible for release. Only files that were last accessed before this point-in-time are eligible to be released from the file system.

    " + "documentation":"

    Defines the point-in-time since an exported file was last accessed, in order for that file to be eligible for release. Only files that were last accessed before this point-in-time are eligible to be released from the file system.

    " } }, - "documentation":"

    The configuration that specifies a minimum amount of time since last access for an archived file to be eligible for release from an Amazon FSx for Lustre file system. Only files that were last accessed before this point-in-time can be released. For example, if you specify a last accessed time criteria of 9 days, only files that were last accessed 9.00001 or more days ago can be released.

    Only file data that has been archived can be released. Files that have not yet been archived, such as new or changed files that have not been exported, are not eligible for release. When files are released, their metadata stays on the file system, so they can still be accessed later. Users and applications can access a released file by reading the file again, which restores data from Amazon S3 to the FSx for Lustre file system.

    If a file meets the last accessed time criteria, its file or directory path must also be specified with the Paths parameter of the operation in order for the file to be released.

    " + "documentation":"

    The configuration that specifies a minimum amount of time since last access for an exported file to be eligible for release from an Amazon FSx for Lustre file system. Only files that were last accessed before this point-in-time can be released. For example, if you specify a last accessed time criteria of 9 days, only files that were last accessed 9.00001 or more days ago can be released.

    Only file data that has been exported to S3 can be released. Files that have not yet been exported to S3, such as new or changed files that have not been exported, are not eligible for release. When files are released, their metadata stays on the file system, so they can still be accessed later. Users and applications can access a released file by reading the file again, which restores data from Amazon S3 to the FSx for Lustre file system.

    If a file meets the last accessed time criteria, its file or directory path must also be specified with the Paths parameter of the operation in order for the file to be released.
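    (Illustration only, under the assumption that CreateDataRepositoryTask accepts the ReleaseConfiguration and DurationSinceLastAccess members described in this model: a sketch of a release task that releases exported files last accessed more than 9 days ago, using the AWS SDK for Java v2. The file system ID is a placeholder.)

        import software.amazon.awssdk.services.fsx.FSxClient;
        import software.amazon.awssdk.services.fsx.model.CompletionReport;
        import software.amazon.awssdk.services.fsx.model.CreateDataRepositoryTaskRequest;
        import software.amazon.awssdk.services.fsx.model.DataRepositoryTaskType;
        import software.amazon.awssdk.services.fsx.model.DurationSinceLastAccess;
        import software.amazon.awssdk.services.fsx.model.ReleaseConfiguration;
        import software.amazon.awssdk.services.fsx.model.Unit;

        public class ReleaseTaskSketch {
            public static void main(String[] args) {
                FSxClient fsx = FSxClient.create();
                // Release exported files under the root path that were last accessed over 9 days ago.
                fsx.createDataRepositoryTask(CreateDataRepositoryTaskRequest.builder()
                        .fileSystemId("fs-0123456789abcdef0")
                        .type(DataRepositoryTaskType.RELEASE_DATA_FROM_FILESYSTEM)
                        .paths("/")
                        .releaseConfiguration(ReleaseConfiguration.builder()
                                .durationSinceLastAccess(DurationSinceLastAccess.builder()
                                        .unit(Unit.DAYS)
                                        .value(9L)
                                        .build())
                                .build())
                        .report(CompletionReport.builder().enabled(false).build())
                        .build());
            }
        }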

    " }, "ReleaseFileSystemNfsV3LocksRequest":{ "type":"structure", @@ -5568,7 +5568,7 @@ "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "ThroughputCapacity":{ "shape":"MegabytesPerSecond", - "documentation":"

    The throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second
 (MB/s). Valid values depend on the DeploymentType you choose, as follows:

    • For SINGLE_AZ_1, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s.

    • For SINGLE_AZ_2, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MB/s.

    " + "documentation":"

    The throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second
 (MB/s). Valid values depend on the DeploymentType you choose, as follows:

    • For MULTI_AZ_1 and SINGLE_AZ_2, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MB/s.

    • For SINGLE_AZ_1, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s.

    " }, "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"}, "DiskIopsConfiguration":{"shape":"DiskIopsConfiguration"}, diff --git a/services/gamelift/pom.xml b/services/gamelift/pom.xml index 551541fdcb07..41331707bc54 100644 --- a/services/gamelift/pom.xml +++ b/services/gamelift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT gamelift AWS Java SDK :: Services :: AWS GameLift diff --git a/services/gamelift/src/main/resources/codegen-resources/customization.config b/services/gamelift/src/main/resources/codegen-resources/customization.config index dae30876df41..b7c812e13681 100644 --- a/services/gamelift/src/main/resources/codegen-resources/customization.config +++ b/services/gamelift/src/main/resources/codegen-resources/customization.config @@ -5,7 +5,7 @@ // with STS's naming of a similar shape. "AwsCredentials" : "Credentials" }, - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "createBuild", "searchGameSessions", "describePlayerSessions", diff --git a/services/gamelift/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/gamelift/src/main/resources/codegen-resources/endpoint-rule-set.json index d845bf7ea21e..4b5e0d83c89d 100644 --- a/services/gamelift/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/gamelift/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - 
"rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://gamelift-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://gamelift-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://gamelift-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://gamelift-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://gamelift.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://gamelift.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://gamelift.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://gamelift.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - 
"conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/gamelift/src/main/resources/codegen-resources/service-2.json b/services/gamelift/src/main/resources/codegen-resources/service-2.json index c3f2149ec027..814c97017ed3 100644 --- a/services/gamelift/src/main/resources/codegen-resources/service-2.json +++ b/services/gamelift/src/main/resources/codegen-resources/service-2.json @@ -26,7 +26,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.

    When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger for your game to get acceptance from all players in the ticket. Acceptances are only valid for tickets when they are in this status; all other acceptances result in an error.

    To register acceptance, specify the ticket ID, a response, and one or more players. Once all players have registered acceptance, the matchmaking tickets advance to status PLACING, where a new game session is created for the match.

    If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. The matchmaking tickets are then handled in one of two ways: For tickets where one or more players rejected the match or failed to respond, the ticket status is set to CANCELLED, and processing is terminated. For tickets where players have accepted or not yet responded, the ticket status is returned to SEARCHING to find a new match. A new matchmaking request for these players can be submitted as needed.

    Learn more

    Add FlexMatch to a game client

    FlexMatch events (reference)

    " + "documentation":"

    Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.

    When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger for your game to get acceptance from all players in each ticket. Calls to this action are only valid for tickets that are in this status; calls for tickets not in this status result in an error.

    To register acceptance, specify the ticket ID, one or more players, and an acceptance response. When all players have accepted, Amazon GameLift advances the matchmaking tickets to status PLACING, and attempts to create a new game session for the match.

    If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. Each matchmaking ticket in the failed match is handled as follows:

    • If the ticket has one or more players who rejected the match or failed to respond, the ticket status is set CANCELLED and processing is terminated.

    • If all players in the ticket accepted the match, the ticket status is returned to SEARCHING to find a new match.

    Learn more

    Add FlexMatch to a game client

    FlexMatch events (reference)
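    (Illustration only: a minimal sketch of registering acceptance for a ticket in REQUIRES_ACCEPTANCE status with the AWS SDK for Java v2. The ticket and player IDs are placeholders.)

        import software.amazon.awssdk.services.gamelift.GameLiftClient;
        import software.amazon.awssdk.services.gamelift.model.AcceptMatchRequest;
        import software.amazon.awssdk.services.gamelift.model.AcceptanceType;

        public class AcceptMatchSketch {
            public static void main(String[] args) {
                GameLiftClient gameLift = GameLiftClient.create();
                // Register acceptance for two players on the proposed match (placeholder IDs).
                gameLift.acceptMatch(AcceptMatchRequest.builder()
                        .ticketId("ticket-1234")
                        .playerIds("player-1", "player-2")
                        .acceptanceType(AcceptanceType.ACCEPT)
                        .build());
            }
        }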

    " }, "ClaimGameServer":{ "name":"ClaimGameServer", @@ -159,7 +159,7 @@ {"shape":"IdempotentParameterMismatchException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Creates a multiplayer game session for players in a specific fleet location. This operation prompts an available server process to start a game session and retrieves connection information for the new game session. As an alternative, consider using the Amazon GameLift game session placement feature with StartGameSessionPlacement , which uses FleetIQ algorithms and queues to optimize the placement process.

    When creating a game session, you specify exactly where you want to place it and provide a set of game session configuration settings. The fleet must be in ACTIVE status before a game session can be created in it.

    This operation can be used in the following ways:

    • To create a game session on an instance in a fleet's home Region, provide a fleet or alias ID along with your game session configuration.

    • To create a game session on an instance in a fleet's remote location, provide a fleet or alias ID and a location name, along with your game session configuration.

    If successful, a workflow is initiated to start a new game session. A GameSession object is returned containing the game session configuration and status. When the status is ACTIVE, game session connection information is provided and player sessions can be created for the game session. By default, newly created game sessions are open to new players. You can restrict new player access by using UpdateGameSession to change the game session's player session creation policy.

    Game session logs are retained for all active game sessions for 14 days. To access the logs, call GetGameSessionLogUrl to download the log files.

    Available in Amazon GameLift Local.

    Learn more

    Start a game session

    All APIs by task

    " + "documentation":"

    Creates a multiplayer game session for players in a specific fleet location. This operation prompts an available server process to start a game session and retrieves connection information for the new game session. As an alternative, consider using the Amazon GameLift game session placement feature with StartGameSessionPlacement , which uses the FleetIQ algorithm and queues to optimize the placement process.

    When creating a game session, you specify exactly where you want to place it and provide a set of game session configuration settings. The target fleet must be in ACTIVE status.

    You can use this operation in the following ways:

    • To create a game session on an instance in a fleet's home Region, provide a fleet or alias ID along with your game session configuration.

    • To create a game session on an instance in a fleet's remote location, provide a fleet or alias ID and a location name, along with your game session configuration.

    • To create a game session on an instance in an Anywhere fleet, specify the fleet's custom location.

    If successful, Amazon GameLift initiates a workflow to start a new game session and returns a GameSession object containing the game session configuration and status. When the game session status is ACTIVE, it is updated with connection information and you can create player sessions for the game session. By default, newly created game sessions are open to new players. You can restrict new player access by using UpdateGameSession to change the game session's player session creation policy.

    Amazon GameLift retains logs for active game sessions for 14 days. To access the logs, call GetGameSessionLogUrl to download the log files.

    Available in Amazon GameLift Local.

    Learn more

    Start a game session

    All APIs by task
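    (Illustration only: a sketch of creating a game session on a fleet's home Region with the AWS SDK for Java v2. The fleet ID and session name are placeholders.)

        import software.amazon.awssdk.services.gamelift.GameLiftClient;
        import software.amazon.awssdk.services.gamelift.model.CreateGameSessionRequest;
        import software.amazon.awssdk.services.gamelift.model.GameSession;

        public class CreateGameSessionSketch {
            public static void main(String[] args) {
                GameLiftClient gameLift = GameLiftClient.create();
                // Start a new game session on the target fleet (placeholder fleet ID).
                GameSession session = gameLift.createGameSession(CreateGameSessionRequest.builder()
                        .fleetId("fleet-1234")
                        .maximumPlayerSessionCount(10)
                        .name("my-game-session")
                        .build())
                        .gameSession();
                System.out.println(session.gameSessionId() + " is " + session.statusAsString());
            }
        }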

    " }, "CreateGameSessionQueue":{ "name":"CreateGameSessionQueue", @@ -545,7 +545,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Removes a compute resource from the specified fleet. Deregister your compute resources before you delete the compute.

    " + "documentation":"

    Removes a compute resource from an Amazon GameLift Anywhere fleet. Deregistered computes can no longer host game sessions through Amazon GameLift.

    " }, "DeregisterGameServer":{ "name":"DeregisterGameServer", @@ -608,7 +608,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Retrieves properties for a compute resource. To request a compute resource specify the fleet ID and compute name. If successful, Amazon GameLift returns an object containing the build properties.

    " + "documentation":"

    Retrieves properties for a compute resource in an Amazon GameLift fleet. Call ListCompute to get a list of compute resources in a fleet. You can request information for computes in either managed EC2 fleets or Anywhere fleets.

    To request compute properties, specify the compute name and fleet ID.

    If successful, this operation returns details for the requested compute resource. For managed EC2 fleets, this operation returns the fleet's EC2 instances. For Anywhere fleets, this operation returns the fleet's registered computes.

    " }, "DescribeEC2InstanceLimits":{ "name":"DescribeEC2InstanceLimits", @@ -889,7 +889,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Retrieves information about a fleet's instances, including instance IDs, connection data, and status.

    This operation can be used in the following ways:

    • To get information on all instances that are deployed to a fleet's home Region, provide the fleet ID.

    • To get information on all instances that are deployed to a fleet's remote location, provide the fleet ID and location name.

    • To get information on a specific instance in a fleet, provide the fleet ID and instance ID.

    Use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, an Instance object is returned for each requested instance. Instances are not returned in any particular order.

    Learn more

    Remotely Access Fleet Instances

    Debug Fleet Issues

    Related actions

    All APIs by task

    " + "documentation":"

    Retrieves information about the EC2 instances in an Amazon GameLift managed fleet, including instance ID, connection data, and status. You can use this operation with a multi-location fleet to get location-specific instance information. As an alternative, use the operations ListCompute and DescribeCompute to retrieve information for compute resources, including EC2 and Anywhere fleets.

    You can call this operation in the following ways:

    • To get information on all instances in a fleet's home Region, specify the fleet ID.

    • To get information on all instances in a fleet's remote location, specify the fleet ID and location name.

    • To get information on a specific instance in a fleet, specify the fleet ID and instance ID.

    Use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, this operation returns Instance objects for each requested instance, listed in no particular order. If you call this operation for an Anywhere fleet, you receive an InvalidRequestException.

    Learn more

    Remotely connect to fleet instances

    Debug fleet issues

    Related actions

    All APIs by task

    " }, "DescribeMatchmaking":{ "name":"DescribeMatchmaking", @@ -951,7 +951,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

    Retrieves properties for one or more player sessions.

    This action can be used in the following ways:

    • To retrieve a specific player session, provide the player session ID only.

    • To retrieve all player sessions in a game session, provide the game session ID only.

    • To retrieve all player sessions for a specific player, provide a player ID only.

    To request player sessions, specify either a player session ID, game session ID, or player ID. You can filter this request by player session status. Use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, a PlayerSession object is returned for each session that matches the request.

    Related actions

    All APIs by task

    " + "documentation":"

    Retrieves properties for one or more player sessions.

    This action can be used in the following ways:

    • To retrieve a specific player session, provide the player session ID only.

    • To retrieve all player sessions in a game session, provide the game session ID only.

    • To retrieve all player sessions for a specific player, provide a player ID only.

    To request player sessions, specify either a player session ID, game session ID, or player ID. You can filter this request by player session status. If you provide a specific PlayerSessionId or PlayerId, Amazon GameLift ignores the filter criteria. Use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, a PlayerSession object is returned for each session that matches the request.

    Related actions

    All APIs by task

    " }, "DescribeRuntimeConfiguration":{ "name":"DescribeRuntimeConfiguration", @@ -1047,7 +1047,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Requests remote access to a fleet instance. Remote access is useful for debugging, gathering benchmarking data, or observing activity in real time.

    To remotely access an instance, you need credentials that match the operating system of the instance. For a Windows instance, Amazon GameLift returns a user name and password as strings for use with a Windows Remote Desktop client. For a Linux instance, Amazon GameLift returns a user name and RSA private key, also as strings, for use with an SSH client. The private key must be saved in the proper format to a .pem file before using. If you're making this request using the CLI, saving the secret can be handled as part of the GetInstanceAccess request, as shown in one of the examples for this operation.

    To request access to a specific instance, specify the IDs of both the instance and the fleet it belongs to.

    Learn more

    Remotely Access Fleet Instances

    Debug Fleet Issues

    " + "documentation":"

    Requests authorization to remotely connect to a compute resource in an Amazon GameLift fleet. Call this action to connect to an instance in a managed EC2 fleet if the fleet's game build uses Amazon GameLift server SDK 5.x or later. To connect to instances with game builds that use server SDK 4.x or earlier, call GetInstanceAccess.

    To request access to a compute, identify the specific EC2 instance and the fleet it belongs to. You can retrieve instances for a managed EC2 fleet by calling ListCompute.

    If successful, this operation returns a set of temporary Amazon Web Services credentials, including a two-part access key and a session token. Use these credentials with Amazon EC2 Systems Manager (SSM) to start a session with the compute. For more details, see Starting a session (CLI) in the Amazon EC2 Systems Manager User Guide.

    Learn more

    Remotely connect to fleet instances

    Debug fleet issues

    " }, "GetComputeAuthToken":{ "name":"GetComputeAuthToken", @@ -1063,7 +1063,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Requests an authentication token from Amazon GameLift. The authentication token is used by your game server to authenticate with Amazon GameLift. Each authentication token has an expiration time. To continue using the compute resource to host your game server, regularly retrieve a new authorization token.

    " + "documentation":"

    Requests an authentication token from Amazon GameLift for a registered compute in an Anywhere fleet. The game servers that are running on the compute use this token to authenticate with the Amazon GameLift service. Each server process must provide a valid authentication token in its call to the Amazon GameLift server SDK action InitSDK().

    Authentication tokens are valid for a limited time span. Use a mechanism to regularly request a fresh authentication token before the current token expires.

    Learn more

    " }, "GetGameSessionLogUrl":{ "name":"GetGameSessionLogUrl", @@ -1095,7 +1095,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Requests remote access to a fleet instance. Remote access is useful for debugging, gathering benchmarking data, or observing activity in real time.

    To remotely access an instance, you need credentials that match the operating system of the instance. For a Windows instance, Amazon GameLift returns a user name and password as strings for use with a Windows Remote Desktop client. For a Linux instance, Amazon GameLift returns a user name and RSA private key, also as strings, for use with an SSH client. The private key must be saved in the proper format to a .pem file before using. If you're making this request using the CLI, saving the secret can be handled as part of the GetInstanceAccess request, as shown in one of the examples for this operation.

    To request access to a specific instance, specify the IDs of both the instance and the fleet it belongs to. You can retrieve a fleet's instance IDs by calling DescribeInstances.

    Learn more

    Remotely Access Fleet Instances

    Debug Fleet Issues

    Related actions

    All APIs by task

    " + "documentation":"

    Requests authorization to remotely connect to an instance in an Amazon GameLift managed fleet. Use this operation to connect to instances with game servers that use Amazon GameLift server SDK 4.x or earlier. To connect to instances with game servers that use server SDK 5.x or later, call GetComputeAccess.

    To request access to an instance, specify IDs for the instance and the fleet it belongs to. You can retrieve instance IDs for a fleet by calling DescribeInstances with the fleet ID.

    If successful, this operation returns an IP address and credentials. The returned credentials match the operating system of the instance, as follows:

    • For a Windows instance: returns a user name and secret (password) for use with a Windows Remote Desktop client.

    • For a Linux instance: returns a user name and secret (RSA private key) for use with an SSH client. You must save the secret to a .pem file. If you're using the CLI, see the example Get credentials for a Linux instance for tips on automatically saving the secret to a .pem file.

    Learn more

    Remotely connect to fleet instances

    Debug fleet issues

    Related actions

    All APIs by task

    " }, "ListAliases":{ "name":"ListAliases", @@ -1140,7 +1140,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Retrieves all compute resources registered to a fleet in your Amazon Web Services account. You can filter the result set by location.

    " + "documentation":"

    Retrieves the compute resources in an Amazon GameLift fleet. You can request information for either managed EC2 fleets or Anywhere fleets.

    To request a list of computes, specify the fleet ID. You can filter the result set by location. Use the pagination parameters to retrieve results in a set of sequential pages.

    If successful, this operation returns the compute resources for the requested fleet. For managed EC2 fleets, it returns a list of EC2 instances. For Anywhere fleets, it returns a list of registered compute names.

    " }, "ListFleets":{ "name":"ListFleets", @@ -1264,7 +1264,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Registers your compute resources in a fleet you previously created. After you register a compute to your fleet, you can monitor and manage your compute using Amazon GameLift. The operation returns the compute resource containing SDK endpoint you can use to connect your game server to Amazon GameLift.

    Learn more

    " + "documentation":"

    Registers a compute resource to an Amazon GameLift Anywhere fleet. With Anywhere fleets you can incorporate your own computing hardware into an Amazon GameLift game hosting solution.

    To register a compute to a fleet, give the compute a name (must be unique within the fleet) and specify the compute resource's DNS name or IP address. Provide the Anywhere fleet ID and a fleet location to associate with the compute being registered. You can optionally include the path to a TLS certificate on the compute resource.

    If successful, this operation returns the compute details, including an Amazon GameLift SDK endpoint. Game server processes that run on the compute use this endpoint to communicate with the Amazon GameLift service. Each server process includes the SDK endpoint in its call to the Amazon GameLift server SDK action InitSDK().

    Learn more

    " }, "RegisterGameServer":{ "name":"RegisterGameServer", @@ -1616,7 +1616,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

    Updates information about a registered game server to help Amazon GameLift FleetIQ to track game server availability. This operation is called by a game server process that is running on an instance in a game server group.

    Use this operation to update the following types of game server information. You can make all three types of updates in the same request:

    • To update the game server's utilization status, identify the game server and game server group and specify the current utilization status. Use this status to identify when game servers are currently hosting games and when they are available to be claimed.

    • To report health status, identify the game server and game server group and set health check to HEALTHY. If a game server does not report health status for a certain length of time, the game server is no longer considered healthy. As a result, it will be eventually deregistered from the game server group to avoid affecting utilization metrics. The best practice is to report health every 60 seconds.

    • To change game server metadata, provide updated game server data.

    Once a game server is successfully updated, the relevant statuses and timestamps are updated.

    Learn more

    Amazon GameLift FleetIQ Guide

    " + "documentation":"

    This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

    Updates information about a registered game server to help Amazon GameLift FleetIQ track game server availability. This operation is called by a game server process that is running on an instance in a game server group.

    Use this operation to update the following types of game server information. You can make all three types of updates in the same request:

    • To update the game server's utilization status from AVAILABLE (when the game server is available to be claimed) to UTILIZED (when the game server is currently hosting games). Identify the game server and game server group and specify the new utilization status. You can't change the status from UTILIZED to AVAILABLE.

    • To report health status, identify the game server and game server group and set health check to HEALTHY. If a game server does not report health status for a certain length of time, the game server is no longer considered healthy. As a result, it will be eventually deregistered from the game server group to avoid affecting utilization metrics. The best practice is to report health every 60 seconds.

    • To change game server metadata, provide updated game server data.

    Once a game server is successfully updated, the relevant statuses and timestamps are updated.

    Learn more

    Amazon GameLift FleetIQ Guide

    " }, "UpdateGameServerGroup":{ "name":"UpdateGameServerGroup", @@ -1873,18 +1873,18 @@ "members":{ "AccessKeyId":{ "shape":"NonEmptyString", - "documentation":"

    Temporary key allowing access to the Amazon GameLift S3 account.

    " + "documentation":"

    The access key ID that identifies the temporary security credentials.

    " }, "SecretAccessKey":{ "shape":"NonEmptyString", - "documentation":"

    Temporary secret key allowing access to the Amazon GameLift S3 account.

    " + "documentation":"

    The secret access key that can be used to sign requests.

    " }, "SessionToken":{ "shape":"NonEmptyString", - "documentation":"

    Token used to associate a specific build ID with the files uploaded using these credentials.

    " + "documentation":"

    The token that users must pass to the service API to use the temporary credentials.

    " } }, - "documentation":"

    Temporary access credentials used for uploading game build files to Amazon GameLift. They are valid for a limited time. If they expire before you upload your game build, get a new set by calling RequestUploadCredentials.

    ", + "documentation":"

    Amazon Web Services account security credentials that allow interactions with Amazon GameLift resources. The credentials are temporary and valid for a limited time span. You can request fresh credentials at any time.

    Amazon Web Services security credentials consist of three parts: an access key ID, a secret access key, and a session token. You must use all three parts together to authenticate your access requests.

    You need Amazon Web Services credentials for the following tasks:

    • To upload a game server build directly to Amazon GameLift S3 storage using CreateBuild. To get access for this task, call RequestUploadCredentials.

    • To remotely connect to active Amazon GameLift fleet instances. To get remote access, call GetComputeAccess.

    ", "sensitive":true }, "BackfillMode":{ @@ -2042,27 +2042,27 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

    A unique identifier for the fleet that the compute is registered to.

    " + "documentation":"

    A unique identifier for the fleet that the compute belongs to.

    " }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

    The Amazon Resource Name (ARN) of the fleet that the compute is registered to.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the fleet that the compute belongs to.

    " }, "ComputeName":{ "shape":"ComputeName", - "documentation":"

    A descriptive label that is associated with the compute resource registered to your fleet.

    " + "documentation":"

    A descriptive label for the compute resource. For instances in a managed EC2 fleet, the compute name is an instance ID.

    " }, "ComputeArn":{ "shape":"ComputeArn", - "documentation":"

    The ARN that is assigned to the compute resource and uniquely identifies it. ARNs are unique across locations.

    " + "documentation":"

    The ARN that is assigned to a compute resource and uniquely identifies it. ARNs are unique across locations. Instances in managed EC2 fleets are not assigned a ComputeARN.

    " }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

    The IP address of the compute resource. Amazon GameLift requires the DNS name or IP address to manage your compute resource.

    " + "documentation":"

    The IP address of a compute resource. Amazon GameLift requires a DNS name or IP address for a compute.

    " }, "DnsName":{ "shape":"DnsName", - "documentation":"

    The DNS name of the compute resource. Amazon GameLift requires the DNS name or IP address to manage your compute resource.

    " + "documentation":"

    The DNS name of a compute resource. Amazon GameLift requires a DNS name or IP address for a compute.

    " }, "ComputeStatus":{ "shape":"ComputeStatus", @@ -2078,18 +2078,18 @@ }, "OperatingSystem":{ "shape":"OperatingSystem", - "documentation":"

    The type of operating system on your compute resource.

    " + "documentation":"

    The type of operating system on the compute resource.

    " }, "Type":{ "shape":"EC2InstanceType", - "documentation":"

    The compute type that the fleet uses. A fleet can use Anywhere compute resources that you own, or use managed Amazon EC2 instances.

    " + "documentation":"

    The Amazon EC2 instance type that the fleet uses. For registered computes in an Amazon GameLift Anywhere fleet, this property is empty.

    " }, "GameLiftServiceSdkEndpoint":{ "shape":"GameLiftServiceSdkEndpointOutput", - "documentation":"

    The endpoint connection details of the Amazon GameLift SDK endpoint that your game server connects to.

    " + "documentation":"

    The Amazon GameLift SDK endpoint connection for a registered compute resource in an Anywhere fleet. The game servers on the compute use this endpoint to connect to the Amazon GameLift service.

    " } }, - "documentation":"

    Resources used to host your game servers. A compute resource can be managed Amazon GameLift Amazon EC2 instances or your own resources.

    " + "documentation":"

    An Amazon GameLift compute resource for hosting your game servers. A compute can be an EC2 instance in a managed EC2 fleet or a registered compute in an Anywhere fleet.

    " }, "ComputeArn":{ "type":"string", @@ -2469,7 +2469,7 @@ }, "Location":{ "shape":"LocationStringModel", - "documentation":"

    A fleet's remote location to place the new game session in. If this parameter is not set, the new game session is placed in the fleet's home Region. Specify a remote location with an Amazon Web Services Region code such as us-west-2.

    " + "documentation":"

    A fleet's remote location to place the new game session in. If this parameter is not set, the new game session is placed in the fleet's home Region. Specify a remote location with an Amazon Web Services Region code such as us-west-2. When using an Anywhere fleet, this parameter is required and must be set to the Anywhere fleet's custom location.

    " } } }, @@ -3058,11 +3058,11 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

    >A unique identifier for the fleet the compute resource is registered to.

    " + "documentation":"

    A unique identifier for the fleet the compute resource is currently registered to.

    " }, "ComputeName":{ "shape":"ComputeNameOrArn", - "documentation":"

    The name of the compute resource you want to delete.

    " + "documentation":"

    The name of the compute resource to remove from the specified Anywhere fleet.

    " } } }, @@ -3135,11 +3135,11 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

    A unique identifier for the fleet the compute is registered to.

    " + "documentation":"

    A unique identifier for the fleet that the compute is registered to. You can use either the fleet ID or ARN value.

    " }, "ComputeName":{ "shape":"ComputeNameOrArn", - "documentation":"

    A descriptive label that is associated with the compute resource registered to your fleet.

    " + "documentation":"

    The unique identifier of the compute resource to retrieve properties for. For an Anywhere fleet compute, use the registered compute name. For a managed EC2 fleet instance, use the instance ID.

    " } } }, @@ -3148,7 +3148,7 @@ "members":{ "Compute":{ "shape":"Compute", - "documentation":"

    The details of the compute resource you registered to the specified fleet.

    " + "documentation":"

    The set of properties for the requested compute resource.

    " } } }, @@ -3226,7 +3226,7 @@ "members":{ "FleetCapacity":{ "shape":"FleetCapacityList", - "documentation":"

    A collection of objects that contains capacity information for each requested fleet ID. Capacity objects are returned only for fleets that currently exist.

    " + "documentation":"

    A collection of objects that contains capacity information for each requested fleet ID. Capacity objects are returned only for fleets that currently exist. Changes in desired instance value can take up to 1 minute to be reflected.

    " }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -3338,7 +3338,7 @@ "members":{ "FleetCapacity":{ "shape":"FleetCapacity", - "documentation":"

    Resource capacity information for the requested fleet location. Capacity objects are returned only for fleets and locations that currently exist.

    " + "documentation":"

    Resource capacity information for the requested fleet location. Capacity objects are returned only for fleets and locations that currently exist. Changes in desired instance value can take up to 1 minute to be reflected.

    " } } }, @@ -3964,7 +3964,7 @@ "members":{ "DESIRED":{ "shape":"WholeNumber", - "documentation":"

    Ideal number of active instances. GameLift will always try to maintain the desired number of instances. Capacity is scaled up or down by changing the desired instances.

    " + "documentation":"

    Requested number of active instances. Amazon GameLift takes action as needed to maintain the desired number of instances. Capacity is scaled up or down by changing the desired instances. A change in the desired instances value can take up to 1 minute to be reflected when viewing a fleet's capacity settings.

    " }, "MINIMUM":{ "shape":"WholeNumber", @@ -4135,7 +4135,68 @@ "r5d.8xlarge", "r5d.12xlarge", "r5d.16xlarge", - "r5d.24xlarge" + "r5d.24xlarge", + "m6g.medium", + "m6g.large", + "m6g.xlarge", + "m6g.2xlarge", + "m6g.4xlarge", + "m6g.8xlarge", + "m6g.12xlarge", + "m6g.16xlarge", + "c6g.medium", + "c6g.large", + "c6g.xlarge", + "c6g.2xlarge", + "c6g.4xlarge", + "c6g.8xlarge", + "c6g.12xlarge", + "c6g.16xlarge", + "r6g.medium", + "r6g.large", + "r6g.xlarge", + "r6g.2xlarge", + "r6g.4xlarge", + "r6g.8xlarge", + "r6g.12xlarge", + "r6g.16xlarge", + "c6gn.medium", + "c6gn.large", + "c6gn.xlarge", + "c6gn.2xlarge", + "c6gn.4xlarge", + "c6gn.8xlarge", + "c6gn.12xlarge", + "c6gn.16xlarge", + "c7g.medium", + "c7g.large", + "c7g.xlarge", + "c7g.2xlarge", + "c7g.4xlarge", + "c7g.8xlarge", + "c7g.12xlarge", + "c7g.16xlarge", + "r7g.medium", + "r7g.large", + "r7g.xlarge", + "r7g.2xlarge", + "r7g.4xlarge", + "r7g.8xlarge", + "r7g.12xlarge", + "r7g.16xlarge", + "m7g.medium", + "m7g.large", + "m7g.xlarge", + "m7g.2xlarge", + "m7g.4xlarge", + "m7g.8xlarge", + "m7g.12xlarge", + "m7g.16xlarge", + "g5g.xlarge", + "g5g.2xlarge", + "g5g.4xlarge", + "g5g.8xlarge", + "g5g.16xlarge" ] }, "Event":{ @@ -4151,7 +4212,7 @@ }, "EventCode":{ "shape":"EventCode", - "documentation":"

    The type of event being logged.

    Fleet state transition events:

    • FLEET_CREATED -- A fleet resource was successfully created with a status of NEW. Event messaging includes the fleet ID.

    • FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. The compressed build has started downloading to a fleet instance for installation.

    • FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING to VALIDATING. Amazon GameLift has successfully downloaded the build and is now validating the build files.

    • FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING to BUILDING. Amazon GameLift has successfully verified the build files and is now running the installation scripts.

    • FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to ACTIVATING. Amazon GameLift is trying to launch an instance and test the connectivity between the build and the Amazon GameLift Service via the Server SDK.

    • FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING to ACTIVE. The fleet is now ready to host game sessions.

    • FLEET_STATE_ERROR -- The Fleet's status changed to ERROR. Describe the fleet event message for more details.

    Fleet creation events (ordered by fleet creation activity):

    • FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet instance.

    • FLEET_CREATION_EXTRACTING_BUILD -- The game server build was successfully downloaded to an instance, and the build files are now being extracted from the uploaded build and saved to an instance. Failure at this stage prevents a fleet from moving to ACTIVE status. Logs for this stage display a list of the files that are extracted and saved on the instance. Access the logs by using the URL in PreSignedLogUrl.

    • FLEET_CREATION_RUNNING_INSTALLER -- The game server build files were successfully extracted, and the GameLift is now running the build's install script (if one is included). Failure in this stage prevents a fleet from moving to ACTIVE status. Logs for this stage list the installation steps and whether or not the install completed successfully. Access the logs by using the URL in PreSignedLogUrl.

    • FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, and the GameLift is now verifying that the game server launch paths, which are specified in the fleet's runtime configuration, exist. If any listed launch path exists, Amazon GameLift tries to launch a game server process and waits for the process to report ready. Failures in this stage prevent a fleet from moving to ACTIVE status. Logs for this stage list the launch paths in the runtime configuration and indicate whether each is found. Access the logs by using the URL in PreSignedLogUrl.

    • FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the runtime configuration failed because the executable specified in a launch path does not exist on the instance.

    • FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the runtime configuration failed because the executable specified in a launch path failed to run on the fleet instance.

    • FLEET_VALIDATION_TIMED_OUT -- Validation of the fleet at the end of creation timed out. Try fleet creation again.

    • FLEET_ACTIVATION_FAILED -- The fleet failed to successfully complete one of the steps in the fleet activation process. This event code indicates that the game build was successfully downloaded to a fleet instance, built, and validated, but was not able to start a server process. For more information, see Debug Fleet Creation Issues.

    • FLEET_ACTIVATION_FAILED_NO_INSTANCES -- Fleet creation was not able to obtain any instances based on the input fleet attributes. Try again at a different time or choose a different combination of fleet attributes such as fleet type, instance type, etc.

    • FLEET_INITIALIZATION_FAILED -- A generic exception occurred during fleet creation. Describe the fleet event message for more details.

    VPC peering events:

    • FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established between the VPC for an Amazon GameLift fleet and a VPC in your Amazon Web Services account.

    • FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. Event details and status information provide additional detail. A common reason for peering failure is that the two VPCs have overlapping CIDR blocks of IPv4 addresses. To resolve this, change the CIDR block for the VPC in your Amazon Web Services account. For more information on VPC peering failures, see https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html

    • FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully deleted.

    Spot instance events:

    • INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a two-minute notification.

    Server process events:

    • SERVER_PROCESS_INVALID_PATH -- The game server executable or script could not be found based on the Fleet runtime configuration. Check that the launch path is correct based on the operating system of the Fleet.

    • SERVER_PROCESS_SDK_INITIALIZATION_TIMEOUT -- The server process did not call InitSDK() within the time expected. Check your game session log to see why InitSDK() was not called in time.

    • SERVER_PROCESS_PROCESS_READY_TIMEOUT -- The server process did not call ProcessReady() within the time expected after calling InitSDK(). Check your game session log to see why ProcessReady() was not called in time.

    • SERVER_PROCESS_CRASHED -- The server process exited without calling ProcessEnding(). Check your game session log to see why ProcessEnding() was not called.

    • SERVER_PROCESS_TERMINATED_UNHEALTHY -- The server process did not report a valid health check for too long and was therefore terminated by GameLift. Check your game session log to see if the thread became stuck processing a synchronous task for too long.

    • SERVER_PROCESS_FORCE_TERMINATED -- The server process did not exit cleanly after OnProcessTerminate() was sent within the time expected. Check your game session log to see why termination took longer than expected.

    • SERVER_PROCESS_PROCESS_EXIT_TIMEOUT -- The server process did not exit cleanly within the time expected after calling ProcessEnding(). Check your game session log to see why termination took longer than expected.

    Game session events:

    • GAME_SESSION_ACTIVATION_TIMEOUT -- GameSession failed to activate within the expected time. Check your game session log to see why ActivateGameSession() took longer to complete than expected.

    Other fleet events:

    • FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings (desired instances, minimum/maximum scaling limits). Event messaging includes the new capacity settings.

    • FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the fleet's game session protection policy setting. Event messaging includes both the old and new policy setting.

    • FLEET_DELETED -- A request to delete a fleet was initiated.

    • GENERIC_EVENT -- An unspecified event has occurred.

    " + "documentation":"

    The type of event being logged.

    Fleet state transition events:

    • FLEET_CREATED -- A fleet resource was successfully created with a status of NEW. Event messaging includes the fleet ID.

    • FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. The compressed build has started downloading to a fleet instance for installation.

    • FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING to VALIDATING. Amazon GameLift has successfully downloaded the build and is now validating the build files.

    • FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING to BUILDING. Amazon GameLift has successfully verified the build files and is now running the installation scripts.

    • FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to ACTIVATING. Amazon GameLift is trying to launch an instance and test the connectivity between the build and the Amazon GameLift Service via the Server SDK.

    • FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING to ACTIVE. The fleet is now ready to host game sessions.

    • FLEET_STATE_ERROR -- The Fleet's status changed to ERROR. Describe the fleet event message for more details.

    Fleet creation events (ordered by fleet creation activity):

    • FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet instance.

    • FLEET_CREATION_EXTRACTING_BUILD -- The game server build was successfully downloaded to an instance, and the build files are now being extracted from the uploaded build and saved to an instance. Failure at this stage prevents a fleet from moving to ACTIVE status. Logs for this stage display a list of the files that are extracted and saved on the instance. Access the logs by using the URL in PreSignedLogUrl.

    • FLEET_CREATION_RUNNING_INSTALLER -- The game server build files were successfully extracted, and Amazon GameLift is now running the build's install script (if one is included). Failure in this stage prevents a fleet from moving to ACTIVE status. Logs for this stage list the installation steps and whether or not the install completed successfully. Access the logs by using the URL in PreSignedLogUrl.

    • FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, and Amazon GameLift is now verifying that the game server launch paths, which are specified in the fleet's runtime configuration, exist. If any listed launch path exists, Amazon GameLift tries to launch a game server process and waits for the process to report ready. Failures in this stage prevent a fleet from moving to ACTIVE status. Logs for this stage list the launch paths in the runtime configuration and indicate whether each is found. Access the logs by using the URL in PreSignedLogUrl.

    • FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the runtime configuration failed because the executable specified in a launch path does not exist on the instance.

    • FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the runtime configuration failed because the executable specified in a launch path failed to run on the fleet instance.

    • FLEET_VALIDATION_TIMED_OUT -- Validation of the fleet at the end of creation timed out. Try fleet creation again.

    • FLEET_ACTIVATION_FAILED -- The fleet failed to successfully complete one of the steps in the fleet activation process. This event code indicates that the game build was successfully downloaded to a fleet instance, built, and validated, but was not able to start a server process. For more information, see Debug Fleet Creation Issues.

    • FLEET_ACTIVATION_FAILED_NO_INSTANCES -- Fleet creation was not able to obtain any instances based on the input fleet attributes. Try again at a different time or choose a different combination of fleet attributes such as fleet type, instance type, etc.

    • FLEET_INITIALIZATION_FAILED -- A generic exception occurred during fleet creation. Describe the fleet event message for more details.

    VPC peering events:

    • FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established between the VPC for an Amazon GameLift fleet and a VPC in your Amazon Web Services account.

    • FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. Event details and status information provide additional detail. A common reason for peering failure is that the two VPCs have overlapping CIDR blocks of IPv4 addresses. To resolve this, change the CIDR block for the VPC in your Amazon Web Services account. For more information on VPC peering failures, see https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html

    • FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully deleted.

    Spot instance events:

    • INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a two-minute notification.

    • INSTANCE_RECYCLED -- A spot instance was determined to have a high risk of interruption and is scheduled to be recycled once it has no active game sessions.

    Server process events:

    • SERVER_PROCESS_INVALID_PATH -- The game server executable or script could not be found based on the Fleet runtime configuration. Check that the launch path is correct based on the operating system of the Fleet.

    • SERVER_PROCESS_SDK_INITIALIZATION_TIMEOUT -- The server process did not call InitSDK() within the time expected. Check your game session log to see why InitSDK() was not called in time.

    • SERVER_PROCESS_PROCESS_READY_TIMEOUT -- The server process did not call ProcessReady() within the time expected after calling InitSDK(). Check your game session log to see why ProcessReady() was not called in time.

    • SERVER_PROCESS_CRASHED -- The server process exited without calling ProcessEnding(). Check your game session log to see why ProcessEnding() was not called.

    • SERVER_PROCESS_TERMINATED_UNHEALTHY -- The server process did not report a valid health check for too long and was therefore terminated by GameLift. Check your game session log to see if the thread became stuck processing a synchronous task for too long.

    • SERVER_PROCESS_FORCE_TERMINATED -- The server process did not exit cleanly after OnProcessTerminate() was sent within the time expected. Check your game session log to see why termination took longer than expected.

    • SERVER_PROCESS_PROCESS_EXIT_TIMEOUT -- The server process did not exit cleanly within the time expected after calling ProcessEnding(). Check your game session log to see why termination took longer than expected.

    Game session events:

    • GAME_SESSION_ACTIVATION_TIMEOUT -- GameSession failed to activate within the expected time. Check your game session log to see why ActivateGameSession() took longer to complete than expected.

    Other fleet events:

    • FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings (desired instances, minimum/maximum scaling limits). Event messaging includes the new capacity settings.

    • FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the fleet's game session protection policy setting. Event messaging includes both the old and new policy setting.

    • FLEET_DELETED -- A request to delete a fleet was initiated.

    • GENERIC_EVENT -- An unspecified event has occurred.

    " }, "Message":{ "shape":"NonEmptyString", @@ -4876,7 +4937,7 @@ }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

    The Amazon Resource Name (ARN) associated with the GameLift fleet that this game session is running on.

    " + "documentation":"

    The Amazon Resource Name (ARN) associated with the GameLift fleet that this game session is running on.

    " }, "CreationTime":{ "shape":"Timestamp", @@ -4932,14 +4993,14 @@ }, "MatchmakerData":{ "shape":"MatchmakerData", - "documentation":"

    Information about the matchmaking process that was used to create the game session. It is in JSON syntax, formatted as a string. In addition the matchmaking configuration used, it contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data. Matchmaker data is useful when requesting match backfills, and is updated whenever new players are added during a successful backfill (see StartMatchBackfill).

    " + "documentation":"

    Information about the matchmaking process that resulted in the game session, if matchmaking was used. Data is in JSON syntax, formatted as a string. Information includes the matchmaker ID as well as player attributes and team assignments. For more details on matchmaker data, see Match Data. Matchmaker data is updated whenever new players are added during a successful backfill (see StartMatchBackfill).

    " }, "Location":{ "shape":"LocationStringModel", "documentation":"

    The fleet location where the game session is running. This value might specify the fleet's home Region or a remote location. Location is expressed as an Amazon Web Services Region code such as us-west-2.

    " } }, - "documentation":"

    Properties describing a game session.

    A game session in ACTIVE status can host players. When a game session ends, its status is set to TERMINATED.

    Once the session ends, the game session object is retained for 30 days. This means you can reuse idempotency token values after this time. Game session logs are retained for 14 days.

    All APIs by task

    " + "documentation":"

    Properties describing a game session.

    A game session in ACTIVE status can host players. When a game session ends, its status is set to TERMINATED.

    Amazon GameLift retains a game session resource for 30 days after the game session ends. You can reuse idempotency token values after this time. Game session logs are retained for 14 days.

    All APIs by task

    " }, "GameSessionActivationTimeoutSeconds":{ "type":"integer", @@ -5020,7 +5081,7 @@ }, "Status":{ "shape":"GameSessionPlacementState", - "documentation":"

    Current status of the game session placement request.

    • PENDING -- The placement request is currently in the queue waiting to be processed.

    • FULFILLED -- A new game session and player sessions (if requested) have been successfully created. Values for GameSessionArn and GameSessionRegion are available.

    • CANCELLED -- The placement request was canceled.

    • TIMED_OUT -- A new game session was not successfully created before the time limit expired. You can resubmit the placement request as needed.

    • FAILED -- Amazon GameLift is not able to complete the process of placing the game session. Common reasons are the game session terminated before the placement process was completed, or an unexpected internal error.

    " + "documentation":"

    Current status of the game session placement request.

    • PENDING -- The placement request is in the queue waiting to be processed. Game session properties are not yet final.

    • FULFILLED -- A new game session has been successfully placed. Game session properties are now final.

    • CANCELLED -- The placement request was canceled.

    • TIMED_OUT -- A new game session was not successfully created before the time limit expired. You can resubmit the placement request as needed.

    • FAILED -- Amazon GameLift is not able to complete the process of placing the game session. Common reasons are the game session terminated before the placement process was completed, or an unexpected internal error.

    " }, "GameProperties":{ "shape":"GamePropertyList", @@ -5036,15 +5097,15 @@ }, "GameSessionId":{ "shape":"NonZeroAndMaxString", - "documentation":"

    A unique identifier for the game session. This value is set once the new game session is placed (placement status is FULFILLED).

    " + "documentation":"

    A unique identifier for the game session. This value isn't final until placement status is FULFILLED.

    " }, "GameSessionArn":{ "shape":"NonZeroAndMaxString", - "documentation":"

    Identifier for the game session created by this placement request. This value is set once the new game session is placed (placement status is FULFILLED). This identifier is unique across all Regions. You can use this value as a GameSessionId value as needed.

    " + "documentation":"

    Identifier for the game session created by this placement request. This identifier is unique across all Regions. This value isn't final until placement status is FULFILLED.

    " }, "GameSessionRegion":{ "shape":"NonZeroAndMaxString", - "documentation":"

    Name of the Region where the game session created by this placement request is running. This value is set once the new game session is placed (placement status is FULFILLED).

    " + "documentation":"

    Name of the Region where the game session created by this placement request is running. This value isn't final until placement status is FULFILLED.

    " }, "PlayerLatencies":{ "shape":"PlayerLatencyList", @@ -5060,7 +5121,7 @@ }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

    The IP address of the game session. To connect to a Amazon GameLift game server, an app needs both the IP address and port number. This value is set once the new game session is placed (placement status is FULFILLED).

    " + "documentation":"

    The IP address of the game session. To connect to an Amazon GameLift game server, an app needs both the IP address and port number. This value isn't final until placement status is FULFILLED.

    " }, "DnsName":{ "shape":"DnsName", @@ -5068,11 +5129,11 @@ }, "Port":{ "shape":"PortNumber", - "documentation":"

    The port number for the game session. To connect to a Amazon GameLift game server, an app needs both the IP address and port number. This value is set once the new game session is placed (placement status is FULFILLED).

    " + "documentation":"

    The port number for the game session. To connect to an Amazon GameLift game server, an app needs both the IP address and port number. This value isn't final until placement status is FULFILLED.

    " }, "PlacedPlayerSessions":{ "shape":"PlacedPlayerSessionList", - "documentation":"

    A collection of information on player sessions created in response to the game session placement request. These player sessions are created only once a new game session is successfully placed (placement status is FULFILLED). This information includes the player ID (as provided in the placement request) and the corresponding player session ID.

    " + "documentation":"

    A collection of information on player sessions created in response to the game session placement request. These player sessions are created only after a new game session is successfully placed (placement status is FULFILLED). This information includes the player ID, provided in the placement request, and a corresponding player session ID.

    " }, "GameSessionData":{ "shape":"LargeGameSessionData", @@ -5083,7 +5144,7 @@ "documentation":"

    Information on the matchmaking process for this game. Data is in JSON syntax, formatted as a string. It identifies the matchmaking configuration used to create the match, and contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data.

    " } }, - "documentation":"

    This object includes the full details of the original request plus the current status and start/end time stamps.

    " + "documentation":"

    Represents a potential game session placement, including the full details of the original placement request and the current status.

    If the game session placement status is PENDING, the properties for game session ID/ARN, region, IP address/DNS, and port aren't final. A game session is not active and ready to accept players until placement status reaches FULFILLED. When the placement is in PENDING status, Amazon GameLift may attempt to place a game session multiple times before succeeding. With each attempt, it creates a GameSession object and updates this placement object with the new game session properties.

    " }, "GameSessionPlacementState":{ "type":"string", @@ -5200,11 +5261,11 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

    A unique identifier for the fleet that the compute resource is registered to.

    " + "documentation":"

    A unique identifier for the fleet that contains the compute resource you want to connect to. You can use either the fleet ID or ARN value.

    " }, "ComputeName":{ "shape":"ComputeNameOrArn", - "documentation":"

    The name of the compute resource you are requesting credentials for.

    " + "documentation":"

    A unique identifier for the compute resource that you want to connect to. You can use either a registered compute name or an instance ID.

    " } } }, @@ -5213,7 +5274,7 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

    The fleet ID of compute resource.

    " + "documentation":"

    The ID of the fleet that contains the compute resource to be accessed.

    " }, "FleetArn":{ "shape":"FleetArn", @@ -5221,15 +5282,15 @@ }, "ComputeName":{ "shape":"ComputeNameOrArn", - "documentation":"

    The name of the compute resource you requested credentials for.

    " + "documentation":"

    The identifier of the compute resource to be accessed. This value might be either a compute name or an instance ID.

    " }, "ComputeArn":{ "shape":"ComputeArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift compute resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::compute/compute-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift compute resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::compute/compute-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " }, "Credentials":{ "shape":"AwsCredentials", - "documentation":"

    The access credentials for the compute resource.

    " + "documentation":"

    A set of temporary Amazon Web Services credentials for use when connecting to the compute resource with Amazon EC2 Systems Manager (SSM).

    " } } }, @@ -5263,19 +5324,19 @@ }, "ComputeName":{ "shape":"ComputeNameOrArn", - "documentation":"

    The name of the compute resource you are requesting the authentication token for.

    " + "documentation":"

    The name of the compute resource that the authentication token is issued to.

    " }, "ComputeArn":{ "shape":"ComputeArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift compute resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::compute/compute-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift compute resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::compute/compute-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " }, "AuthToken":{ "shape":"ComputeAuthToken", - "documentation":"

    The authentication token that your game server uses to authenticate with Amazon GameLift.

    " + "documentation":"

    A valid temporary authentication token.

    " }, "ExpirationTimestamp":{ "shape":"Timestamp", - "documentation":"

    The amount of time until the authentication token is no longer valid. To continue using the compute resource for game server hosting, renew the authentication token by using this operation again.

    " + "documentation":"

    The amount of time until the authentication token is no longer valid.

    " } } }, @@ -5307,11 +5368,11 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

    A unique identifier for the fleet that contains the instance you want access to. You can use either the fleet ID or ARN value. The fleet can be in any of the following statuses: ACTIVATING, ACTIVE, or ERROR. Fleets with an ERROR status may be accessible for a short time before they are deleted.

    " + "documentation":"

    A unique identifier for the fleet that contains the instance you want to access. You can request access to instances in EC2 fleets with the following statuses: ACTIVATING, ACTIVE, or ERROR. Use either a fleet ID or an ARN value.

    You can access fleets in ERROR status for a short period of time before Amazon GameLift deletes them.

    " }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

    A unique identifier for the instance you want to get access to. You can access an instance in any status.

    " + "documentation":"

    A unique identifier for the instance you want to access. You can access an instance in any status.

    " } } }, @@ -5349,7 +5410,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

    A unique identifier for the fleet that the instance is in.

    " + "documentation":"

    A unique identifier for the fleet that the instance belongs to.

    " }, "FleetArn":{ "shape":"FleetArn", @@ -5365,15 +5426,15 @@ }, "DnsName":{ "shape":"DnsName", - "documentation":"

    The DNS identifier assigned to the instance that is running the game session. Values have the following format:

    • TLS-enabled fleets: <unique identifier>.<region identifier>.amazongamelift.com.

    • Non-TLS-enabled fleets: ec2-<unique identifier>.compute.amazonaws.com. (See Amazon EC2 Instance IP Addressing.)

    When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.

    " + "documentation":"

    The DNS identifier assigned to the instance that is running the game session. Values have the following format:

    • TLS-enabled fleets: <unique identifier>.<region identifier>.amazongamelift.com.

    • Non-TLS-enabled fleets: ec2-<unique identifier>.compute.amazonaws.com. (See Amazon EC2 Instance IP Addressing.)

    When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.

    " }, "OperatingSystem":{ "shape":"OperatingSystem", - "documentation":"

    Operating system that is running on this instance.

    " + "documentation":"

    Operating system that is running on this EC2 instance.

    " }, "Type":{ "shape":"EC2InstanceType", - "documentation":"

    Amazon EC2 instance type that defines the computing resources of this instance.

    " + "documentation":"

    EC2 instance type that defines the computing resources of this instance.

    " }, "Status":{ "shape":"InstanceStatus", @@ -5388,22 +5449,22 @@ "documentation":"

    The fleet location of the instance, expressed as an Amazon Web Services Region code, such as us-west-2.

    " } }, - "documentation":"

    Represents an EC2 instance of virtual computing resources that hosts one or more game servers. In Amazon GameLift, a fleet can contain zero or more instances.

    Related actions

    " + "documentation":"

    Represents a virtual computing instance that runs game server processes and hosts game sessions. In Amazon GameLift, one or more instances make up a managed EC2 fleet.

    " }, "InstanceAccess":{ "type":"structure", "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

    A unique identifier for the fleet containing the instance being accessed.

    " + "documentation":"

    A unique identifier for the fleet containing the instance to be accessed.

    " }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

    A unique identifier for the instance being accessed.

    " + "documentation":"

    A unique identifier for the instance to be accessed.

    " }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

    IP address that is assigned to the instance.

    " + "documentation":"

    IP address assigned to the instance.

    " }, "OperatingSystem":{ "shape":"OperatingSystem", @@ -5411,24 +5472,24 @@ }, "Credentials":{ "shape":"InstanceCredentials", - "documentation":"

    Credentials required to access the instance.

    " + "documentation":"

    Security credentials that are required to access the instance.

    " } }, - "documentation":"

    Information required to remotely connect to a fleet instance.

    " + "documentation":"

    Information and credentials that you can use to remotely connect to an instance in an EC2 managed fleet. This data type is returned in response to a call to GetInstanceAccess.

    " }, "InstanceCredentials":{ "type":"structure", "members":{ "UserName":{ "shape":"NonEmptyString", - "documentation":"

    User login string.

    " + "documentation":"

    A user name for logging in.

    " }, "Secret":{ "shape":"NonEmptyString", - "documentation":"

    Secret string. For Windows instances, the secret is a password for use with Windows Remote Desktop. For Linux instances, it is a private key (which must be saved as a .pem file) for use with SSH.

    " + "documentation":"

    Secret string. For Windows instances, the secret is a password for use with Windows Remote Desktop. For Linux instances, it's a private key for use with SSH.

    " } }, - "documentation":"

    Set of credentials required to remotely access a fleet instance.

    ", + "documentation":"

    A set of credentials that allow remote access to an instance in an EC2 managed fleet. These credentials are returned in response to a call to GetInstanceAccess, which requests access for instances that are running game servers with the Amazon GameLift server SDK version 4.x or earlier.

    ", "sensitive":true }, "InstanceDefinition":{ @@ -5684,11 +5745,11 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

    A unique identifier for the fleet the compute resources are registered to.

    " + "documentation":"

    A unique identifier for the fleet to retrieve compute resources for.

    " }, "Location":{ "shape":"LocationStringModel", - "documentation":"

    The name of the custom location that the compute resources are assigned to.

    " + "documentation":"

    The name of a location to retrieve compute resources for.

    " }, "Limit":{ "shape":"PositiveInteger", @@ -5705,7 +5766,7 @@ "members":{ "ComputeList":{ "shape":"ComputeList", - "documentation":"

    A list of compute resources registered to the fleet you specified.

    " + "documentation":"

    A list of compute resources in the specified fleet.

    " }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -6672,23 +6733,23 @@ }, "ComputeName":{ "shape":"ComputeName", - "documentation":"

    A descriptive label that is associated with the compute resource registered to your fleet.

    " + "documentation":"

    A descriptive label for the compute resource.

    " }, "CertificatePath":{ "shape":"NonZeroAndMaxString", - "documentation":"

    The path to the TLS certificate on your compute resource. The path and certificate are not validated by Amazon GameLift.

    " + "documentation":"

    The path to a TLS certificate on your compute resource. Amazon GameLift doesn't validate the path and certificate.

    " }, "DnsName":{ "shape":"DnsNameInput", - "documentation":"

    The DNS name of the compute resource. Amazon GameLift requires the DNS name or IP address to manage your compute resource.

    " + "documentation":"

    The DNS name of the compute resource. Amazon GameLift requires either a DNS name or IP address.

    " }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

    The IP address of the compute resource. Amazon GameLift requires the DNS name or IP address to manage your compute resource.

    " + "documentation":"

    The IP address of the compute resource. Amazon GameLift requires either a DNS name or IP address.

    " }, "Location":{ "shape":"LocationStringModel", - "documentation":"

    The name of the custom location you added to the fleet you are registering this compute resource to.

    " + "documentation":"

    The name of a custom location to associate with the compute resource being registered.

    " } } }, @@ -6697,7 +6758,7 @@ "members":{ "Compute":{ "shape":"Compute", - "documentation":"

    The details of the compute resource you registered to the specified fleet.

    " + "documentation":"

    The details of the compute resource you registered.

    " } } }, @@ -7092,7 +7153,7 @@ "members":{ "LaunchPath":{ "shape":"LaunchPathStringModel", - "documentation":"

    The location of a game build executable or the Realtime script file that contains the Init() function. Game builds and Realtime scripts are installed on instances at the root:

    • Windows (custom game builds only): C:\\game. Example: \"C:\\game\\MyGame\\server.exe\"

    • Linux: /local/game. Examples: \"/local/game/MyGame/server.exe\" or \"/local/game/MyRealtimeScript.js\"

    " + "documentation":"

    The location of a game build executable or Realtime script. Game builds and Realtime scripts are installed on instances at the root:

    • Windows (custom game builds only): C:\\game. Example: \"C:\\game\\MyGame\\server.exe\"

    • Linux: /local/game. Examples: \"/local/game/MyGame/server.exe\" or \"/local/game/MyRealtimeScript.js\"

    Amazon GameLift doesn't support the use of setup scripts that launch the game executable. For custom game builds, this parameter must indicate the executable that calls the server SDK operations initSDK() and ProcessReady().

    " }, "Parameters":{ "shape":"LaunchParametersStringModel", @@ -7593,7 +7654,7 @@ }, "NewGameSessionProtectionPolicy":{ "shape":"ProtectionPolicy", - "documentation":"

    The game session protection policy to apply to all new instances created in this fleet. Instances that already exist are not affected. You can set protection for individual instances using UpdateGameSession .

    • NoProtection -- The game session can be terminated during a scale-down event.

    • FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event.

    " + "documentation":"

    The game session protection policy to apply to all new game sessions created in this fleet. Game sessions that already exist are not affected. You can set protection for individual game sessions using UpdateGameSession .

    • NoProtection -- The game session can be terminated during a scale-down event.

    • FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event.

    " }, "ResourceCreationLimitPolicy":{ "shape":"ResourceCreationLimitPolicy", @@ -7632,7 +7693,7 @@ }, "DesiredInstances":{ "shape":"WholeNumber", - "documentation":"

    The number of Amazon EC2 instances you want to maintain in the specified fleet location. This value must fall between the minimum and maximum size limits.

    " + "documentation":"

    The number of Amazon EC2 instances you want to maintain in the specified fleet location. This value must fall between the minimum and maximum size limits. Changes in desired instance value can take up to 1 minute to be reflected when viewing the fleet's capacity settings.

    " }, "MinSize":{ "shape":"WholeNumber", @@ -7752,7 +7813,7 @@ }, "UtilizationStatus":{ "shape":"GameServerUtilizationStatus", - "documentation":"

    Indicates whether the game server is available or is currently hosting gameplay.

    " + "documentation":"

    Indicates if the game server is available or is currently hosting gameplay. You can update a game server status from AVAILABLE to UTILIZED, but you can't change the status from UTILIZED to AVAILABLE.

    " }, "HealthCheck":{ "shape":"GameServerHealthCheck", diff --git a/services/gamesparks/pom.xml b/services/gamesparks/pom.xml index 8ad8c73c5396..c6e4c4966858 100644 --- a/services/gamesparks/pom.xml +++ b/services/gamesparks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT gamesparks AWS Java SDK :: Services :: Game Sparks diff --git a/services/glacier/pom.xml b/services/glacier/pom.xml index 3eb80c729a5b..fbf8b9b971d6 100644 --- a/services/glacier/pom.xml +++ b/services/glacier/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT glacier AWS Java SDK :: Services :: Amazon Glacier diff --git a/services/globalaccelerator/pom.xml b/services/globalaccelerator/pom.xml index b9a66177e180..6caffdfd77fa 100644 --- a/services/globalaccelerator/pom.xml +++ b/services/globalaccelerator/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT globalaccelerator AWS Java SDK :: Services :: Global Accelerator diff --git a/services/globalaccelerator/src/main/resources/codegen-resources/customization.config b/services/globalaccelerator/src/main/resources/codegen-resources/customization.config index f35548b15b28..52f3ddd37a79 100644 --- a/services/globalaccelerator/src/main/resources/codegen-resources/customization.config +++ b/services/globalaccelerator/src/main/resources/codegen-resources/customization.config @@ -3,7 +3,7 @@ "listAccelerators" ], "defaultSimpleMethodTestRegion": "US_WEST_2", - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "describeAcceleratorAttributes" ] } diff --git a/services/globalaccelerator/src/main/resources/codegen-resources/service-2.json b/services/globalaccelerator/src/main/resources/codegen-resources/service-2.json index cb7a9c9eda57..d95f9dcbf71b 100644 --- a/services/globalaccelerator/src/main/resources/codegen-resources/service-2.json +++ b/services/globalaccelerator/src/main/resources/codegen-resources/service-2.json @@ -1948,7 +1948,7 @@ }, "ClientIPPreservationEnabled":{ "shape":"GenericBoolean", - "documentation":"

    Indicates whether client IP address preservation is enabled for an endpoint. The value is true or false. The default value is true for new accelerators.

    If the value is set to true, the client's IP address is preserved in the X-Forwarded-For request header as traffic travels to applications on the endpoint fronted by the accelerator.

    Client IP address preservation is supported, in specific Amazon Web Services Regions, for endpoints that are Application Load Balancers and Amazon EC2 instances.

    For more information, see Preserve client IP addresses in Global Accelerator in the Global Accelerator Developer Guide.

    " + "documentation":"

    Indicates whether client IP address preservation is enabled for an endpoint. The value is true or false. The default value is true for new accelerators.

    If the value is set to true, the client's IP address is preserved in the X-Forwarded-For request header as traffic travels to applications on the endpoint fronted by the accelerator.

    Client IP address preservation is supported, in specific Amazon Web Services Regions, for endpoints that are Application Load Balancers, Amazon EC2 instances, and Network Load Balancers with Security Groups. IMPORTANT: You cannot use client IP address preservation with Network Load Balancers with TLS listeners.

    For more information, see Preserve client IP addresses in Global Accelerator in the Global Accelerator Developer Guide.

    " } }, "documentation":"

    A complex type for endpoints. A resource must be valid and active when you add it as an endpoint.

    " @@ -1980,7 +1980,7 @@ }, "ClientIPPreservationEnabled":{ "shape":"GenericBoolean", - "documentation":"

    Indicates whether client IP address preservation is enabled for an endpoint. The value is true or false. The default value is true for new accelerators.

    If the value is set to true, the client's IP address is preserved in the X-Forwarded-For request header as traffic travels to applications on the endpoint fronted by the accelerator.

    Client IP address preservation is supported, in specific Amazon Web Services Regions, for endpoints that are Application Load Balancers and Amazon EC2 instances.

    For more information, see Preserve client IP addresses in Global Accelerator in the Global Accelerator Developer Guide.

    " + "documentation":"

    Indicates whether client IP address preservation is enabled for an endpoint. The value is true or false. The default value is true for new accelerators.

    If the value is set to true, the client's IP address is preserved in the X-Forwarded-For request header as traffic travels to applications on the endpoint fronted by the accelerator.

    Client IP address preservation is supported, in specific Amazon Web Services Regions, for endpoints that are Application Load Balancers, Amazon EC2 instances, and Network Load Balancers with Security Groups. IMPORTANT: You cannot use client IP address preservation with Network Load Balancers with TLS listeners.

    For more information, see Preserve client IP addresses in Global Accelerator in the Global Accelerator Developer Guide.

    " } }, "documentation":"

    A complex type for an endpoint. Each endpoint group can include one or more endpoints, such as load balancers.

    " diff --git a/services/glue/pom.xml b/services/glue/pom.xml index b9957566262c..693837f93b51 100644 --- a/services/glue/pom.xml +++ b/services/glue/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 glue diff --git a/services/glue/src/main/resources/codegen-resources/customization.config b/services/glue/src/main/resources/codegen-resources/customization.config index 6abaf75b14b1..e5d2b586984e 100644 --- a/services/glue/src/main/resources/codegen-resources/customization.config +++ b/services/glue/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,3 @@ { - "blacklistedSimpleMethods" : ["*"] + "excludedSimpleMethods" : ["*"] } diff --git a/services/glue/src/main/resources/codegen-resources/service-2.json b/services/glue/src/main/resources/codegen-resources/service-2.json index 5084c09af879..2ccbfb1c6653 100644 --- a/services/glue/src/main/resources/codegen-resources/service-2.json +++ b/services/glue/src/main/resources/codegen-resources/service-2.json @@ -6562,6 +6562,10 @@ "CustomDatatypes":{ "shape":"CustomDatatypes", "documentation":"

    Creates a list of supported custom datatypes.

    " + }, + "Serde":{ + "shape":"CsvSerdeOption", + "documentation":"

    Sets the SerDe for processing CSV in the classifier, which will be applied in the Data Catalog. Valid values are OpenCSVSerDe, LazySimpleSerDe, and None. You can specify the None value when you want the crawler to do the detection.

    " } }, "documentation":"

    Specifies a custom CSV classifier for CreateClassifier to create.

    " @@ -7613,6 +7617,10 @@ "CustomDatatypes":{ "shape":"CustomDatatypes", "documentation":"

    A list of custom datatypes including \"BINARY\", \"BOOLEAN\", \"DATE\", \"DECIMAL\", \"DOUBLE\", \"FLOAT\", \"INT\", \"LONG\", \"SHORT\", \"STRING\", \"TIMESTAMP\".

    " + }, + "Serde":{ + "shape":"CsvSerdeOption", + "documentation":"

    Sets the SerDe for processing CSV in the classifier, which will be applied in the Data Catalog. Valid values are OpenCSVSerDe, LazySimpleSerDe, and None. You can specify the None value when you want the crawler to do the detection.

    " } }, "documentation":"

    A classifier for custom CSV content.

    " @@ -7641,6 +7649,14 @@ "min":1, "pattern":"[^\\r\\n]" }, + "CsvSerdeOption":{ + "type":"string", + "enum":[ + "OpenCSVSerDe", + "LazySimpleSerDe", + "None" + ] + }, "CustomCode":{ "type":"structure", "required":[ @@ -12772,6 +12788,10 @@ "documentation":"

    The same unique identifier was associated with two different records.

    ", "exception":true }, + "IdleTimeout":{ + "type":"integer", + "box":true + }, "IllegalBlueprintStateException":{ "type":"structure", "members":{ @@ -17795,6 +17815,30 @@ "GlueVersion":{ "shape":"GlueVersionString", "documentation":"

    The Glue version determines the versions of Apache Spark and Python that Glue supports. The GlueVersion must be greater than 2.0.

    " + }, + "NumberOfWorkers":{ + "shape":"NullableInteger", + "documentation":"

    The number of workers of a defined WorkerType to use for the session.

    " + }, + "WorkerType":{ + "shape":"WorkerType", + "documentation":"

    The type of predefined worker that is allocated when a session runs. Accepts a value of G.1X, G.2X, G.4X, or G.8X for Spark sessions. Accepts the value Z.2X for Ray sessions.

    " + }, + "CompletedOn":{ + "shape":"TimestampValue", + "documentation":"

    The date and time that this session is completed.

    " + }, + "ExecutionTime":{ + "shape":"NullableDouble", + "documentation":"

    The total time the session ran for.

    " + }, + "DPUSeconds":{ + "shape":"NullableDouble", + "documentation":"

    The DPUs consumed by the session (formula: ExecutionTime * MaxCapacity).

    " + }, + "IdleTimeout":{ + "shape":"IdleTimeout", + "documentation":"

    The number of minutes of idle time before the session times out.

    " } }, "documentation":"

    The period in which a remote Spark runtime environment is running.

    " @@ -20074,6 +20118,10 @@ "CustomDatatypes":{ "shape":"CustomDatatypes", "documentation":"

    Specifies a list of supported custom datatypes.

    " + }, + "Serde":{ + "shape":"CsvSerdeOption", + "documentation":"

    Sets the SerDe for processing CSV in the classifier, which will be applied in the Data Catalog. Valid values are OpenCSVSerDe, LazySimpleSerDe, and None. You can specify the None value when you want the crawler to do the detection.

    " } }, "documentation":"

    Specifies a custom CSV classifier to be updated.

    " diff --git a/services/grafana/pom.xml b/services/grafana/pom.xml index 0483115834c8..ab0226d7db63 100644 --- a/services/grafana/pom.xml +++ b/services/grafana/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT grafana AWS Java SDK :: Services :: Grafana diff --git a/services/grafana/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/grafana/src/main/resources/codegen-resources/endpoint-rule-set.json index d4486287034b..6c513fce87e1 100644 --- a/services/grafana/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/grafana/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://grafana-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://grafana-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + 
"type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://grafana-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://grafana-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://grafana.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://grafana.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://grafana.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://grafana.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/grafana/src/main/resources/codegen-resources/service-2.json b/services/grafana/src/main/resources/codegen-resources/service-2.json index 270eaa088518..1b29063437b0 100644 --- a/services/grafana/src/main/resources/codegen-resources/service-2.json +++ b/services/grafana/src/main/resources/codegen-resources/service-2.json @@ -1252,7 +1252,8 @@ }, "RoleValueList":{ "type":"list", - "member":{"shape":"RoleValue"} + "member":{"shape":"RoleValue"}, + "sensitive":true }, "RoleValues":{ 
"type":"structure", @@ -1326,7 +1327,7 @@ "SecurityGroupIds":{ "type":"list", "member":{"shape":"SecurityGroupId"}, - "max":100, + "max":5, "min":1 }, "ServiceQuotaExceededException":{ @@ -1382,8 +1383,8 @@ "SubnetIds":{ "type":"list", "member":{"shape":"SubnetId"}, - "max":100, - "min":1 + "max":6, + "min":2 }, "TagKey":{ "type":"string", diff --git a/services/greengrass/pom.xml b/services/greengrass/pom.xml index 16e9e3af33a0..32a42719eb5f 100644 --- a/services/greengrass/pom.xml +++ b/services/greengrass/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT greengrass AWS Java SDK :: Services :: AWS Greengrass diff --git a/services/greengrass/src/main/resources/codegen-resources/customization.config b/services/greengrass/src/main/resources/codegen-resources/customization.config index 431c95903309..85d0fe29ba96 100644 --- a/services/greengrass/src/main/resources/codegen-resources/customization.config +++ b/services/greengrass/src/main/resources/codegen-resources/customization.config @@ -12,7 +12,7 @@ "listResourceDefinitions", "listSubscriptionDefinitions" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "associateServiceRoleToAccount", "createCoreDefinition", "createConnectorDefinition", diff --git a/services/greengrassv2/pom.xml b/services/greengrassv2/pom.xml index c3416ac7364d..8d63f9b20cb5 100644 --- a/services/greengrassv2/pom.xml +++ b/services/greengrassv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT greengrassv2 AWS Java SDK :: Services :: Greengrass V2 diff --git a/services/groundstation/pom.xml b/services/groundstation/pom.xml index ad3fe417bf33..51a798dc5bb8 100644 --- a/services/groundstation/pom.xml +++ b/services/groundstation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT groundstation AWS Java SDK :: Services :: GroundStation diff --git a/services/guardduty/pom.xml b/services/guardduty/pom.xml index f6278bcd72a3..69b240b41a00 100644 --- a/services/guardduty/pom.xml +++ b/services/guardduty/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 guardduty diff --git a/services/guardduty/src/main/resources/codegen-resources/customization.config b/services/guardduty/src/main/resources/codegen-resources/customization.config index 85c8c09b6c47..3d541f6b5b27 100644 --- a/services/guardduty/src/main/resources/codegen-resources/customization.config +++ b/services/guardduty/src/main/resources/codegen-resources/customization.config @@ -4,7 +4,7 @@ "listDetectors", "listInvitations" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "createDetector", "declineInvitations", "deleteInvitations" diff --git a/services/health/pom.xml b/services/health/pom.xml index 95ef8e2589cb..098681745427 100644 --- a/services/health/pom.xml +++ b/services/health/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT health AWS Java SDK :: Services :: AWS Health APIs and Notifications diff --git a/services/health/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/health/src/main/resources/codegen-resources/endpoint-rule-set.json index d8ace323eae3..407b069f5da0 100644 --- a/services/health/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/health/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ 
+ "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,278 +140,229 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://health-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://health-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://health-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + 
"argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://health.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-global" - ] - } - ], + "conditions": [], "endpoint": { - "url": "https://global.health.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "health", - "signingRegion": "us-east-1" - } - ] - }, + "url": "https://health-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, "headers": {} }, "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ + true, { - "fn": "stringEquals", + "fn": "getAttr", "argv": [ { - "ref": "Region" + "ref": "PartitionResult" }, - "aws-cn-global" + "supportsDualStack" ] } - ], - "endpoint": { - "url": "https://global.health.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "health", - "signingRegion": "cn-northwest-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [], "endpoint": { - "url": "https://health.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://health.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "aws-global" + ] + } + ], + "endpoint": { + "url": "https://global.health.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "health", + "signingRegion": "us-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "aws-cn-global" + ] + } + ], + "endpoint": { + "url": "https://global.health.amazonaws.com.cn", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "health", + "signingRegion": "cn-northwest-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://health.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: 
Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/health/src/main/resources/codegen-resources/service-2.json b/services/health/src/main/resources/codegen-resources/service-2.json index d0ba04c00965..0097679b7de5 100644 --- a/services/health/src/main/resources/codegen-resources/service-2.json +++ b/services/health/src/main/resources/codegen-resources/service-2.json @@ -68,6 +68,17 @@ "documentation":"

    Returns the number of entities that are affected by each of the specified events.

    ", "idempotent":true }, + "DescribeEntityAggregatesForOrganization":{ + "name":"DescribeEntityAggregatesForOrganization", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEntityAggregatesForOrganizationRequest"}, + "output":{"shape":"DescribeEntityAggregatesForOrganizationResponse"}, + "documentation":"

    Returns a list of entity aggregates for your Organizations that are affected by each of the specified events.

    ", + "idempotent":true + }, "DescribeEventAggregates":{ "name":"DescribeEventAggregates", "http":{ @@ -191,6 +202,28 @@ } }, "shapes":{ + "AccountEntityAggregate":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"eventArn", + "documentation":"

    The 12-digit Amazon Web Services account number that contains the affected entities.

    " + }, + "count":{ + "shape":"count", + "documentation":"

    The number of entities that match the filter criteria for the specified events.

    " + }, + "statuses":{ + "shape":"entityStatuses", + "documentation":"

    The number of affected entities aggregated by the entity status codes.

    " + } + }, + "documentation":"

    The number of entities in an account that are impacted by a specific event aggregated by the entity status codes.

    " + }, + "AccountEntityAggregatesList":{ + "type":"list", + "member":{"shape":"AccountEntityAggregate"} + }, "AffectedEntity":{ "type":"structure", "members":{ @@ -292,11 +325,12 @@ }, "DescribeAffectedEntitiesForOrganizationRequest":{ "type":"structure", - "required":["organizationEntityFilters"], "members":{ "organizationEntityFilters":{ "shape":"OrganizationEntityFiltersList", - "documentation":"

    A JSON set of elements including the awsAccountId and the eventArn.

    " + "documentation":"

    A JSON set of elements including the awsAccountId and the eventArn.

    ", + "deprecated":true, + "deprecatedMessage":"This property is deprecated, use organizationEntityAccountFilters instead." }, "locale":{ "shape":"locale", @@ -309,6 +343,10 @@ "maxResults":{ "shape":"maxResultsLowerRange", "documentation":"

    The maximum number of items to return in one batch, between 10 and 100, inclusive.

    " + }, + "organizationEntityAccountFilters":{ + "shape":"OrganizationEntityAccountFiltersList", + "documentation":"

    A JSON set of elements including the awsAccountId, eventArn and a set of statusCodes.

    " } } }, @@ -364,6 +402,29 @@ } } }, + "DescribeEntityAggregatesForOrganizationRequest":{ + "type":"structure", + "required":["eventArns"], + "members":{ + "eventArns":{ + "shape":"OrganizationEventArnsList", + "documentation":"

    A list of event ARNs (unique identifiers). For example: \"arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-CDE456\", \"arn:aws:health:us-west-1::event/EBS/AWS_EBS_LOST_VOLUME/AWS_EBS_LOST_VOLUME_CHI789_JKL101\"

    " + }, + "awsAccountIds":{ + "shape":"OrganizationAccountIdsList", + "documentation":"

    A list of 12-digit Amazon Web Services account numbers that contain the affected entities.

    " + } + } + }, + "DescribeEntityAggregatesForOrganizationResponse":{ + "type":"structure", + "members":{ + "organizationEntityAggregates":{ + "shape":"OrganizationEntityAggregatesList", + "documentation":"

    The list of entity aggregates for each of the specified accounts that are affected by each of the specified events.

    " + } + } + }, "DescribeEntityAggregatesRequest":{ "type":"structure", "members":{ @@ -598,6 +659,25 @@ } } }, + "EntityAccountFilter":{ + "type":"structure", + "required":["eventArn"], + "members":{ + "eventArn":{ + "shape":"eventArn", + "documentation":"

    The unique identifier for the event. The event ARN has the arn:aws:health:event-region::event/SERVICE/EVENT_TYPE_CODE/EVENT_TYPE_PLUS_ID format.

    For example, an event ARN might look like the following:

    arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-DEF456

    " + }, + "awsAccountId":{ + "shape":"accountId", + "documentation":"

    The 12-digit Amazon Web Services account number that contains the affected entities.

    " + }, + "statusCodes":{ + "shape":"entityStatusCodeList", + "documentation":"

    A list of entity status codes.

    " + } + }, + "documentation":"

    A JSON set of elements including the awsAccountId, eventArn and a set of statusCodes.

    " + }, "EntityAggregate":{ "type":"structure", "members":{ @@ -608,6 +688,10 @@ "count":{ "shape":"count", "documentation":"

    The number of entities that match the criteria for the specified events.

    " + }, + "statuses":{ + "shape":"entityStatuses", + "documentation":"

    The number of affected entities aggregated by the entity status codes.

    " } }, "documentation":"

    The number of entities that are affected by one or more events. Returned by the DescribeEntityAggregates operation.

    " @@ -908,6 +992,12 @@ "documentation":"

    The specified pagination token (nextToken) is not valid.

    ", "exception":true }, + "OrganizationAccountIdsList":{ + "type":"list", + "member":{"shape":"accountId"}, + "max":25, + "min":1 + }, "OrganizationAffectedEntitiesErrorItem":{ "type":"structure", "members":{ @@ -930,6 +1020,38 @@ }, "documentation":"

    Error information returned when a DescribeAffectedEntitiesForOrganization operation can't find or process a specific entity.

    " }, + "OrganizationEntityAccountFiltersList":{ + "type":"list", + "member":{"shape":"EntityAccountFilter"}, + "max":10, + "min":1 + }, + "OrganizationEntityAggregate":{ + "type":"structure", + "members":{ + "eventArn":{ + "shape":"eventArn", + "documentation":"

    A list of event ARNs (unique identifiers). For example: \"arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-CDE456\", \"arn:aws:health:us-west-1::event/EBS/AWS_EBS_LOST_VOLUME/AWS_EBS_LOST_VOLUME_CHI789_JKL101\"

    " + }, + "count":{ + "shape":"count", + "documentation":"

    The number of entities for the organization that match the filter criteria for the specified events.

    " + }, + "statuses":{ + "shape":"entityStatuses", + "documentation":"

    The number of affected entities aggregated by the entity status codes.

    " + }, + "accounts":{ + "shape":"AccountEntityAggregatesList", + "documentation":"

    A list of entity aggregates for each of the specified accounts in your organization that are affected by a specific event. If there are no awsAccountIds provided in the request, this field will be empty in the response.

    " + } + }, + "documentation":"

    The aggregate results of entities affected by the specified event in your organization. The results are aggregated by the entity status codes for the specified set of account IDs.

    " + }, + "OrganizationEntityAggregatesList":{ + "type":"list", + "member":{"shape":"OrganizationEntityAggregate"} + }, "OrganizationEntityFiltersList":{ "type":"list", "member":{"shape":"EventAccountFilter"}, @@ -982,6 +1104,12 @@ }, "documentation":"

    Summary information about an event, returned by the DescribeEventsForOrganization operation.

    " }, + "OrganizationEventArnsList":{ + "type":"list", + "member":{"shape":"eventArn"}, + "max":25, + "min":1 + }, "OrganizationEventDetailFiltersList":{ "type":"list", "member":{"shape":"EventAccountFilter"}, @@ -1128,15 +1256,22 @@ "enum":[ "IMPAIRED", "UNIMPAIRED", - "UNKNOWN" + "UNKNOWN", + "PENDING", + "RESOLVED" ] }, "entityStatusCodeList":{ "type":"list", "member":{"shape":"entityStatusCode"}, - "max":3, + "max":5, "min":1 }, + "entityStatuses":{ + "type":"map", + "key":{"shape":"entityStatusCode"}, + "value":{"shape":"count"} + }, "entityUrl":{"type":"string"}, "entityValue":{ "type":"string", diff --git a/services/healthlake/pom.xml b/services/healthlake/pom.xml index 1ec3aa9d8349..4cfa4c29f242 100644 --- a/services/healthlake/pom.xml +++ b/services/healthlake/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT healthlake AWS Java SDK :: Services :: Health Lake diff --git a/services/honeycode/pom.xml b/services/honeycode/pom.xml index 770e789c2e26..49d69b13a321 100644 --- a/services/honeycode/pom.xml +++ b/services/honeycode/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT honeycode AWS Java SDK :: Services :: Honeycode diff --git a/services/iam/pom.xml b/services/iam/pom.xml index fcc8321ba2b9..3cf4eb090761 100644 --- a/services/iam/pom.xml +++ b/services/iam/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT iam AWS Java SDK :: Services :: AWS IAM diff --git a/services/iam/src/main/resources/codegen-resources/customization.config b/services/iam/src/main/resources/codegen-resources/customization.config index 0f65ae3b4f98..78b7c2df70ec 100644 --- a/services/iam/src/main/resources/codegen-resources/customization.config +++ b/services/iam/src/main/resources/codegen-resources/customization.config @@ -26,7 +26,7 @@ "getAccountAuthorizationDetails", "listPolicies" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "updateAccountPasswordPolicy" ], "excludeClientCreateMethod": true, diff --git a/services/identitystore/pom.xml b/services/identitystore/pom.xml index 0d21860fc982..382f98912e9c 100644 --- a/services/identitystore/pom.xml +++ b/services/identitystore/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT identitystore AWS Java SDK :: Services :: Identitystore diff --git a/services/identitystore/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/identitystore/src/main/resources/codegen-resources/endpoint-rule-set.json index 4c57a1482291..f5ff60ae89cb 100644 --- a/services/identitystore/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/identitystore/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + 
"properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://identitystore-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://identitystore-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -221,155 +225,115 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://identitystore.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://identitystore-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://identitystore.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support 
FIPS", - "type": "error" + "endpoint": { + "url": "https://identitystore-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://identitystore.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://identitystore.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://identitystore.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://identitystore.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/identitystore/src/main/resources/codegen-resources/endpoint-tests.json b/services/identitystore/src/main/resources/codegen-resources/endpoint-tests.json index 128082116cc8..a703a4cc89b4 100644 --- a/services/identitystore/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/identitystore/src/main/resources/codegen-resources/endpoint-tests.json @@ -9,8 +9,8 @@ }, "params": { "Region": "ap-northeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -22,8 +22,8 @@ }, "params": { "Region": "ap-northeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -35,8 +35,8 @@ }, "params": { "Region": "ap-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -48,8 +48,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,8 +61,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -74,8 +74,8 @@ }, "params": { "Region": "ca-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -87,8 +87,8 @@ }, "params": { "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": 
false, + "UseDualStack": false } }, { @@ -100,8 +100,8 @@ }, "params": { "Region": "eu-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -113,8 +113,8 @@ }, "params": { "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -126,8 +126,8 @@ }, "params": { "Region": "eu-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -139,8 +139,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -152,8 +152,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -165,8 +165,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -178,8 +178,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -191,8 +191,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -204,8 +204,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -217,8 +217,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -230,8 +230,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -243,8 +243,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -256,8 +256,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -269,8 +269,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -282,8 +282,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -295,8 +295,8 @@ }, "params": { "Region": "us-gov-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -308,8 +308,8 @@ }, "params": { "Region": "us-gov-west-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -321,8 +321,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -334,8 +334,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -345,8 +345,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -358,8 +358,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -369,8 +369,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -382,8 +382,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": 
false } }, { @@ -393,8 +393,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -406,8 +406,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -417,8 +417,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -430,8 +430,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -443,8 +443,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -456,8 +456,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -468,8 +468,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -480,8 +480,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } }, diff --git a/services/identitystore/src/main/resources/codegen-resources/service-2.json b/services/identitystore/src/main/resources/codegen-resources/service-2.json index 1c3748844b01..8e4336441fd8 100644 --- a/services/identitystore/src/main/resources/codegen-resources/service-2.json +++ b/services/identitystore/src/main/resources/codegen-resources/service-2.json @@ -143,7 +143,7 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

    Retrieves the group metadata and attributes from GroupId in an identity store. " + "documentation":" Retrieves the group metadata and attributes from GroupId in an identity store. If you have administrator access to a member account, you can use this API from the member account. Read about member accounts in the Organizations User Guide.

    " }, "DescribeGroupMembership":{ "name":"DescribeGroupMembership", @@ -160,7 +160,7 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

    Retrieves membership metadata and attributes from MembershipId in an identity store. " + "documentation":" Retrieves membership metadata and attributes from MembershipId in an identity store. If you have administrator access to a member account, you can use this API from the member account. Read about member accounts in the Organizations User Guide.

    " }, "DescribeUser":{ "name":"DescribeUser", @@ -177,7 +177,7 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

    Retrieves the user metadata and attributes from the UserId in an identity store. " + "documentation":" Retrieves the user metadata and attributes from the UserId in an identity store. If you have administrator access to a member account, you can use this API from the member account. Read about member accounts in the Organizations User Guide.

    " }, "GetGroupId":{ "name":"GetGroupId", @@ -194,7 +194,7 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

    Retrieves GroupId in an identity store. " + "documentation":" Retrieves GroupId in an identity store. If you have administrator access to a member account, you can use this API from the member account. Read about member accounts in the Organizations User Guide.

    " }, "GetGroupMembershipId":{ "name":"GetGroupMembershipId", @@ -211,7 +211,7 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

    Retrieves the MembershipId in an identity store. " + "documentation":" Retrieves the MembershipId in an identity store. If you have administrator access to a member account, you can use this API from the member account. Read about member accounts in the Organizations User Guide.

    " }, "GetUserId":{ "name":"GetUserId", @@ -228,7 +228,7 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

    Retrieves the UserId in an identity store. " + "documentation":" Retrieves the UserId in an identity store. If you have administrator access to a member account, you can use this API from the member account. Read about member accounts in the Organizations User Guide.

    " }, "IsMemberInGroups":{ "name":"IsMemberInGroups", @@ -245,7 +245,7 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

    Checks the user's membership in all requested groups and returns if the member exists in all queried groups. " + "documentation":" Checks the user's membership in all requested groups and returns if the member exists in all queried groups. If you have administrator access to a member account, you can use this API from the member account. Read about member accounts in the Organizations User Guide.

    " }, "ListGroupMemberships":{ "name":"ListGroupMemberships", @@ -262,7 +262,7 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

    For the specified group in the specified identity store, returns the list of all GroupMembership objects and returns results in paginated form. " + "documentation":" For the specified group in the specified identity store, returns the list of all GroupMembership objects and returns results in paginated form. If you have administrator access to a member account, you can use this API from the member account. Read about member accounts in the Organizations User Guide.

    " }, "ListGroupMembershipsForMember":{ "name":"ListGroupMembershipsForMember", @@ -279,7 +279,7 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

    For the specified member in the specified identity store, returns the list of all GroupMembership objects and returns results in paginated form. " + "documentation":" For the specified member in the specified identity store, returns the list of all GroupMembership objects and returns results in paginated form. If you have administrator access to a member account, you can use this API from the member account. Read about member accounts in the Organizations User Guide.

    " }, "ListGroups":{ "name":"ListGroups", @@ -296,7 +296,7 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

    Lists all groups in the identity store. Returns a paginated list of complete Group objects. Filtering for a Group by the DisplayName attribute is deprecated. Instead, use the GetGroupId API action. " + "documentation":" Lists all groups in the identity store. Returns a paginated list of complete Group objects. Filtering for a Group by the DisplayName attribute is deprecated. Instead, use the GetGroupId API action. If you have administrator access to a member account, you can use this API from the member account. Read about member accounts in the Organizations User Guide.

    " }, "ListUsers":{ "name":"ListUsers", @@ -313,7 +313,7 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

    Lists all users in the identity store. Returns a paginated list of complete User objects. Filtering for a User by the UserName attribute is deprecated. Instead, use the GetUserId API action. " + "documentation":" Lists all users in the identity store. Returns a paginated list of complete User objects. Filtering for a User by the UserName attribute is deprecated. Instead, use the GetUserId API action. If you have administrator access to a member account, you can use this API from the member account. Read about member accounts in the Organizations User Guide.

    " }, "UpdateGroup":{ "name":"UpdateGroup", @@ -436,7 +436,7 @@ }, "AttributeValue":{ "shape":"AttributeValue", - "documentation":"

    The value of the attribute. This is a Document type. This type is not supported by Java V1, Go V1, and older versions of the AWS CLI. " + "documentation":" The value of the attribute. This is a Document type. This type is not supported by Java V1, Go V1, and older versions of the CLI. " } }, "documentation":" An operation that applies to the requested group. This operation might add, replace, or remove an attribute.

    " @@ -457,7 +457,7 @@ "type":"structure", "members":{ }, - "documentation":"

    The value of the attribute. This is a Document type. This type is not supported by Java V1, Go V1, and older versions of the AWS CLI. ", + "documentation":" The value of the attribute. This is a Document type. This type is not supported by Java V1, Go V1, and older versions of the CLI.

    ", "document":true }, "ConflictException":{ @@ -532,7 +532,7 @@ }, "DisplayName":{ "shape":"GroupDisplayName", - "documentation":"

    A string containing the name of the group. This value is commonly displayed when the group is referenced. \"Administrator\" and \"AWSAdministrators\" are reserved names and can't be used for users or groups. " + "documentation":" A string containing the name of the group. This value is commonly displayed when the group is referenced. Administrator and AWSAdministrators are reserved names and can't be used for users or groups.

    " }, "Description":{ "shape":"SensitiveStringType", @@ -567,7 +567,7 @@ }, "UserName":{ "shape":"UserName", - "documentation":"

    A unique string used to identify the user. The length limit is 128 characters. This value can consist of letters, accented characters, symbols, numbers, and punctuation. This value is specified at the time the user is created and stored as an attribute of the user object in the identity store. \"Administrator\" and \"AWSAdministrators\" are reserved names and can't be used for users or groups. " + "documentation":" A unique string used to identify the user. The length limit is 128 characters. This value can consist of letters, accented characters, symbols, numbers, and punctuation. This value is specified at the time the user is created and stored as an attribute of the user object in the identity store. Administrator and AWSAdministrators are reserved names and can't be used for users or groups.

    " }, "Name":{ "shape":"Name", @@ -1430,7 +1430,7 @@ "type":"string", "max":65535, "min":1, - "pattern":"[-a-zA-Z0-9+=/:]*" + "pattern":"[-a-zA-Z0-9+=/:_]*" }, "PhoneNumber":{ "type":"structure", @@ -1551,7 +1551,7 @@ }, "AttributeValue":{ "shape":"AttributeValue", - "documentation":"

    The value of the attribute. This is a Document type. This type is not supported by Java V1, Go V1, and older versions of the AWS CLI. " + "documentation":" The value of the attribute. This is a Document type. This type is not supported by Java V1, Go V1, and older versions of the CLI. " } }, "documentation":" An entity attribute that's unique to a specific entity.

    " @@ -1708,5 +1708,5 @@ "exception":true } }, - "documentation":"

    The Identity Store service used by AWS IAM Identity Center (successor to AWS Single Sign-On) provides a single place to retrieve all of your identities (users and groups). For more information, see the IAM Identity Center User Guide. <note> <p>Although AWS Single Sign-On was renamed, the <code>sso</code> and <code>identitystore</code> API namespaces will continue to retain their original name for backward compatibility purposes. For more information, see <a href="https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed">IAM Identity Center rename</a>.</p> </note> <p>This reference guide describes the identity store operations that you can call programatically and includes detailed information about data types and errors.</p> " + "documentation":" The Identity Store service used by IAM Identity Center provides a single place to retrieve all of your identities (users and groups). For more information, see the IAM Identity Center User Guide. This reference guide describes the identity store operations that you can call programmatically and includes detailed information about data types and errors. IAM Identity Center uses the sso and identitystore API namespaces.

    " } diff --git a/services/imagebuilder/pom.xml b/services/imagebuilder/pom.xml index b42888fbf85d..a3f00afcdc2f 100644 --- a/services/imagebuilder/pom.xml +++ b/services/imagebuilder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT imagebuilder AWS Java SDK :: Services :: Imagebuilder diff --git a/services/inspector/pom.xml b/services/inspector/pom.xml index 8c6b51f17017..daa207a9b593 100644 --- a/services/inspector/pom.xml +++ b/services/inspector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT inspector AWS Java SDK :: Services :: Amazon Inspector Service diff --git a/services/inspector2/pom.xml b/services/inspector2/pom.xml index 237e7b59e57d..1a9dba8e632f 100644 --- a/services/inspector2/pom.xml +++ b/services/inspector2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT inspector2 AWS Java SDK :: Services :: Inspector2 diff --git a/services/internetmonitor/pom.xml b/services/internetmonitor/pom.xml index 83fe357ad22a..3db5c28267ca 100644 --- a/services/internetmonitor/pom.xml +++ b/services/internetmonitor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT internetmonitor AWS Java SDK :: Services :: Internet Monitor diff --git a/services/iot/pom.xml b/services/iot/pom.xml index 49a840587a19..6ca8676234e7 100644 --- a/services/iot/pom.xml +++ b/services/iot/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT iot AWS Java SDK :: Services :: AWS IoT diff --git a/services/iot/src/main/resources/codegen-resources/customization.config b/services/iot/src/main/resources/codegen-resources/customization.config index 9f910afc97f7..bc16e09001e9 100644 --- a/services/iot/src/main/resources/codegen-resources/customization.config +++ b/services/iot/src/main/resources/codegen-resources/customization.config @@ -30,7 +30,7 @@ "listThings", "listTopicRules" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "addThingToThingGroup", "removeThingFromThingGroup", "setV2LoggingOptions", diff --git a/services/iot1clickdevices/pom.xml b/services/iot1clickdevices/pom.xml index d7fef23a2019..3a54005a5966 100644 --- a/services/iot1clickdevices/pom.xml +++ b/services/iot1clickdevices/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT iot1clickdevices AWS Java SDK :: Services :: IoT 1Click Devices Service diff --git a/services/iot1clickdevices/src/main/resources/codegen-resources/customization.config b/services/iot1clickdevices/src/main/resources/codegen-resources/customization.config index 0bed30b47978..b947f5dbc959 100644 --- a/services/iot1clickdevices/src/main/resources/codegen-resources/customization.config +++ b/services/iot1clickdevices/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "listDevices" ] } diff --git a/services/iot1clickprojects/pom.xml b/services/iot1clickprojects/pom.xml index be8cd289fa6c..9afd42112955 100644 --- a/services/iot1clickprojects/pom.xml +++ b/services/iot1clickprojects/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT iot1clickprojects AWS Java SDK :: Services :: IoT 1Click Projects diff --git a/services/iotanalytics/pom.xml b/services/iotanalytics/pom.xml index d4917204760e..c025865c0d3b 100644 --- a/services/iotanalytics/pom.xml +++ 
b/services/iotanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT iotanalytics AWS Java SDK :: Services :: IoTAnalytics diff --git a/services/iotanalytics/src/main/resources/codegen-resources/customization.config b/services/iotanalytics/src/main/resources/codegen-resources/customization.config index 5837f8c071c6..ba5ea64efd7f 100644 --- a/services/iotanalytics/src/main/resources/codegen-resources/customization.config +++ b/services/iotanalytics/src/main/resources/codegen-resources/customization.config @@ -5,7 +5,7 @@ "listDatastores", "listPipelines" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "describeLoggingOptions" ], "shapeModifiers": { diff --git a/services/iotdataplane/pom.xml b/services/iotdataplane/pom.xml index 3907df5f2971..e5d025f1c6bf 100644 --- a/services/iotdataplane/pom.xml +++ b/services/iotdataplane/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT iotdataplane AWS Java SDK :: Services :: AWS IoT Data Plane diff --git a/services/iotdeviceadvisor/pom.xml b/services/iotdeviceadvisor/pom.xml index d9329f3b343d..8b2b04657b74 100644 --- a/services/iotdeviceadvisor/pom.xml +++ b/services/iotdeviceadvisor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT iotdeviceadvisor AWS Java SDK :: Services :: Iot Device Advisor diff --git a/services/iotevents/pom.xml b/services/iotevents/pom.xml index fd0fb0d0722a..9a4e95ca0310 100644 --- a/services/iotevents/pom.xml +++ b/services/iotevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT iotevents AWS Java SDK :: Services :: IoT Events diff --git a/services/ioteventsdata/pom.xml b/services/ioteventsdata/pom.xml index adba4e284f75..3a73d1d3f981 100644 --- a/services/ioteventsdata/pom.xml +++ b/services/ioteventsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ioteventsdata AWS Java SDK :: Services :: IoT Events Data diff --git a/services/iotfleethub/pom.xml b/services/iotfleethub/pom.xml index 809a9f85c9a9..a55e162081d8 100644 --- a/services/iotfleethub/pom.xml +++ b/services/iotfleethub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT iotfleethub AWS Java SDK :: Services :: Io T Fleet Hub diff --git a/services/iotfleetwise/pom.xml b/services/iotfleetwise/pom.xml index 7b12421d9a4a..8743d586da5d 100644 --- a/services/iotfleetwise/pom.xml +++ b/services/iotfleetwise/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT iotfleetwise AWS Java SDK :: Services :: Io T Fleet Wise diff --git a/services/iotjobsdataplane/pom.xml b/services/iotjobsdataplane/pom.xml index f5e4f9dbb0ae..5583fc5ba585 100644 --- a/services/iotjobsdataplane/pom.xml +++ b/services/iotjobsdataplane/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT iotjobsdataplane AWS Java SDK :: Services :: IoT Jobs Data Plane diff --git a/services/iotroborunner/pom.xml b/services/iotroborunner/pom.xml index 1dab7a56f867..b0ed483d7ef9 100644 --- a/services/iotroborunner/pom.xml +++ b/services/iotroborunner/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT iotroborunner AWS Java SDK :: Services :: IoT Robo Runner diff --git a/services/iotsecuretunneling/pom.xml b/services/iotsecuretunneling/pom.xml index 8dbff7262020..342deb74e4df 
100644 --- a/services/iotsecuretunneling/pom.xml +++ b/services/iotsecuretunneling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT iotsecuretunneling AWS Java SDK :: Services :: IoTSecureTunneling diff --git a/services/iotsitewise/pom.xml b/services/iotsitewise/pom.xml index 98c61b98f5e4..ecd9b37f2750 100644 --- a/services/iotsitewise/pom.xml +++ b/services/iotsitewise/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT iotsitewise AWS Java SDK :: Services :: Io T Site Wise diff --git a/services/iotthingsgraph/pom.xml b/services/iotthingsgraph/pom.xml index 68abfb48dfa8..5d019f6d8d7d 100644 --- a/services/iotthingsgraph/pom.xml +++ b/services/iotthingsgraph/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT iotthingsgraph AWS Java SDK :: Services :: IoTThingsGraph diff --git a/services/iottwinmaker/pom.xml b/services/iottwinmaker/pom.xml index 832a73a6296a..85cd5e46b1b3 100644 --- a/services/iottwinmaker/pom.xml +++ b/services/iottwinmaker/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT iottwinmaker AWS Java SDK :: Services :: Io T Twin Maker diff --git a/services/iotwireless/pom.xml b/services/iotwireless/pom.xml index 6d9977ddbda7..b74427ad6565 100644 --- a/services/iotwireless/pom.xml +++ b/services/iotwireless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT iotwireless AWS Java SDK :: Services :: IoT Wireless diff --git a/services/ivs/pom.xml b/services/ivs/pom.xml index c7c8008a62c4..4a65982eb459 100644 --- a/services/ivs/pom.xml +++ b/services/ivs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ivs AWS Java SDK :: Services :: Ivs diff --git a/services/ivs/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/ivs/src/main/resources/codegen-resources/endpoint-rule-set.json index ca8bf0f0e374..fe19a2be5bef 100644 --- a/services/ivs/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/ivs/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + 
"ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://ivs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://ivs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://ivs-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://ivs-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://ivs.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + 
"supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://ivs.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://ivs.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://ivs.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/ivs/src/main/resources/codegen-resources/service-2.json b/services/ivs/src/main/resources/codegen-resources/service-2.json index b537826a475a..88d0ea5f322a 100644 --- a/services/ivs/src/main/resources/codegen-resources/service-2.json +++ b/services/ivs/src/main/resources/codegen-resources/service-2.json @@ -735,7 +735,7 @@ }, "type":{ "shape":"ChannelType", - "documentation":"

    Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Some types generate multiple qualities (renditions) from the original input; this automatically gives viewers the best experience for their devices and network conditions. Some types provide transcoded video; transcoding allows higher playback quality across a range of download speeds. Default: STANDARD. Valid values:

    • BASIC: Video is transmuxed: Amazon IVS delivers the original input quality to viewers. The viewer’s video-quality choice is limited to the original input. Input resolution can be up to 1080p and bitrate can be up to 1.5 Mbps for 480p and up to 3.5 Mbps for resolutions between 480p and 1080p. Original audio is passed through.

    • STANDARD: Video is transcoded: multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Transcoding allows higher playback quality across a range of download speeds. Resolution can be up to 1080p and bitrate can be up to 8.5 Mbps. Audio is transcoded only for renditions 360p and below; above that, audio is passed through. This is the default when you create a channel.

    • ADVANCED_SD: Video is transcoded; multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Input resolution can be up to 1080p and bitrate can be up to 8.5 Mbps; output is capped at SD quality (480p). You can select an optional transcode preset (see below). Audio for all renditions is transcoded, and an audio-only rendition is available.

    • ADVANCED_HD: Video is transcoded; multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Input resolution can be up to 1080p and bitrate can be up to 8.5 Mbps; output is capped at HD quality (720p). You can select an optional transcode preset (see below). Audio for all renditions is transcoded, and an audio-only rendition is available.

    Optional transcode presets (available for the ADVANCED types) allow you to trade off available download bandwidth and video quality, to optimize the viewing experience. There are two presets:

    • Constrained bandwidth delivery uses a lower bitrate for each quality level. Use it if you have low download bandwidth and/or simple video content (e.g., talking heads)

    • Higher bandwidth delivery uses a higher bitrate for each quality level. Use it if you have high download bandwidth and/or complex video content (e.g., flashes and quick scene changes).

    " + "documentation":"

    Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Default: STANDARD. For details, see Channel Types.

    " } }, "documentation":"

    Object specifying a channel.

    " @@ -827,7 +827,7 @@ }, "type":{ "shape":"ChannelType", - "documentation":"

    Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Some types generate multiple qualities (renditions) from the original input; this automatically gives viewers the best experience for their devices and network conditions. Some types provide transcoded video; transcoding allows higher playback quality across a range of download speeds. Default: STANDARD. Valid values:

    • BASIC: Video is transmuxed: Amazon IVS delivers the original input quality to viewers. The viewer’s video-quality choice is limited to the original input. Input resolution can be up to 1080p and bitrate can be up to 1.5 Mbps for 480p and up to 3.5 Mbps for resolutions between 480p and 1080p. Original audio is passed through.

    • STANDARD: Video is transcoded: multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Transcoding allows higher playback quality across a range of download speeds. Resolution can be up to 1080p and bitrate can be up to 8.5 Mbps. Audio is transcoded only for renditions 360p and below; above that, audio is passed through. This is the default when you create a channel.

    • ADVANCED_SD: Video is transcoded; multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Input resolution can be up to 1080p and bitrate can be up to 8.5 Mbps; output is capped at SD quality (480p). You can select an optional transcode preset (see below). Audio for all renditions is transcoded, and an audio-only rendition is available.

    • ADVANCED_HD: Video is transcoded; multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Input resolution can be up to 1080p and bitrate can be up to 8.5 Mbps; output is capped at HD quality (720p). You can select an optional transcode preset (see below). Audio for all renditions is transcoded, and an audio-only rendition is available.

    Optional transcode presets (available for the ADVANCED types) allow you to trade off available download bandwidth and video quality, to optimize the viewing experience. There are two presets:

    • Constrained bandwidth delivery uses a lower bitrate for each quality level. Use it if you have low download bandwidth and/or simple video content (e.g., talking heads)

    • Higher bandwidth delivery uses a higher bitrate for each quality level. Use it if you have high download bandwidth and/or complex video content (e.g., flashes and quick scene changes).

    " + "documentation":"

    Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Default: STANDARD. For details, see Channel Types.

    " } }, "documentation":"

    Summary information about a channel.

    " @@ -893,7 +893,7 @@ }, "type":{ "shape":"ChannelType", - "documentation":"

    Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Some types generate multiple qualities (renditions) from the original input; this automatically gives viewers the best experience for their devices and network conditions. Some types provide transcoded video; transcoding allows higher playback quality across a range of download speeds. Default: STANDARD. Valid values:

    • BASIC: Video is transmuxed: Amazon IVS delivers the original input quality to viewers. The viewer’s video-quality choice is limited to the original input. Input resolution can be up to 1080p and bitrate can be up to 1.5 Mbps for 480p and up to 3.5 Mbps for resolutions between 480p and 1080p. Original audio is passed through.

    • STANDARD: Video is transcoded: multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Transcoding allows higher playback quality across a range of download speeds. Resolution can be up to 1080p and bitrate can be up to 8.5 Mbps. Audio is transcoded only for renditions 360p and below; above that, audio is passed through. This is the default when you create a channel.

    • ADVANCED_SD: Video is transcoded; multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Input resolution can be up to 1080p and bitrate can be up to 8.5 Mbps; output is capped at SD quality (480p). You can select an optional transcode preset (see below). Audio for all renditions is transcoded, and an audio-only rendition is available.

    • ADVANCED_HD: Video is transcoded; multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Input resolution can be up to 1080p and bitrate can be up to 8.5 Mbps; output is capped at HD quality (720p). You can select an optional transcode preset (see below). Audio for all renditions is transcoded, and an audio-only rendition is available.

    Optional transcode presets (available for the ADVANCED types) allow you to trade off available download bandwidth and video quality, to optimize the viewing experience. There are two presets:

    • Constrained bandwidth delivery uses a lower bitrate for each quality level. Use it if you have low download bandwidth and/or simple video content (e.g., talking heads)

    • Higher bandwidth delivery uses a higher bitrate for each quality level. Use it if you have high download bandwidth and/or complex video content (e.g., flashes and quick scene changes).

    " + "documentation":"

    Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Default: STANDARD. For details, see Channel Types.

    " } } }, @@ -2221,7 +2221,7 @@ }, "type":{ "shape":"ChannelType", - "documentation":"

    Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Some types generate multiple qualities (renditions) from the original input; this automatically gives viewers the best experience for their devices and network conditions. Some types provide transcoded video; transcoding allows higher playback quality across a range of download speeds. Default: STANDARD. Valid values:

    • BASIC: Video is transmuxed: Amazon IVS delivers the original input quality to viewers. The viewer’s video-quality choice is limited to the original input. Input resolution can be up to 1080p and bitrate can be up to 1.5 Mbps for 480p and up to 3.5 Mbps for resolutions between 480p and 1080p. Original audio is passed through.

    • STANDARD: Video is transcoded: multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Transcoding allows higher playback quality across a range of download speeds. Resolution can be up to 1080p and bitrate can be up to 8.5 Mbps. Audio is transcoded only for renditions 360p and below; above that, audio is passed through. This is the default when you create a channel.

    • ADVANCED_SD: Video is transcoded; multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Input resolution can be up to 1080p and bitrate can be up to 8.5 Mbps; output is capped at SD quality (480p). You can select an optional transcode preset (see below). Audio for all renditions is transcoded, and an audio-only rendition is available.

    • ADVANCED_HD: Video is transcoded; multiple qualities are generated from the original input, to automatically give viewers the best experience for their devices and network conditions. Input resolution can be up to 1080p and bitrate can be up to 8.5 Mbps; output is capped at HD quality (720p). You can select an optional transcode preset (see below). Audio for all renditions is transcoded, and an audio-only rendition is available.

    Optional transcode presets (available for the ADVANCED types) allow you to trade off available download bandwidth and video quality, to optimize the viewing experience. There are two presets:

    • Constrained bandwidth delivery uses a lower bitrate for each quality level. Use it if you have low download bandwidth and/or simple video content (e.g., talking heads)

    • Higher bandwidth delivery uses a higher bitrate for each quality level. Use it if you have high download bandwidth and/or complex video content (e.g., flashes and quick scene changes).

    " + "documentation":"

    Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Default: STANDARD. For details, see Channel Types.

    " } } }, diff --git a/services/ivschat/pom.xml b/services/ivschat/pom.xml index 654e44d2c1a7..6c295d515e7f 100644 --- a/services/ivschat/pom.xml +++ b/services/ivschat/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ivschat AWS Java SDK :: Services :: Ivschat diff --git a/services/ivsrealtime/pom.xml b/services/ivsrealtime/pom.xml index 655c74f7324c..8a2975ca698d 100644 --- a/services/ivsrealtime/pom.xml +++ b/services/ivsrealtime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ivsrealtime AWS Java SDK :: Services :: IVS Real Time diff --git a/services/kafka/pom.xml b/services/kafka/pom.xml index 81207b99fe43..9d88ae54f5ad 100644 --- a/services/kafka/pom.xml +++ b/services/kafka/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT kafka AWS Java SDK :: Services :: Kafka diff --git a/services/kafkaconnect/pom.xml b/services/kafkaconnect/pom.xml index 1ca2ce350791..315b38a2c6b4 100644 --- a/services/kafkaconnect/pom.xml +++ b/services/kafkaconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT kafkaconnect AWS Java SDK :: Services :: Kafka Connect diff --git a/services/kafkaconnect/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/kafkaconnect/src/main/resources/codegen-resources/endpoint-rule-set.json index fe885862d300..c5b90d3def0f 100644 --- a/services/kafkaconnect/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/kafkaconnect/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,21 +45,69 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ { - "fn": "parseURL", + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", "argv": [ { - "ref": "Endpoint" + "ref": "Region" } ], - "assign": "url" + "assign": "PartitionResult" } ], "type": "tree", @@ -75,13 +122,17 @@ }, true ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], "type": "tree", "rules": [ { @@ -89,143 +140,86 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - 
}, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "PartitionResult" - }, - "supportsFIPS" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } ] } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + ], + "type": "tree", + "rules": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] + "conditions": [], + "endpoint": { + "url": "https://kafkaconnect-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": "https://kafkaconnect-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], "type": "tree", "rules": [ { @@ -238,78 +232,83 @@ "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://kafkaconnect.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://kafkaconnect.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://kafkaconnect.{Region}.{PartitionResult#dnsSuffix}", + 
"properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://kafkaconnect.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/kafkaconnect/src/main/resources/codegen-resources/endpoint-tests.json b/services/kafkaconnect/src/main/resources/codegen-resources/endpoint-tests.json index b76ab1b04405..85a1b319e430 100644 --- a/services/kafkaconnect/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/kafkaconnect/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,614 +1,146 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - 
"Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.eu-north-1.api.aws" - } - }, - "params": { - 
"UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.eu-west-1.amazonaws.com" - } - }, - 
"params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect-fips.ap-northeast-2.api.aws" + "url": "https://kafkaconnect.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": true + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect-fips.ap-northeast-2.amazonaws.com" + "url": "https://kafkaconnect.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, "Region": "ap-northeast-2", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.ap-northeast-2.api.aws" + "url": "https://kafkaconnect.ap-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": false + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.ap-northeast-2.amazonaws.com" + "url": "https://kafkaconnect.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": false + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect-fips.ap-northeast-1.api.aws" + "url": "https://kafkaconnect.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": true + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect-fips.ap-northeast-1.amazonaws.com" + "url": "https://kafkaconnect.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": true + "Region": "ca-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.ap-northeast-1.api.aws" + "url": "https://kafkaconnect.eu-central-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": false + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region 
ap-northeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.ap-northeast-1.amazonaws.com" + "url": "https://kafkaconnect.eu-north-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": false + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect-fips.sa-east-1.api.aws" + "url": "https://kafkaconnect.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": true + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect-fips.sa-east-1.amazonaws.com" + "url": "https://kafkaconnect.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": true + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.sa-east-1.api.aws" + "url": "https://kafkaconnect.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -619,334 +151,326 @@ } }, "params": { - "UseDualStack": false, "Region": "sa-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://kafkaconnect.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect-fips.cn-north-1.amazonaws.com.cn" + "url": "https://kafkaconnect.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": true + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://kafkaconnect.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": false + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack 
disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.cn-north-1.amazonaws.com.cn" + "url": "https://kafkaconnect.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": false + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://kafkaconnect-fips.ap-southeast-1.api.aws" + "url": "https://kafkaconnect-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect-fips.ap-southeast-1.amazonaws.com" + "url": "https://kafkaconnect-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.ap-southeast-1.api.aws" + "url": "https://kafkaconnect.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.ap-southeast-1.amazonaws.com" + "url": "https://kafkaconnect-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect-fips.ap-southeast-2.api.aws" + "url": "https://kafkaconnect-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://kafkaconnect-fips.ap-southeast-2.amazonaws.com" + "url": "https://kafkaconnect.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://kafkaconnect.ap-southeast-2.api.aws" + "url": "https://kafkaconnect.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.ap-southeast-2.amazonaws.com" + "url": "https://kafkaconnect-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect-fips.us-east-1.api.aws" + "url": "https://kafkaconnect-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://kafkaconnect-fips.us-east-1.amazonaws.com" + "url": "https://kafkaconnect.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.us-east-1.api.aws" + "url": "https://kafkaconnect.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://kafkaconnect.us-east-1.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect-fips.us-east-2.api.aws" + "url": "https://kafkaconnect-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.us-east-2.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - 
"UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.us-east-2.api.aws" + "url": "https://kafkaconnect.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://kafkaconnect.us-east-2.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect-fips.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://kafkaconnect-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.cn-northwest-1.amazonaws.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://kafkaconnect.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.cn-northwest-1.amazonaws.com.cn" + "url": "https://example.com" } }, "params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, "Endpoint": 
"https://example.com" } }, @@ -956,9 +480,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -968,11 +492,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/kafkaconnect/src/main/resources/codegen-resources/service-2.json b/services/kafkaconnect/src/main/resources/codegen-resources/service-2.json index 2d3b248f262b..4ffaad6a1c31 100644 --- a/services/kafkaconnect/src/main/resources/codegen-resources/service-2.json +++ b/services/kafkaconnect/src/main/resources/codegen-resources/service-2.json @@ -573,7 +573,7 @@ "documentation":"
    Information about the capacity allocated to the connector. Exactly one of the two properties must be specified.
    " }, "connectorConfiguration":{ - "shape":"SyntheticCreateConnectorRequest__mapOf__string", + "shape":"__sensitive__mapOf__string", "documentation":"
    A map of keys to values that represent the configuration for the connector.
    " }, "connectorDescription":{ @@ -698,7 +698,7 @@ "documentation":"
    The name of the worker configuration.
    " }, "propertiesFileContent":{ - "shape":"SyntheticCreateWorkerConfigurationRequest__string", + "shape":"__sensitiveString", "documentation":"
    Base64 encoded contents of connect-distributed.properties file.
    " } } @@ -949,7 +949,7 @@ "documentation":"
    The Amazon Resource Name (ARN) of the connector.
    " }, "connectorConfiguration":{ - "shape":"SyntheticDescribeConnectorResponse__mapOf__string", + "shape":"__sensitive__mapOf__string", "documentation":"
    A map of keys to values that represent the configuration for the connector.
    " }, "connectorDescription":{ @@ -1590,26 +1590,6 @@ }, "documentation":"
    Details about the state of a resource.
    " }, - "SyntheticCreateConnectorRequest__mapOf__string":{ - "type":"map", - "key":{"shape":"__string"}, - "value":{"shape":"__string"}, - "sensitive":true - }, - "SyntheticCreateWorkerConfigurationRequest__string":{ - "type":"string", - "sensitive":true - }, - "SyntheticDescribeConnectorResponse__mapOf__string":{ - "type":"map", - "key":{"shape":"__string"}, - "value":{"shape":"__string"}, - "sensitive":true - }, - "SyntheticWorkerConfigurationRevisionDescription__string":{ - "type":"string", - "sensitive":true - }, "TooManyRequestsException":{ "type":"structure", "members":{ @@ -1746,7 +1726,7 @@ "documentation":"
    The description of the worker configuration revision.
    " }, "propertiesFileContent":{ - "shape":"SyntheticWorkerConfigurationRevisionDescription__string", + "shape":"__sensitiveString", "documentation":"
    Base64 encoded contents of the connect-distributed.properties file.
    " }, "revision":{ @@ -1883,6 +1863,16 @@ "max":9223372036854775807, "min":1 }, + "__sensitiveString":{ + "type":"string", + "sensitive":true + }, + "__sensitive__mapOf__string":{ + "type":"map", + "key":{"shape":"__string"}, + "value":{"shape":"__string"}, + "sensitive":true + }, "__string":{"type":"string"}, "__stringMax1024":{ "type":"string", diff --git a/services/kendra/pom.xml b/services/kendra/pom.xml index b73589c87be9..865671bff5e5 100644 --- a/services/kendra/pom.xml +++ b/services/kendra/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT kendra AWS Java SDK :: Services :: Kendra diff --git a/services/kendraranking/pom.xml b/services/kendraranking/pom.xml index f20d54b2bbd8..fc094724459f 100644 --- a/services/kendraranking/pom.xml +++ b/services/kendraranking/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT kendraranking AWS Java SDK :: Services :: Kendra Ranking diff --git a/services/keyspaces/pom.xml b/services/keyspaces/pom.xml index 4d16be2e20c2..10b0a33ff2f8 100644 --- a/services/keyspaces/pom.xml +++ b/services/keyspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT keyspaces AWS Java SDK :: Services :: Keyspaces diff --git a/services/kinesis/pom.xml b/services/kinesis/pom.xml index 9dc90d1215c2..a20a237126d2 100644 --- a/services/kinesis/pom.xml +++ b/services/kinesis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT kinesis AWS Java SDK :: Services :: Amazon Kinesis diff --git a/services/kinesis/src/main/resources/codegen-resources/customization.config b/services/kinesis/src/main/resources/codegen-resources/customization.config index fdf66d583a84..caeb1f7c6509 100644 --- a/services/kinesis/src/main/resources/codegen-resources/customization.config +++ b/services/kinesis/src/main/resources/codegen-resources/customization.config @@ -6,7 +6,7 @@ "customServiceMetadata": { "protocol": "cbor" }, - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "deregisterStreamConsumer", "describeStreamConsumer", "listShards" diff --git a/services/kinesisanalytics/pom.xml b/services/kinesisanalytics/pom.xml index e76049d96456..bcec832296a7 100644 --- a/services/kinesisanalytics/pom.xml +++ b/services/kinesisanalytics/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT kinesisanalytics AWS Java SDK :: Services :: Amazon Kinesis Analytics diff --git a/services/kinesisanalytics/src/main/resources/codegen-resources/customization.config b/services/kinesisanalytics/src/main/resources/codegen-resources/customization.config index 282a5331893b..d1bf750d26b9 100644 --- a/services/kinesisanalytics/src/main/resources/codegen-resources/customization.config +++ b/services/kinesisanalytics/src/main/resources/codegen-resources/customization.config @@ -2,7 +2,7 @@ "verifiedSimpleMethods": [ "listApplications" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "discoverInputSchema" ] } diff --git a/services/kinesisanalyticsv2/pom.xml b/services/kinesisanalyticsv2/pom.xml index 702a9f6c54b0..28e212d644c2 100644 --- a/services/kinesisanalyticsv2/pom.xml +++ b/services/kinesisanalyticsv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT kinesisanalyticsv2 AWS Java SDK :: Services :: Kinesis Analytics V2 diff --git a/services/kinesisvideo/pom.xml b/services/kinesisvideo/pom.xml index 
296f355b1af5..65a7d17f2eaa 100644 --- a/services/kinesisvideo/pom.xml +++ b/services/kinesisvideo/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 kinesisvideo diff --git a/services/kinesisvideo/src/main/resources/codegen-resources/customization.config b/services/kinesisvideo/src/main/resources/codegen-resources/customization.config index becde5e5fc8e..e0972ef57fee 100644 --- a/services/kinesisvideo/src/main/resources/codegen-resources/customization.config +++ b/services/kinesisvideo/src/main/resources/codegen-resources/customization.config @@ -2,7 +2,7 @@ "verifiedSimpleMethods": [ "listStreams" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "listTagsForStream", "describeStream" ] diff --git a/services/kinesisvideoarchivedmedia/pom.xml b/services/kinesisvideoarchivedmedia/pom.xml index cb479896aee8..e69d8b0bac56 100644 --- a/services/kinesisvideoarchivedmedia/pom.xml +++ b/services/kinesisvideoarchivedmedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT kinesisvideoarchivedmedia AWS Java SDK :: Services :: Kinesis Video Archived Media diff --git a/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/customization.config b/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/customization.config index 97db0dd1e81b..232ca942ec4a 100644 --- a/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/customization.config +++ b/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "getHLSStreamingSessionURL" ] } diff --git a/services/kinesisvideomedia/pom.xml b/services/kinesisvideomedia/pom.xml index 55832fdd3b67..da798b83c25b 100644 --- a/services/kinesisvideomedia/pom.xml +++ b/services/kinesisvideomedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT kinesisvideomedia AWS Java SDK :: Services :: Kinesis Video Media diff --git a/services/kinesisvideosignaling/pom.xml b/services/kinesisvideosignaling/pom.xml index 8756eafe4753..ae894036dea3 100644 --- a/services/kinesisvideosignaling/pom.xml +++ b/services/kinesisvideosignaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT kinesisvideosignaling AWS Java SDK :: Services :: Kinesis Video Signaling diff --git a/services/kinesisvideowebrtcstorage/pom.xml b/services/kinesisvideowebrtcstorage/pom.xml index ef9939dacb92..c8c4a6a45159 100644 --- a/services/kinesisvideowebrtcstorage/pom.xml +++ b/services/kinesisvideowebrtcstorage/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT kinesisvideowebrtcstorage AWS Java SDK :: Services :: Kinesis Video Web RTC Storage diff --git a/services/kms/pom.xml b/services/kms/pom.xml index 5aec2c2d6cc2..8b3fec785b6c 100644 --- a/services/kms/pom.xml +++ b/services/kms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT kms AWS Java SDK :: Services :: AWS KMS diff --git a/services/lakeformation/pom.xml b/services/lakeformation/pom.xml index bd50a703a6b9..4e3377436804 100644 --- a/services/lakeformation/pom.xml +++ b/services/lakeformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT lakeformation AWS Java SDK :: Services :: LakeFormation diff --git a/services/lambda/pom.xml 
b/services/lambda/pom.xml index a474099293aa..7d557fd82fe0 100644 --- a/services/lambda/pom.xml +++ b/services/lambda/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT lambda AWS Java SDK :: Services :: AWS Lambda diff --git a/services/lexmodelbuilding/pom.xml b/services/lexmodelbuilding/pom.xml index 83d194f193e0..16266ce2d99e 100644 --- a/services/lexmodelbuilding/pom.xml +++ b/services/lexmodelbuilding/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT lexmodelbuilding AWS Java SDK :: Services :: Amazon Lex Model Building diff --git a/services/lexmodelsv2/pom.xml b/services/lexmodelsv2/pom.xml index da511654ac95..8870c50a4354 100644 --- a/services/lexmodelsv2/pom.xml +++ b/services/lexmodelsv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT lexmodelsv2 AWS Java SDK :: Services :: Lex Models V2 diff --git a/services/lexmodelsv2/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/lexmodelsv2/src/main/resources/codegen-resources/endpoint-rule-set.json index 39dabf8b57c6..5fb6d567373b 100644 --- a/services/lexmodelsv2/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/lexmodelsv2/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": 
"https://models-v2-lex-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://models-v2-lex-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://models-v2-lex-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://models-v2-lex-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://models-v2-lex.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://models-v2-lex.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://models-v2-lex.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://models-v2-lex.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": 
"Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json b/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json index fa71d1cade5f..756675f3c8e3 100644 --- a/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json +++ b/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json @@ -13666,7 +13666,7 @@ "type":"integer", "box":true, "max":3, - "min":1 + "min":0 } }, "documentation":"
    " diff --git a/services/lexruntime/pom.xml b/services/lexruntime/pom.xml index 67b37cfdd201..f65e5db74546 100644 --- a/services/lexruntime/pom.xml +++ b/services/lexruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT lexruntime AWS Java SDK :: Services :: Amazon Lex Runtime diff --git a/services/lexruntimev2/pom.xml b/services/lexruntimev2/pom.xml index b3912070dad0..7cf0ac19328b 100644 --- a/services/lexruntimev2/pom.xml +++ b/services/lexruntimev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT lexruntimev2 AWS Java SDK :: Services :: Lex Runtime V2 diff --git a/services/licensemanager/pom.xml b/services/licensemanager/pom.xml index d163efe6f1ae..9228078fb7b7 100644 --- a/services/licensemanager/pom.xml +++ b/services/licensemanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT licensemanager AWS Java SDK :: Services :: License Manager diff --git a/services/licensemanager/src/main/resources/codegen-resources/customization.config b/services/licensemanager/src/main/resources/codegen-resources/customization.config index b98b012071c8..cb1971ffe74b 100644 --- a/services/licensemanager/src/main/resources/codegen-resources/customization.config +++ b/services/licensemanager/src/main/resources/codegen-resources/customization.config @@ -3,7 +3,7 @@ "getServiceSettings", "listLicenseConfigurations" ], - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "getServiceSettings", "listLicenseConfigurations", "listResourceInventory" diff --git a/services/licensemanagerlinuxsubscriptions/pom.xml b/services/licensemanagerlinuxsubscriptions/pom.xml index c0ddca90edb3..146abbcaaf48 100644 --- a/services/licensemanagerlinuxsubscriptions/pom.xml +++ b/services/licensemanagerlinuxsubscriptions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT licensemanagerlinuxsubscriptions AWS Java SDK :: Services :: License Manager Linux Subscriptions diff --git a/services/licensemanagerusersubscriptions/pom.xml b/services/licensemanagerusersubscriptions/pom.xml index e34ed4a77431..cabd39818aa1 100644 --- a/services/licensemanagerusersubscriptions/pom.xml +++ b/services/licensemanagerusersubscriptions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT licensemanagerusersubscriptions AWS Java SDK :: Services :: License Manager User Subscriptions diff --git a/services/lightsail/pom.xml b/services/lightsail/pom.xml index 53095b2a665d..97a075983be3 100644 --- a/services/lightsail/pom.xml +++ b/services/lightsail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT lightsail AWS Java SDK :: Services :: Amazon Lightsail diff --git a/services/location/pom.xml b/services/location/pom.xml index 4001be5f9eda..e23f0a7570a0 100644 --- a/services/location/pom.xml +++ b/services/location/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT location AWS Java SDK :: Services :: Location diff --git a/services/lookoutequipment/pom.xml b/services/lookoutequipment/pom.xml index 381a2b63c8d0..b9825ec05bd5 100644 --- a/services/lookoutequipment/pom.xml +++ b/services/lookoutequipment/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT lookoutequipment AWS Java SDK :: Services :: Lookout Equipment diff --git a/services/lookoutmetrics/pom.xml 
b/services/lookoutmetrics/pom.xml index 7eadcb29a098..aa05964065ae 100644 --- a/services/lookoutmetrics/pom.xml +++ b/services/lookoutmetrics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT lookoutmetrics AWS Java SDK :: Services :: Lookout Metrics diff --git a/services/lookoutvision/pom.xml b/services/lookoutvision/pom.xml index 44f0a16a00e5..3ec17c8111eb 100644 --- a/services/lookoutvision/pom.xml +++ b/services/lookoutvision/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT lookoutvision AWS Java SDK :: Services :: Lookout Vision diff --git a/services/m2/pom.xml b/services/m2/pom.xml index 4e595cf5fa0e..7bcd1ea8b0b7 100644 --- a/services/m2/pom.xml +++ b/services/m2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT m2 AWS Java SDK :: Services :: M2 diff --git a/services/machinelearning/pom.xml b/services/machinelearning/pom.xml index e21c1a8a41da..1799ec704155 100644 --- a/services/machinelearning/pom.xml +++ b/services/machinelearning/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT machinelearning AWS Java SDK :: Services :: Amazon Machine Learning diff --git a/services/macie/pom.xml b/services/macie/pom.xml index dfab482f65d1..b498ad627204 100644 --- a/services/macie/pom.xml +++ b/services/macie/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT macie AWS Java SDK :: Services :: Macie diff --git a/services/macie/src/main/resources/codegen-resources/customization.config b/services/macie/src/main/resources/codegen-resources/customization.config index 15a246df948d..c0e92ca2cb15 100644 --- a/services/macie/src/main/resources/codegen-resources/customization.config +++ b/services/macie/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "listMemberAccounts", "listS3Resources" ] diff --git a/services/macie2/pom.xml b/services/macie2/pom.xml index 91375a5dd685..368efaa19b6e 100644 --- a/services/macie2/pom.xml +++ b/services/macie2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT macie2 AWS Java SDK :: Services :: Macie2 diff --git a/services/managedblockchain/pom.xml b/services/managedblockchain/pom.xml index 1d235f59ab83..0bc2b9833ee4 100644 --- a/services/managedblockchain/pom.xml +++ b/services/managedblockchain/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT managedblockchain AWS Java SDK :: Services :: ManagedBlockchain diff --git a/services/managedblockchainquery/pom.xml b/services/managedblockchainquery/pom.xml index b50a8479cf96..1b364d20c0eb 100644 --- a/services/managedblockchainquery/pom.xml +++ b/services/managedblockchainquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT managedblockchainquery AWS Java SDK :: Services :: Managed Blockchain Query diff --git a/services/marketplacecatalog/pom.xml b/services/marketplacecatalog/pom.xml index 56bf88cc6742..e454117df1ce 100644 --- a/services/marketplacecatalog/pom.xml +++ b/services/marketplacecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT marketplacecatalog AWS Java SDK :: Services :: Marketplace Catalog diff --git a/services/marketplacecommerceanalytics/pom.xml 
b/services/marketplacecommerceanalytics/pom.xml index 0a5d01b6bb26..29e91ce4da0a 100644 --- a/services/marketplacecommerceanalytics/pom.xml +++ b/services/marketplacecommerceanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT marketplacecommerceanalytics AWS Java SDK :: Services :: AWS Marketplace Commerce Analytics diff --git a/services/marketplaceentitlement/pom.xml b/services/marketplaceentitlement/pom.xml index 20b908726c88..086893dbabb5 100644 --- a/services/marketplaceentitlement/pom.xml +++ b/services/marketplaceentitlement/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT marketplaceentitlement AWS Java SDK :: Services :: AWS Marketplace Entitlement diff --git a/services/marketplacemetering/pom.xml b/services/marketplacemetering/pom.xml index 8711e7594ccd..198f12cafb55 100644 --- a/services/marketplacemetering/pom.xml +++ b/services/marketplacemetering/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT marketplacemetering AWS Java SDK :: Services :: AWS Marketplace Metering Service diff --git a/services/mediaconnect/pom.xml b/services/mediaconnect/pom.xml index b4ebc5183db4..1d3d3f3c9e03 100644 --- a/services/mediaconnect/pom.xml +++ b/services/mediaconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT mediaconnect AWS Java SDK :: Services :: MediaConnect diff --git a/services/mediaconvert/pom.xml b/services/mediaconvert/pom.xml index e3cfe40b7a4a..9d2eee41040e 100644 --- a/services/mediaconvert/pom.xml +++ b/services/mediaconvert/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 mediaconvert diff --git a/services/mediaconvert/src/main/resources/codegen-resources/customization.config b/services/mediaconvert/src/main/resources/codegen-resources/customization.config index 6abaf75b14b1..e5d2b586984e 100644 --- a/services/mediaconvert/src/main/resources/codegen-resources/customization.config +++ b/services/mediaconvert/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,3 @@ { - "blacklistedSimpleMethods" : ["*"] + "excludedSimpleMethods" : ["*"] } diff --git a/services/mediaconvert/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/mediaconvert/src/main/resources/codegen-resources/endpoint-rule-set.json index dcc0061568b6..b3cfd3d2ad88 100644 --- a/services/mediaconvert/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/mediaconvert/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + 
} + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,175 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://mediaconvert-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://mediaconvert.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "ref": "PartitionResult" }, - { - "conditions": [], - "endpoint": { - "url": "https://mediaconvert-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://mediaconvert-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -304,91 +225,134 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - 
"supportsDualStack" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://mediaconvert.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://mediaconvert.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://mediaconvert-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { "conditions": [], - "type": "tree", - "rules": [ + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ { - "conditions": [ + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, { - "fn": "stringEquals", + "fn": "getAttr", "argv": [ { - "ref": "Region" + "ref": "PartitionResult" }, - "cn-northwest-1" + "supportsDualStack" ] } - ], - "endpoint": { - "url": "https://subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [], "endpoint": { - "url": "https://mediaconvert.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://mediaconvert.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "cn-northwest-1" + ] + } + ], + "endpoint": { + "url": "https://subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://mediaconvert.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json index c6ee5fd641da..1cd4efb6ac81 100644 --- a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json +++ b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json @@ -543,7 +543,7 @@ "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], - "documentation": "Retrieve the JSON for a specific completed transcoding job." + "documentation": "Retrieve the JSON for a specific transcoding job." 
}, "GetJobTemplate": { "name": "GetJobTemplate", @@ -1617,7 +1617,20 @@ "TCS", "VHL", "VHC", - "VHR" + "VHR", + "TBL", + "TBC", + "TBR", + "RSL", + "RSR", + "LW", + "RW", + "LFE2", + "LT", + "RT", + "HI", + "NAR", + "M" ] }, "AudioChannelTaggingSettings": { @@ -1645,7 +1658,8 @@ "EAC3_ATMOS", "VORBIS", "OPUS", - "PASSTHROUGH" + "PASSTHROUGH", + "FLAC" ] }, "AudioCodecSettings": { @@ -1681,6 +1695,11 @@ "locationName": "eac3Settings", "documentation": "Required when you set Codec to the value EAC3." }, + "FlacSettings": { + "shape": "FlacSettings", + "locationName": "flacSettings", + "documentation": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value FLAC." + }, "Mp2Settings": { "shape": "Mp2Settings", "locationName": "mp2Settings", @@ -2003,7 +2022,7 @@ "MaxAbrBitrate": { "shape": "__integerMin100000Max100000000", "locationName": "maxAbrBitrate", - "documentation": "Optional. The maximum target bit rate used in your automated ABR stack. Use this value to set an upper limit on the bandwidth consumed by the highest-quality rendition. This is the rendition that is delivered to viewers with the fastest internet connections. If you don't specify a value, MediaConvert uses 8,000,000 (8 mb/s) by default." + "documentation": "Specify the maximum average bitrate for MediaConvert to use in your automated ABR stack. If you don't specify a value, MediaConvert uses 8,000,000 (8 mb/s) by default. The average bitrate of your highest-quality rendition will be equal to or below this value, depending on the quality, complexity, and resolution of your content. Note that the instantaneous maximum bitrate may vary above the value that you specify." }, "MaxRenditions": { "shape": "__integerMin3Max15", @@ -2013,7 +2032,7 @@ "MinAbrBitrate": { "shape": "__integerMin100000Max100000000", "locationName": "minAbrBitrate", - "documentation": "Optional. The minimum target bitrate used in your automated ABR stack. Use this value to set a lower limit on the bitrate of video delivered to viewers with slow internet connections. If you don't specify a value, MediaConvert uses 600,000 (600 kb/s) by default." + "documentation": "Specify the minimum average bitrate for MediaConvert to use in your automated ABR stack. If you don't specify a value, MediaConvert uses 600,000 (600 kb/s) by default. The average bitrate of your lowest-quality rendition will be near this value. Note that the instantaneous minimum bitrate may vary below the value that you specify." }, "Rules": { "shape": "__listOfAutomatedAbrRule", @@ -2054,6 +2073,14 @@ "BIT_10" ] }, + "Av1FilmGrainSynthesis": { + "type": "string", + "documentation": "Film grain synthesis replaces film grain present in your content with similar quality synthesized AV1 film grain. We recommend that you choose Enabled to reduce the bandwidth of your QVBR quality level 5, 6, 7, or 8 outputs. For QVBR quality level 9 or 10 outputs we recommend that you keep the default value, Disabled. When you include Film grain synthesis, you cannot include the Noise reducer preprocessor.", + "enum": [ + "DISABLED", + "ENABLED" + ] + }, "Av1FramerateControl": { "type": "string", "documentation": "Use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. 
If you choose Custom, specify your frame rate as a fraction.", @@ -2107,6 +2134,11 @@ "locationName": "bitDepth", "documentation": "Specify the Bit depth. You can choose 8-bit or 10-bit." }, + "FilmGrainSynthesis": { + "shape": "Av1FilmGrainSynthesis", + "locationName": "filmGrainSynthesis", + "documentation": "Film grain synthesis replaces film grain present in your content with similar quality synthesized AV1 film grain. We recommend that you choose Enabled to reduce the bandwidth of your QVBR quality level 5, 6, 7, or 8 outputs. For QVBR quality level 9 or 10 outputs we recommend that you keep the default value, Disabled. When you include Film grain synthesis, you cannot include the Noise reducer preprocessor." + }, "FramerateControl": { "shape": "Av1FramerateControl", "locationName": "framerateControl", @@ -5410,6 +5442,27 @@ "MILLISECONDS" ] }, + "FlacSettings": { + "type": "structure", + "members": { + "BitDepth": { + "shape": "__integerMin16Max24", + "locationName": "bitDepth", + "documentation": "Specify Bit depth (BitDepth), in bits per sample, to choose the encoding quality for this audio track." + }, + "Channels": { + "shape": "__integerMin1Max8", + "locationName": "channels", + "documentation": "Specify the number of channels in this output audio track. Choosing Mono on the console gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are between 1 and 8." + }, + "SampleRate": { + "shape": "__integerMin22050Max48000", + "locationName": "sampleRate", + "documentation": "Sample rate in hz." + } + }, + "documentation": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value FLAC." + }, "FontScript": { "type": "string", "documentation": "Provide the font script, using an ISO 15924 script code, if the LanguageCode is not sufficient for determining the script type. Where LanguageCode or CustomLanguageCode is sufficient, use \"AUTOMATIC\" or leave unset.", @@ -8868,6 +8921,16 @@ "locationName": "programNumber", "documentation": "Use Program number to specify the program number used in the program map table (PMT) for this output. Default is 1. Program numbers and program map tables are parts of MPEG-2 transport stream containers, used for organizing data." }, + "PtsOffset": { + "shape": "__integerMin0Max3600", + "locationName": "ptsOffset", + "documentation": "Manually specify the initial PTS offset, in seconds, when you set PTS offset to Seconds. Enter an integer from 0 to 3600. Leave blank to keep the default value 2." + }, + "PtsOffsetMode": { + "shape": "TsPtsOffset", + "locationName": "ptsOffsetMode", + "documentation": "Specify the initial presentation timestamp (PTS) offset for your transport stream output. To let MediaConvert automatically determine the initial PTS offset: Keep the default value, Auto. We recommend that you choose Auto for the widest player compatibility. The initial PTS will be at least two seconds and vary depending on your output's bitrate, HRD buffer size and HRD buffer initial fill percentage. To manually specify an initial PTS offset: Choose Seconds. Then specify the number of seconds with PTS offset." + }, "RateMode": { "shape": "M2tsRateMode", "locationName": "rateMode", @@ -9029,6 +9092,16 @@ "locationName": "programNumber", "documentation": "The value of the program number field in the Program Map Table." }, + "PtsOffset": { + "shape": "__integerMin0Max3600", + "locationName": "ptsOffset", + "documentation": "Manually specify the initial PTS offset, in seconds, when you set PTS offset to Seconds. 
Enter an integer from 0 to 3600. Leave blank to keep the default value 2." + }, + "PtsOffsetMode": { + "shape": "TsPtsOffset", + "locationName": "ptsOffsetMode", + "documentation": "Specify the initial presentation timestamp (PTS) offset for your transport stream output. To let MediaConvert automatically determine the initial PTS offset: Keep the default value, Auto. We recommend that you choose Auto for the widest player compatibility. The initial PTS will be at least two seconds and vary depending on your output's bitrate, HRD buffer size and HRD buffer initial fill percentage. To manually specify an initial PTS offset: Choose Seconds. Then specify the number of seconds with PTS offset." + }, "Scte35Pid": { "shape": "__integerMin32Max8182", "locationName": "scte35Pid", @@ -11078,6 +11151,11 @@ "shape": "S3EncryptionSettings", "locationName": "encryption", "documentation": "Settings for how your job outputs are encrypted as they are uploaded to Amazon S3." + }, + "StorageClass": { + "shape": "S3StorageClass", + "locationName": "storageClass", + "documentation": "Specify the S3 storage class to use for this destination." } }, "documentation": "Settings associated with S3 destination" @@ -11121,6 +11199,19 @@ "SERVER_SIDE_ENCRYPTION_KMS" ] }, + "S3StorageClass": { + "type": "string", + "documentation": "Specify the S3 storage class to use for this destination.", + "enum": [ + "STANDARD", + "REDUCED_REDUNDANCY", + "STANDARD_IA", + "ONEZONE_IA", + "INTELLIGENT_TIERING", + "GLACIER", + "DEEP_ARCHIVE" + ] + }, "SampleRangeConversion": { "type": "string", "documentation": "Specify how MediaConvert limits the color sample range for this output. To create a limited range output from a full range input: Choose Limited range squeeze. For full range inputs, MediaConvert performs a linear offset to color samples equally across all pixels and frames. Color samples in 10-bit outputs are limited to 64 through 940, and 8-bit outputs are limited to 16 through 235. Note: For limited range inputs, values for color samples are passed through to your output unchanged. MediaConvert does not limit the sample range. To correct pixels in your input that are out of range or out of gamut: Choose Limited range clip. Use for broadcast applications. MediaConvert conforms any pixels outside of the values that you specify under Minimum YUV and Maximum YUV to limited range bounds. MediaConvert also corrects any YUV values that, when converted to RGB, would be outside the bounds you specify under Minimum RGB tolerance and Maximum RGB tolerance. With either limited range conversion, MediaConvert writes the sample range metadata in the output.", @@ -11489,6 +11580,14 @@ }, "documentation": "Settings specific to caption sources that are specified by track number. Currently, this is only IMSC captions in an IMF package. If your caption source is IMSC 1.1 in a separate xml file, use FileSourceSettings instead of TrackSourceSettings." }, + "TsPtsOffset": { + "type": "string", + "documentation": "Specify the initial presentation timestamp (PTS) offset for your transport stream output. To let MediaConvert automatically determine the initial PTS offset: Keep the default value, Auto. We recommend that you choose Auto for the widest player compatibility. The initial PTS will be at least two seconds and vary depending on your output's bitrate, HRD buffer size and HRD buffer initial fill percentage. To manually specify an initial PTS offset: Choose Seconds. 
Then specify the number of seconds with PTS offset.", + "enum": [ + "AUTO", + "SECONDS" + ] + }, "TtmlDestinationSettings": { "type": "structure", "members": { @@ -13193,6 +13292,11 @@ "min": 1, "max": 64 }, + "__integerMin1Max8": { + "type": "integer", + "min": 1, + "max": 8 + }, "__integerMin22050Max48000": { "type": "integer", "min": 22050, diff --git a/services/medialive/pom.xml b/services/medialive/pom.xml index c2b7f194f313..f005a1eea3c0 100644 --- a/services/medialive/pom.xml +++ b/services/medialive/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 medialive diff --git a/services/medialive/src/main/resources/codegen-resources/customization.config b/services/medialive/src/main/resources/codegen-resources/customization.config index 4f122f084d33..2b6d3476b2dd 100644 --- a/services/medialive/src/main/resources/codegen-resources/customization.config +++ b/services/medialive/src/main/resources/codegen-resources/customization.config @@ -6,7 +6,7 @@ "listOfferings", "listReservations" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "createChannel", "createInput", "createInputSecurityGroup" diff --git a/services/medialive/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/medialive/src/main/resources/codegen-resources/endpoint-rule-set.json index a354a5ca2738..93e47c55b38c 100644 --- a/services/medialive/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/medialive/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": 
[ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://medialive-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://medialive-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://medialive-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://medialive-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://medialive.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://medialive.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://medialive.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + 
"endpoint": { + "url": "https://medialive.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/medialive/src/main/resources/codegen-resources/service-2.json b/services/medialive/src/main/resources/codegen-resources/service-2.json index 12e6a1f61799..4137904a5e69 100644 --- a/services/medialive/src/main/resources/codegen-resources/service-2.json +++ b/services/medialive/src/main/resources/codegen-resources/service-2.json @@ -3031,6 +3031,14 @@ "MEDIUM_LOW" ] }, + "Ac3AttenuationControl": { + "type": "string", + "documentation": "Ac3 Attenuation Control", + "enum": [ + "ATTENUATE_3_DB", + "NONE" + ] + }, "Ac3BitstreamMode": { "type": "string", "documentation": "Ac3 Bitstream Mode", @@ -3116,6 +3124,11 @@ "shape": "Ac3MetadataControl", "locationName": "metadataControl", "documentation": "When set to \"followInput\", encoder metadata will be sourced from the DD, DD+, or DolbyE decoder that supplied this audio data. If audio was not supplied from one of these streams, then the static metadata settings will be used." + }, + "AttenuationControl": { + "shape": "Ac3AttenuationControl", + "locationName": "attenuationControl", + "documentation": "Applies a 3 dB attenuation to the surround channels. Applies only when the coding mode parameter is CODING_MODE_3_2_LFE." } }, "documentation": "Ac3 Settings" @@ -7487,6 +7500,22 @@ "TimecodeConfig" ] }, + "EpochLockingSettings": { + "type": "structure", + "members": { + "CustomEpoch": { + "shape": "__string", + "locationName": "customEpoch", + "documentation": "Optional. Enter a value here to use a custom epoch, instead of the standard epoch (which started at 1970-01-01T00:00:00 UTC). Specify the start time of the custom epoch, in YYYY-MM-DDTHH:MM:SS in UTC. The time must be 2000-01-01T00:00:00 or later. Always set the MM:SS portion to 00:00." + }, + "JamSyncTime": { + "shape": "__string", + "locationName": "jamSyncTime", + "documentation": "Optional. Enter a time for the jam sync. The default is midnight UTC. When epoch locking is enabled, MediaLive performs a daily jam sync on every output encode to ensure timecodes don\u2019t diverge from the wall clock. The jam sync applies only to encodes with frame rate of 29.97 or 59.94 FPS. To override, enter a time in HH:MM:SS in UTC. Always set the MM:SS portion to 00:00." + } + }, + "documentation": "Epoch Locking Settings" + }, "Esam": { "type": "structure", "members": { @@ -7932,6 +7961,11 @@ "shape": "GlobalConfigurationLowFramerateInputs", "locationName": "supportLowFramerateInputs", "documentation": "Adjusts video input buffer for streams with very low video framerates. This is commonly set to enabled for music channels with less than one video frame per second." + }, + "OutputLockingSettings": { + "shape": "OutputLockingSettings", + "locationName": "outputLockingSettings", + "documentation": "Advanced output locking settings" } }, "documentation": "Global Configuration" @@ -9491,6 +9525,15 @@ }, "documentation": "Settings to configure an action so that it occurs as soon as possible." 
}, + "IncludeFillerNalUnits": { + "type": "string", + "documentation": "Include Filler Nal Units", + "enum": [ + "AUTO", + "DROP", + "INCLUDE" + ] + }, "Input": { "type": "structure", "members": { @@ -11654,6 +11697,14 @@ "PASSTHROUGH" ] }, + "M3u8KlvBehavior": { + "type": "string", + "documentation": "M3u8 Klv Behavior", + "enum": [ + "NO_PASSTHROUGH", + "PASSTHROUGH" + ] + }, "M3u8NielsenId3Behavior": { "type": "string", "documentation": "M3u8 Nielsen Id3 Behavior", @@ -11765,6 +11816,16 @@ "shape": "__string", "locationName": "videoPid", "documentation": "Packet Identifier (PID) of the elementary video stream in the transport stream. Can be entered as a decimal or hexadecimal value." + }, + "KlvBehavior": { + "shape": "M3u8KlvBehavior", + "locationName": "klvBehavior", + "documentation": "If set to passthrough, passes any KLV data from the input source to this output." + }, + "KlvDataPids": { + "shape": "__string", + "locationName": "klvDataPids", + "documentation": "Packet Identifier (PID) for input source KLV data to this output. Multiple values are accepted, and can be entered in ranges and/or by comma separation. Can be entered as decimal or hexadecimal values. Each PID specified must be in the range of 32 (or 0x20)..8182 (or 0x1ff6)." } }, "documentation": "Settings information for the .m3u8 container" @@ -13152,6 +13213,20 @@ }, "documentation": "Reference to an OutputDestination ID defined in the channel" }, + "OutputLockingSettings": { + "type": "structure", + "members": { + "EpochLockingSettings": { + "shape": "EpochLockingSettings", + "locationName": "epochLockingSettings" + }, + "PipelineLockingSettings": { + "shape": "PipelineLockingSettings", + "locationName": "pipelineLockingSettings" + } + }, + "documentation": "Output Locking Settings" + }, "OutputSettings": { "type": "structure", "members": { @@ -13245,6 +13320,12 @@ "PIPELINE_1" ] }, + "PipelineLockingSettings": { + "type": "structure", + "members": { + }, + "documentation": "Pipeline Locking Settings" + }, "PipelinePauseStateSettings": { "type": "structure", "members": { @@ -13819,6 +13900,11 @@ "shape": "__integerMin0", "locationName": "restartDelay", "documentation": "If a streaming output fails, number of seconds to wait until a restart is initiated. A value of 0 means never restart." + }, + "IncludeFillerNalUnits": { + "shape": "IncludeFillerNalUnits", + "locationName": "includeFillerNalUnits", + "documentation": "Applies only when the rate control mode (in the codec settings) is CBR (constant bit rate). Controls whether the RTMP output stream is padded (with FILL NAL units) in order to achieve a constant bit rate that is truly constant. When there is no padding, the bandwidth varies (up to the bitrate value in the codec settings). We recommend that you choose Auto." 
} }, "documentation": "Rtmp Group Settings" diff --git a/services/mediapackage/pom.xml b/services/mediapackage/pom.xml index 140674caa346..1a083744eb11 100644 --- a/services/mediapackage/pom.xml +++ b/services/mediapackage/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 mediapackage diff --git a/services/mediapackage/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/mediapackage/src/main/resources/codegen-resources/endpoint-rule-set.json index 471664d79121..94f09fea26ed 100644 --- a/services/mediapackage/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/mediapackage/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://mediapackage-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://mediapackage-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are 
enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://mediapackage-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://mediapackage-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://mediapackage.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://mediapackage.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://mediapackage.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://mediapackage.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/mediapackage/src/main/resources/codegen-resources/endpoint-tests.json b/services/mediapackage/src/main/resources/codegen-resources/endpoint-tests.json index 89d130745a4f..9d863247b5ab 100644 --- a/services/mediapackage/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/mediapackage/src/main/resources/codegen-resources/endpoint-tests.json @@ -351,6 +351,17 @@ "UseDualStack": 
false } }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { @@ -364,6 +375,28 @@ "UseDualStack": false } }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { @@ -377,6 +410,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { @@ -440,6 +484,12 @@ "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/mediapackage/src/main/resources/codegen-resources/service-2.json b/services/mediapackage/src/main/resources/codegen-resources/service-2.json index f591b0e51dc7..65c3f2e00819 100644 --- a/services/mediapackage/src/main/resources/codegen-resources/service-2.json +++ b/services/mediapackage/src/main/resources/codegen-resources/service-2.json @@ -1983,7 +1983,7 @@ "Password": { "documentation": "The system generated password for ingest authentication.", "locationName": "password", - "shape": "__string" + "shape": "SensitiveString" }, "Url": { "documentation": "The ingest URL to which the source stream should be sent.", @@ -1993,7 +1993,7 @@ "Username": { "documentation": "The system generated username for ingest authentication.", "locationName": "username", - "shape": "__string" + "shape": "SensitiveString" } }, "type": "structure" @@ -2655,6 +2655,10 @@ ], "type": "string" }, + "SensitiveString": { + "sensitive": true, + "type": "string" + }, "ServiceUnavailableException": { "documentation": "An unexpected error occurred.", "error": { diff --git a/services/mediapackagev2/pom.xml b/services/mediapackagev2/pom.xml index 16cdfacabf51..855e5893f57a 100644 --- a/services/mediapackagev2/pom.xml +++ b/services/mediapackagev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT mediapackagev2 AWS Java SDK :: Services :: Media Package V2 diff --git a/services/mediapackagevod/pom.xml b/services/mediapackagevod/pom.xml index 9647ed42f001..2e286227aa8f 100644 --- a/services/mediapackagevod/pom.xml +++ b/services/mediapackagevod/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT mediapackagevod AWS Java SDK :: Services :: 
MediaPackage Vod diff --git a/services/mediastore/pom.xml b/services/mediastore/pom.xml index ba5ffed496cc..4cf4582b47e1 100644 --- a/services/mediastore/pom.xml +++ b/services/mediastore/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 mediastore diff --git a/services/mediastore/src/main/resources/codegen-resources/customization.config b/services/mediastore/src/main/resources/codegen-resources/customization.config index cc404c84d6c8..933304e140c4 100644 --- a/services/mediastore/src/main/resources/codegen-resources/customization.config +++ b/services/mediastore/src/main/resources/codegen-resources/customization.config @@ -2,7 +2,7 @@ "verifiedSimpleMethods": [ "listContainers" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "describeContainer" ] } diff --git a/services/mediastoredata/pom.xml b/services/mediastoredata/pom.xml index dc6bdead1a12..9ee3763fec92 100644 --- a/services/mediastoredata/pom.xml +++ b/services/mediastoredata/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 mediastoredata diff --git a/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/MediaStoreDataIntegrationTestBase.java b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/MediaStoreDataIntegrationTestBase.java new file mode 100644 index 000000000000..91c9994ae307 --- /dev/null +++ b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/MediaStoreDataIntegrationTestBase.java @@ -0,0 +1,152 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.mediastoredata; + +import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; + +import io.reactivex.Flowable; +import java.io.ByteArrayInputStream; +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.http.ContentStreamProvider; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.services.mediastore.MediaStoreClient; +import software.amazon.awssdk.services.mediastore.model.Container; +import software.amazon.awssdk.services.mediastore.model.ContainerStatus; +import software.amazon.awssdk.services.mediastore.model.DescribeContainerResponse; +import software.amazon.awssdk.testutils.Waiter; +import software.amazon.awssdk.testutils.service.AwsIntegrationTestBase; + +/** + * Base class for MediaStoreData integration tests. Used for Transfer-Encoding and Request Compression testing. + */ +public class MediaStoreDataIntegrationTestBase extends AwsIntegrationTestBase { + protected static AwsCredentialsProvider credentialsProvider; + protected static MediaStoreClient mediaStoreClient; + protected static URI uri; + + @BeforeAll + public static void init() { + credentialsProvider = getCredentialsProvider(); + mediaStoreClient = MediaStoreClient.builder() + .credentialsProvider(credentialsProvider) + .httpClient(ApacheHttpClient.builder().build()) + .build(); + } + + @AfterEach + public void reset() { + CaptureTransferEncodingHeaderInterceptor.reset(); + } + + protected static Container createContainer(String containerName) { + mediaStoreClient.createContainer(r -> r.containerName(containerName)); + DescribeContainerResponse response = waitContainerToBeActive(containerName); + return response.container(); + } + + private static DescribeContainerResponse waitContainerToBeActive(String containerName) { + return Waiter.run(() -> mediaStoreClient.describeContainer(r -> r.containerName(containerName))) + .until(r -> r.container().status() == ContainerStatus.ACTIVE) + .orFailAfter(Duration.ofMinutes(3)); + } + + protected AsyncRequestBody customAsyncRequestBodyWithoutContentLength(byte[] body) { + return new AsyncRequestBody() { + @Override + public Optional contentLength() { + return Optional.empty(); + } + + @Override + public void subscribe(Subscriber s) { + Flowable.fromPublisher(AsyncRequestBody.fromBytes(body)) + .subscribe(s); + } + }; + } + + protected static class CaptureTransferEncodingHeaderInterceptor implements ExecutionInterceptor { + public static boolean isChunked; + + public static void reset() { + isChunked = false; + } + + @Override + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { + isChunked = context.httpRequest().matchingHeaders("Transfer-Encoding").contains("chunked"); + } + } + + protected static class TestContentProvider 
implements ContentStreamProvider { + private final byte[] content; + private final List createdStreams = new ArrayList<>(); + private CloseTrackingInputStream currentStream; + + protected TestContentProvider(byte[] content) { + this.content = content.clone(); + } + + @Override + public InputStream newStream() { + if (currentStream != null) { + invokeSafely(currentStream::close); + } + currentStream = new CloseTrackingInputStream(new ByteArrayInputStream(content)); + createdStreams.add(currentStream); + return currentStream; + } + + List getCreatedStreams() { + return Collections.unmodifiableList(createdStreams); + } + } + + protected static class CloseTrackingInputStream extends FilterInputStream { + private boolean isClosed = false; + + CloseTrackingInputStream(InputStream in) { + super(in); + } + + @Override + public void close() throws IOException { + super.close(); + isClosed = true; + } + + boolean isClosed() { + return isClosed; + } + } +} diff --git a/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/RequestCompressionStreamingIntegrationTest.java b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/RequestCompressionStreamingIntegrationTest.java new file mode 100644 index 000000000000..228102b8f9f4 --- /dev/null +++ b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/RequestCompressionStreamingIntegrationTest.java @@ -0,0 +1,173 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.mediastoredata; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; + +import java.io.IOException; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.time.Instant; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.core.CompressionConfiguration; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.compression.GzipCompressor; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.services.mediastoredata.model.DeleteObjectRequest; +import software.amazon.awssdk.services.mediastoredata.model.GetObjectRequest; +import software.amazon.awssdk.services.mediastoredata.model.GetObjectResponse; +import software.amazon.awssdk.services.mediastoredata.model.ObjectNotFoundException; +import software.amazon.awssdk.services.mediastoredata.model.PutObjectRequest; +import software.amazon.awssdk.testutils.Waiter; + +/** + * Integration test to verify Request Compression functionalities for streaming operations. Do not delete. 
+ */ +public class RequestCompressionStreamingIntegrationTest extends MediaStoreDataIntegrationTestBase { + protected static final String CONTAINER_NAME = "java-sdk-test-mediastoredata-compression" + Instant.now().toEpochMilli(); + private static final String UNCOMPRESSED_BODY = + "RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest"; + private static String compressedBody; + private static MediaStoreDataClient syncClient; + private static MediaStoreDataAsyncClient asyncClient; + private static PutObjectRequest putObjectRequest; + private static DeleteObjectRequest deleteObjectRequest; + private static GetObjectRequest getObjectRequest; + + @BeforeAll + public static void setup() { + uri = URI.create(createContainer(CONTAINER_NAME).endpoint()); + + CompressionConfiguration compressionConfiguration = + CompressionConfiguration.builder() + .minimumCompressionThresholdInBytes(1) + .requestCompressionEnabled(true) + .build(); + + RequestCompression requestCompressionTrait = RequestCompression.builder() + .encodings("gzip") + .isStreaming(true) + .build(); + + syncClient = MediaStoreDataClient.builder() + .endpointOverride(uri) + .credentialsProvider(credentialsProvider) + .httpClient(ApacheHttpClient.builder().build()) + .overrideConfiguration(o -> o.addExecutionInterceptor(new CaptureTransferEncodingHeaderInterceptor()) + .addExecutionInterceptor(new CaptureContentEncodingHeaderInterceptor()) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + requestCompressionTrait) + .compressionConfiguration(compressionConfiguration)) + .build(); + + asyncClient = MediaStoreDataAsyncClient.builder() + .endpointOverride(uri) + .credentialsProvider(credentialsProvider) + .httpClient(NettyNioAsyncHttpClient.create()) + .overrideConfiguration(o -> o.addExecutionInterceptor(new CaptureTransferEncodingHeaderInterceptor()) + .addExecutionInterceptor(new CaptureContentEncodingHeaderInterceptor()) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + requestCompressionTrait) + .compressionConfiguration(compressionConfiguration)) + .build(); + + putObjectRequest = PutObjectRequest.builder() + .contentType("application/octet-stream") + .path("/foo") + .overrideConfiguration( + o -> o.compressionConfiguration( + c -> c.requestCompressionEnabled(true))) + .build(); + deleteObjectRequest = DeleteObjectRequest.builder().path("/foo").build(); + getObjectRequest = GetObjectRequest.builder().path("/foo").build(); + + Compressor compressor = new GzipCompressor(); + byte[] compressedBodyBytes = compressor.compress(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)).asByteArray(); + compressedBody = new String(compressedBodyBytes); + } + + @AfterAll + public static void tearDown() throws InterruptedException { + syncClient.deleteObject(deleteObjectRequest); + Waiter.run(() -> syncClient.describeObject(r -> r.path("/foo"))) + .untilException(ObjectNotFoundException.class) + .orFailAfter(Duration.ofMinutes(1)); + Thread.sleep(1000); + mediaStoreClient.deleteContainer(r -> r.containerName(CONTAINER_NAME)); + } + + @AfterEach + public void cleanUp() { + CaptureContentEncodingHeaderInterceptor.reset(); + } + + @Test + public void putObject_withSyncStreamingRequestCompression_compressesPayloadAndSendsCorrectly() throws IOException { + TestContentProvider provider = new TestContentProvider(UNCOMPRESSED_BODY.getBytes(StandardCharsets.UTF_8)); + syncClient.putObject(putObjectRequest, RequestBody.fromContentProvider(provider, 
"binary/octet-stream")); + + assertThat(CaptureTransferEncodingHeaderInterceptor.isChunked).isTrue(); + assertThat(CaptureContentEncodingHeaderInterceptor.isGzip).isTrue(); + + ResponseInputStream response = syncClient.getObject(getObjectRequest); + byte[] buffer = new byte[UNCOMPRESSED_BODY.getBytes().length]; + response.read(buffer); + String retrievedContent = new String(buffer); + assertThat(retrievedContent).isEqualTo(UNCOMPRESSED_BODY); + } + + @Test + public void putObject_withAsyncStreamingRequestCompression_compressesPayloadAndSendsCorrectly() throws IOException { + AsyncRequestBody asyncRequestBody = customAsyncRequestBodyWithoutContentLength(UNCOMPRESSED_BODY.getBytes()); + asyncClient.putObject(putObjectRequest, asyncRequestBody).join(); + + assertThat(CaptureTransferEncodingHeaderInterceptor.isChunked).isTrue(); + assertThat(CaptureContentEncodingHeaderInterceptor.isGzip).isTrue(); + + ResponseInputStream response = syncClient.getObject(getObjectRequest); + byte[] buffer = new byte[UNCOMPRESSED_BODY.getBytes().length]; + response.read(buffer); + String retrievedContent = new String(buffer); + assertThat(retrievedContent).isEqualTo(UNCOMPRESSED_BODY); + } + + private static class CaptureContentEncodingHeaderInterceptor implements ExecutionInterceptor { + public static boolean isGzip; + + public static void reset() { + isGzip = false; + } + + @Override + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { + isGzip = context.httpRequest().matchingHeaders("Content-Encoding").contains("gzip"); + } + } +} diff --git a/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/TransferEncodingChunkedIntegrationTest.java b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/TransferEncodingChunkedIntegrationTest.java index acab0a8d6723..b4137a14eea9 100644 --- a/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/TransferEncodingChunkedIntegrationTest.java +++ b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/TransferEncodingChunkedIntegrationTest.java @@ -16,70 +16,38 @@ package software.amazon.awssdk.services.mediastoredata; import static org.assertj.core.api.AssertionsForClassTypes.assertThat; -import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; -import io.reactivex.Flowable; -import java.io.ByteArrayInputStream; -import java.io.FilterInputStream; -import java.io.IOException; -import java.io.InputStream; import java.net.URI; -import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.time.Duration; import java.time.Instant; -import java.util.ArrayList; -import java.util.List; -import java.util.Optional; import org.apache.commons.lang3.RandomStringUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; -import org.reactivestreams.Subscriber; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.core.async.AsyncRequestBody; -import software.amazon.awssdk.core.interceptor.Context; -import software.amazon.awssdk.core.interceptor.ExecutionAttributes; -import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; import software.amazon.awssdk.core.sync.RequestBody; -import software.amazon.awssdk.http.ContentStreamProvider; import software.amazon.awssdk.http.apache.ApacheHttpClient; import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; 
import software.amazon.awssdk.http.urlconnection.UrlConnectionHttpClient; -import software.amazon.awssdk.services.mediastore.MediaStoreClient; -import software.amazon.awssdk.services.mediastore.model.Container; -import software.amazon.awssdk.services.mediastore.model.ContainerStatus; -import software.amazon.awssdk.services.mediastore.model.DescribeContainerResponse; import software.amazon.awssdk.services.mediastoredata.model.DeleteObjectRequest; import software.amazon.awssdk.services.mediastoredata.model.ObjectNotFoundException; import software.amazon.awssdk.services.mediastoredata.model.PutObjectRequest; import software.amazon.awssdk.testutils.Waiter; -import software.amazon.awssdk.testutils.service.AwsIntegrationTestBase; /** * Integration test to verify Transfer-Encoding:chunked functionalities for all supported HTTP clients. Do not delete. */ -public class TransferEncodingChunkedIntegrationTest extends AwsIntegrationTestBase { - private static final String CONTAINER_NAME = "java-sdk-test-" + Instant.now().toEpochMilli(); - private static MediaStoreClient mediaStoreClient; +public class TransferEncodingChunkedIntegrationTest extends MediaStoreDataIntegrationTestBase { + protected static final String CONTAINER_NAME = "java-sdk-test-mediastoredata-transferencoding" + Instant.now().toEpochMilli(); private static MediaStoreDataClient syncClientWithApache; private static MediaStoreDataClient syncClientWithUrlConnection; private static MediaStoreDataAsyncClient asyncClientWithNetty; - private static AwsCredentialsProvider credentialsProvider; - private static Container container; private static PutObjectRequest putObjectRequest; private static DeleteObjectRequest deleteObjectRequest; @BeforeAll public static void setup() { - credentialsProvider = getCredentialsProvider(); - mediaStoreClient = MediaStoreClient.builder() - .credentialsProvider(credentialsProvider) - .httpClient(ApacheHttpClient.builder().build()) - .build(); - container = createContainer(); - URI uri = URI.create(container.endpoint()); - + uri = URI.create(createContainer(CONTAINER_NAME).endpoint()); syncClientWithApache = MediaStoreDataClient.builder() .endpointOverride(uri) .credentialsProvider(credentialsProvider) @@ -112,12 +80,13 @@ public static void setup() { } @AfterAll - public static void tearDown() { + public static void tearDown() throws InterruptedException { syncClientWithApache.deleteObject(deleteObjectRequest); Waiter.run(() -> syncClientWithApache.describeObject(r -> r.path("/foo"))) .untilException(ObjectNotFoundException.class) .orFailAfter(Duration.ofMinutes(1)); - CaptureTransferEncodingHeaderInterceptor.reset(); + Thread.sleep(1000); + mediaStoreClient.deleteContainer(r -> r.containerName(CONTAINER_NAME)); } @Test @@ -136,89 +105,7 @@ public void urlConnectionClientPutObject_withoutContentLength_sendsSuccessfully( @Test public void nettyClientPutObject_withoutContentLength_sendsSuccessfully() { - asyncClientWithNetty.putObject(putObjectRequest, customAsyncRequestBodyWithoutContentLength()).join(); + asyncClientWithNetty.putObject(putObjectRequest, customAsyncRequestBodyWithoutContentLength("TestBody".getBytes())).join(); assertThat(CaptureTransferEncodingHeaderInterceptor.isChunked).isTrue(); } - - private static Container createContainer() { - mediaStoreClient.createContainer(r -> r.containerName(CONTAINER_NAME)); - DescribeContainerResponse response = waitContainerToBeActive(); - return response.container(); - } - - private static DescribeContainerResponse waitContainerToBeActive() { - return 
Waiter.run(() -> mediaStoreClient.describeContainer(r -> r.containerName(CONTAINER_NAME))) - .until(r -> ContainerStatus.ACTIVE.equals(r.container().status())) - .orFailAfter(Duration.ofMinutes(3)); - } - - private static class CaptureTransferEncodingHeaderInterceptor implements ExecutionInterceptor { - private static boolean isChunked; - - public static void reset() { - isChunked = false; - } - - @Override - public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { - isChunked = context.httpRequest().matchingHeaders("Transfer-Encoding").contains("chunked"); - } - } - - private AsyncRequestBody customAsyncRequestBodyWithoutContentLength() { - return new AsyncRequestBody() { - @Override - public Optional contentLength() { - return Optional.empty(); - } - - @Override - public void subscribe(Subscriber s) { - Flowable.fromPublisher(AsyncRequestBody.fromBytes("Random text".getBytes())) - .subscribe(s); - } - }; - } - - private static class TestContentProvider implements ContentStreamProvider { - private final byte[] content; - private final List createdStreams = new ArrayList<>(); - private CloseTrackingInputStream currentStream; - - private TestContentProvider(byte[] content) { - this.content = content; - } - - @Override - public InputStream newStream() { - if (currentStream != null) { - invokeSafely(currentStream::close); - } - currentStream = new CloseTrackingInputStream(new ByteArrayInputStream(content)); - createdStreams.add(currentStream); - return currentStream; - } - - List getCreatedStreams() { - return createdStreams; - } - } - - private static class CloseTrackingInputStream extends FilterInputStream { - private boolean isClosed = false; - - CloseTrackingInputStream(InputStream in) { - super(in); - } - - @Override - public void close() throws IOException { - super.close(); - isClosed = true; - } - - boolean isClosed() { - return isClosed; - } - } } diff --git a/services/mediastoredata/src/it/resources/log4j2.properties b/services/mediastoredata/src/it/resources/log4j2.properties new file mode 100644 index 000000000000..ea24f17148e6 --- /dev/null +++ b/services/mediastoredata/src/it/resources/log4j2.properties @@ -0,0 +1,38 @@ +# +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# + +status = warn + +appender.console.type = Console +appender.console.name = ConsoleAppender +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = %d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n%throwable + +rootLogger.level = info +rootLogger.appenderRef.stdout.ref = ConsoleAppender + +# Uncomment below to enable more specific logging +# +#logger.sdk.name = software.amazon.awssdk +#logger.sdk.level = debug +# +#logger.request.name = software.amazon.awssdk.request +#logger.request.level = debug +# +#logger.apache.name = org.apache.http.wire +#logger.apache.level = debug +# +#logger.netty.name = io.netty.handler.logging +#logger.netty.level = debug \ No newline at end of file diff --git a/services/mediastoredata/src/main/resources/codegen-resources/customization.config b/services/mediastoredata/src/main/resources/codegen-resources/customization.config index 18dafbef2bab..a5746334a293 100644 --- a/services/mediastoredata/src/main/resources/codegen-resources/customization.config +++ b/services/mediastoredata/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "listItems" ] } diff --git a/services/mediatailor/pom.xml b/services/mediatailor/pom.xml index ba3f32adef00..046adc0af14e 100644 --- a/services/mediatailor/pom.xml +++ b/services/mediatailor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT mediatailor AWS Java SDK :: Services :: MediaTailor diff --git a/services/mediatailor/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/mediatailor/src/main/resources/codegen-resources/endpoint-rule-set.json index 03d6980c4d0d..f6487bddd7f4 100644 --- a/services/mediatailor/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/mediatailor/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", 
"argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://api.mediatailor-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://api.mediatailor-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://api.mediatailor-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://api.mediatailor-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://api.mediatailor.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { 
"conditions": [], "endpoint": { - "url": "https://api.mediatailor.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://api.mediatailor.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://api.mediatailor.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/mediatailor/src/main/resources/codegen-resources/service-2.json b/services/mediatailor/src/main/resources/codegen-resources/service-2.json index 9aa11e84a2c7..f7fade28b139 100644 --- a/services/mediatailor/src/main/resources/codegen-resources/service-2.json +++ b/services/mediatailor/src/main/resources/codegen-resources/service-2.json @@ -536,7 +536,7 @@ "members":{ "AccessType":{ "shape":"AccessType", - "documentation":"

    The type of authentication used to access content from HttpConfiguration::BaseUrl on your source location. Accepted value: S3_SIGV4.

    S3_SIGV4 - AWS Signature Version 4 authentication for Amazon S3 hosted virtual-style access. If your source location base URL is an Amazon S3 bucket, MediaTailor can use AWS Signature Version 4 (SigV4) authentication to access the bucket where your source content is stored. Your MediaTailor source location baseURL must follow the S3 virtual hosted-style request URL format. For example, https://bucket-name.s3.Region.amazonaws.com/key-name.

    Before you can use S3_SIGV4, you must meet these requirements:

    • You must allow MediaTailor to access your S3 bucket by granting mediatailor.amazonaws.com principal access in IAM. For information about configuring access in IAM, see Access management in the IAM User Guide.

    • The mediatailor.amazonaws.com service principal must have permissions to read all top level manifests referenced by the VodSource packaging configurations.

    • The caller of the API must have s3:GetObject IAM permissions to read all top level manifests referenced by your MediaTailor VodSource packaging configurations.

    " + "documentation":"

    The type of authentication used to access content from HttpConfiguration::BaseUrl on your source location.

    S3_SIGV4 - AWS Signature Version 4 authentication for Amazon S3 hosted virtual-style access. If your source location base URL is an Amazon S3 bucket, MediaTailor can use AWS Signature Version 4 (SigV4) authentication to access the bucket where your source content is stored. Your MediaTailor source location baseURL must follow the S3 virtual hosted-style request URL format. For example, https://bucket-name.s3.Region.amazonaws.com/key-name.

    Before you can use S3_SIGV4, you must meet these requirements:

    • You must allow MediaTailor to access your S3 bucket by granting mediatailor.amazonaws.com principal access in IAM. For information about configuring access in IAM, see Access management in the IAM User Guide.

    • The mediatailor.amazonaws.com service principal must have permissions to read all top level manifests referenced by the VodSource packaging configurations.

    • The caller of the API must have s3:GetObject IAM permissions to read all top level manifests referenced by your MediaTailor VodSource packaging configurations.

    AUTODETECT_SIGV4 - AWS Signature Version 4 authentication for a set of supported services: MediaPackage Version 2 and Amazon S3 hosted virtual-style access. If your source location base URL is a MediaPackage Version 2 endpoint or an Amazon S3 bucket, MediaTailor can use AWS Signature Version 4 (SigV4) authentication to access the resource where your source content is stored.

    Before you can use AUTODETECT_SIGV4 with a MediaPackage Version 2 endpoint, you must meet these requirements:

    • You must grant MediaTailor access to your MediaPackage endpoint by granting mediatailor.amazonaws.com principal access in an Origin Access policy on the endpoint.

    • Your MediaTailor source location base URL must be a MediaPackage V2 endpoint.

    • The caller of the API must have mediapackagev2:GetObject IAM permissions to read all top level manifests referenced by the MediaTailor source packaging configurations.

    Before you can use AUTODETECT_SIGV4 with an Amazon S3 bucket, you must meet these requirements:

    • You must grant MediaTailor access to your S3 bucket by granting mediatailor.amazonaws.com principal access in IAM. For more information about configuring access in IAM, see Access management in the IAM User Guide.

    • The mediatailor.amazonaws.com service principal must have permissions to read all top-level manifests referenced by the VodSource packaging configurations.

    • The caller of the API must have s3:GetObject IAM permissions to read all top level manifests referenced by your MediaTailor VodSource packaging configurations.
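    A minimal sketch of how a caller could opt into the new enum value through the Java SDK once this model is regenerated. The type and member names below (MediaTailorClient, CreateSourceLocationRequest, HttpConfiguration, AccessConfiguration, AccessType.AUTODETECT_SIGV4) are assumptions based on the existing MediaTailor service model plus the enum addition in this change; the bucket URL and source location name are placeholders.

    import software.amazon.awssdk.services.mediatailor.MediaTailorClient;
    import software.amazon.awssdk.services.mediatailor.model.AccessConfiguration;
    import software.amazon.awssdk.services.mediatailor.model.AccessType;
    import software.amazon.awssdk.services.mediatailor.model.CreateSourceLocationRequest;
    import software.amazon.awssdk.services.mediatailor.model.CreateSourceLocationResponse;
    import software.amazon.awssdk.services.mediatailor.model.HttpConfiguration;

    public class CreateAutodetectSourceLocation {
        public static void main(String[] args) {
            try (MediaTailorClient mediaTailor = MediaTailorClient.create()) {
                CreateSourceLocationRequest request = CreateSourceLocationRequest.builder()
                        .sourceLocationName("my-source-location")                          // placeholder name
                        .httpConfiguration(HttpConfiguration.builder()
                                .baseUrl("https://bucket-name.s3.us-east-1.amazonaws.com") // S3 virtual hosted-style URL (placeholder)
                                .build())
                        .accessConfiguration(AccessConfiguration.builder()
                                .accessType(AccessType.AUTODETECT_SIGV4)                   // new enum value added in this change
                                .build())
                        .build();

                CreateSourceLocationResponse response = mediaTailor.createSourceLocation(request);
                System.out.println("Created source location: " + response.arn());
            }
        }
    }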

    " }, "SecretsManagerAccessTokenConfiguration":{ "shape":"SecretsManagerAccessTokenConfiguration", @@ -549,7 +549,8 @@ "type":"string", "enum":[ "S3_SIGV4", - "SECRETS_MANAGER_ACCESS_TOKEN" + "SECRETS_MANAGER_ACCESS_TOKEN", + "AUTODETECT_SIGV4" ] }, "AdBreak":{ diff --git a/services/medicalimaging/pom.xml b/services/medicalimaging/pom.xml index 5ebe30ed9f6c..6f506ec3c0c5 100644 --- a/services/medicalimaging/pom.xml +++ b/services/medicalimaging/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT medicalimaging AWS Java SDK :: Services :: Medical Imaging diff --git a/services/memorydb/pom.xml b/services/memorydb/pom.xml index 509d976f4b5b..d3cbdb09d220 100644 --- a/services/memorydb/pom.xml +++ b/services/memorydb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT memorydb AWS Java SDK :: Services :: Memory DB diff --git a/services/mgn/pom.xml b/services/mgn/pom.xml index 829c6e761799..c67b3baa02b3 100644 --- a/services/mgn/pom.xml +++ b/services/mgn/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT mgn AWS Java SDK :: Services :: Mgn diff --git a/services/migrationhub/pom.xml b/services/migrationhub/pom.xml index 07917103ac08..ea6924dec27e 100644 --- a/services/migrationhub/pom.xml +++ b/services/migrationhub/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 migrationhub diff --git a/services/migrationhub/src/main/resources/codegen-resources/customization.config b/services/migrationhub/src/main/resources/codegen-resources/customization.config index 6abaf75b14b1..e5d2b586984e 100644 --- a/services/migrationhub/src/main/resources/codegen-resources/customization.config +++ b/services/migrationhub/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,3 @@ { - "blacklistedSimpleMethods" : ["*"] + "excludedSimpleMethods" : ["*"] } diff --git a/services/migrationhubconfig/pom.xml b/services/migrationhubconfig/pom.xml index bf216186376e..ca38b92237d5 100644 --- a/services/migrationhubconfig/pom.xml +++ b/services/migrationhubconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT migrationhubconfig AWS Java SDK :: Services :: MigrationHub Config diff --git a/services/migrationhuborchestrator/pom.xml b/services/migrationhuborchestrator/pom.xml index 472aeedcb6a6..f0081ca3db96 100644 --- a/services/migrationhuborchestrator/pom.xml +++ b/services/migrationhuborchestrator/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT migrationhuborchestrator AWS Java SDK :: Services :: Migration Hub Orchestrator diff --git a/services/migrationhubrefactorspaces/pom.xml b/services/migrationhubrefactorspaces/pom.xml index a08bb0239521..f6260ccd3348 100644 --- a/services/migrationhubrefactorspaces/pom.xml +++ b/services/migrationhubrefactorspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT migrationhubrefactorspaces AWS Java SDK :: Services :: Migration Hub Refactor Spaces diff --git a/services/migrationhubstrategy/pom.xml b/services/migrationhubstrategy/pom.xml index bab3418a884f..74bdfd9f8d39 100644 --- a/services/migrationhubstrategy/pom.xml +++ b/services/migrationhubstrategy/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT migrationhubstrategy AWS Java SDK :: Services :: Migration Hub Strategy diff --git 
a/services/mobile/pom.xml b/services/mobile/pom.xml index b6c4e7f66125..9dca3950fd34 100644 --- a/services/mobile/pom.xml +++ b/services/mobile/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 mobile diff --git a/services/mobile/src/main/resources/codegen-resources/customization.config b/services/mobile/src/main/resources/codegen-resources/customization.config index 953b9278a889..27d3c7e09b78 100644 --- a/services/mobile/src/main/resources/codegen-resources/customization.config +++ b/services/mobile/src/main/resources/codegen-resources/customization.config @@ -3,7 +3,7 @@ "listBundles", "listProjects" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "createProject" ] } diff --git a/services/mq/pom.xml b/services/mq/pom.xml index 606a519775d0..2c54a8b650a7 100644 --- a/services/mq/pom.xml +++ b/services/mq/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 mq diff --git a/services/mq/src/main/resources/codegen-resources/customization.config b/services/mq/src/main/resources/codegen-resources/customization.config index a0dbd42cc129..fd2da26cf900 100644 --- a/services/mq/src/main/resources/codegen-resources/customization.config +++ b/services/mq/src/main/resources/codegen-resources/customization.config @@ -3,7 +3,7 @@ "listBrokers", "listConfigurations" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "createBroker", "createConfiguration" ] diff --git a/services/mturk/pom.xml b/services/mturk/pom.xml index 2608820fd5ee..c83be8f43513 100644 --- a/services/mturk/pom.xml +++ b/services/mturk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT mturk AWS Java SDK :: Services :: Amazon Mechanical Turk Requester diff --git a/services/mturk/src/main/resources/codegen-resources/customization.config b/services/mturk/src/main/resources/codegen-resources/customization.config index 1827bbb57cbd..8d6cca6e6d53 100644 --- a/services/mturk/src/main/resources/codegen-resources/customization.config +++ b/services/mturk/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : ["listBonusPayments"], + "excludedSimpleMethods" : ["listBonusPayments"], "verifiedSimpleMethods" : [ "listWorkerBlocks", "listReviewableHITs", diff --git a/services/mwaa/pom.xml b/services/mwaa/pom.xml index 67b54ab349a3..d4b5c41fa65f 100644 --- a/services/mwaa/pom.xml +++ b/services/mwaa/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT mwaa AWS Java SDK :: Services :: MWAA diff --git a/services/neptune/pom.xml b/services/neptune/pom.xml index 2f77db6b8226..a46dabd16fb4 100644 --- a/services/neptune/pom.xml +++ b/services/neptune/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT neptune AWS Java SDK :: Services :: Neptune diff --git a/services/neptunedata/pom.xml b/services/neptunedata/pom.xml new file mode 100644 index 000000000000..3ced44ab92d3 --- /dev/null +++ b/services/neptunedata/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.20.144-SNAPSHOT + + neptunedata + AWS Java SDK :: Services :: Neptunedata + The AWS Java SDK for Neptunedata module holds the client classes that are used for + communicating with Neptunedata. 
+ + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.neptunedata + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/neptunedata/src/main/resources/codegen-resources/customization.config b/services/neptunedata/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..1afec6c42d9f --- /dev/null +++ b/services/neptunedata/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,3 @@ +{ + "customErrorCodeFieldName": "code" +} diff --git a/services/neptunedata/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/neptunedata/src/main/resources/codegen-resources/endpoint-rule-set.json new file mode 100644 index 000000000000..8775e2ea8eb6 --- /dev/null +++ b/services/neptunedata/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": 
"booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://neptune-db-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://neptune-db-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://neptune-db.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://neptune-db.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] + } + ] +} \ No newline at end of file diff --git a/services/neptunedata/src/main/resources/codegen-resources/endpoint-tests.json b/services/neptunedata/src/main/resources/codegen-resources/endpoint-tests.json new file mode 100644 index 000000000000..0c56273f2458 --- /dev/null +++ b/services/neptunedata/src/main/resources/codegen-resources/endpoint-tests.json @@ -0,0 +1,314 @@ +{ + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://neptune-db-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://neptune-db-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://neptune-db.us-east-1.api.aws" + } + }, + 
"params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://neptune-db.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://neptune-db-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://neptune-db-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://neptune-db.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://neptune-db.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://neptune-db-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://neptune-db-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://neptune-db.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://neptune-db.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://neptune-db-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region 
us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://neptune-db.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://neptune-db-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://neptune-db.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/services/neptunedata/src/main/resources/codegen-resources/paginators-1.json b/services/neptunedata/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..5677bd8e4a2d --- /dev/null +++ b/services/neptunedata/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,4 @@ +{ + "pagination": { + } +} diff --git a/services/neptunedata/src/main/resources/codegen-resources/service-2.json b/services/neptunedata/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..30689da94826 --- /dev/null +++ b/services/neptunedata/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,4512 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2023-08-01", + 
"endpointPrefix":"neptune-db", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon NeptuneData", + "serviceId":"neptunedata", + "signatureVersion":"v4", + "signingName":"neptune-db", + "uid":"neptunedata-2023-08-01" + }, + "operations":{ + "CancelGremlinQuery":{ + "name":"CancelGremlinQuery", + "http":{ + "method":"DELETE", + "requestUri":"/gremlin/status/{queryId}", + "responseCode":200 + }, + "input":{"shape":"CancelGremlinQueryInput"}, + "output":{"shape":"CancelGremlinQueryOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"FailureByQueryException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ParsingException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"TimeLimitExceededException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    Cancels a Gremlin query. See Gremlin query cancellation for more information.

    ", + "idempotent":true + }, + "CancelLoaderJob":{ + "name":"CancelLoaderJob", + "http":{ + "method":"DELETE", + "requestUri":"/loader/{loadId}", + "responseCode":200 + }, + "input":{"shape":"CancelLoaderJobInput"}, + "output":{"shape":"CancelLoaderJobOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"BulkLoadIdNotFoundException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"LoadUrlAccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"InternalFailureException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    Cancels a specified load job. This is an HTTP DELETE request.

    See Neptune Loader Get-Status API for more information.

    ", + "idempotent":true + }, + "CancelMLDataProcessingJob":{ + "name":"CancelMLDataProcessingJob", + "http":{ + "method":"DELETE", + "requestUri":"/ml/dataprocessing/{id}", + "responseCode":200 + }, + "input":{"shape":"CancelMLDataProcessingJobInput"}, + "output":{"shape":"CancelMLDataProcessingJobOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Cancels a Neptune ML data processing job. See The dataprocessing command.

    ", + "idempotent":true + }, + "CancelMLModelTrainingJob":{ + "name":"CancelMLModelTrainingJob", + "http":{ + "method":"DELETE", + "requestUri":"/ml/modeltraining/{id}", + "responseCode":200 + }, + "input":{"shape":"CancelMLModelTrainingJobInput"}, + "output":{"shape":"CancelMLModelTrainingJobOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Cancels a Neptune ML model training job. See Model training using the modeltraining command.

    ", + "idempotent":true + }, + "CancelMLModelTransformJob":{ + "name":"CancelMLModelTransformJob", + "http":{ + "method":"DELETE", + "requestUri":"/ml/modeltransform/{id}", + "responseCode":200 + }, + "input":{"shape":"CancelMLModelTransformJobInput"}, + "output":{"shape":"CancelMLModelTransformJobOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Cancels a specified model transform job. See Use a trained model to generate new model artifacts.

    ", + "idempotent":true + }, + "CancelOpenCypherQuery":{ + "name":"CancelOpenCypherQuery", + "http":{ + "method":"DELETE", + "requestUri":"/opencypher/status/{queryId}", + "responseCode":200 + }, + "input":{"shape":"CancelOpenCypherQueryInput"}, + "output":{"shape":"CancelOpenCypherQueryOutput"}, + "errors":[ + {"shape":"InvalidNumericDataException"}, + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"FailureByQueryException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ParsingException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"TimeLimitExceededException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    Cancels a specified openCypher query. See Neptune openCypher status endpoint for more information.

    ", + "idempotent":true + }, + "CreateMLEndpoint":{ + "name":"CreateMLEndpoint", + "http":{ + "method":"POST", + "requestUri":"/ml/endpoints", + "responseCode":200 + }, + "input":{"shape":"CreateMLEndpointInput"}, + "output":{"shape":"CreateMLEndpointOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Creates a new Neptune ML inference endpoint that lets you query one specific model that the model-training process constructed. See Managing inference endpoints using the endpoints command.

    " + }, + "DeleteMLEndpoint":{ + "name":"DeleteMLEndpoint", + "http":{ + "method":"DELETE", + "requestUri":"/ml/endpoints/{id}", + "responseCode":200 + }, + "input":{"shape":"DeleteMLEndpointInput"}, + "output":{"shape":"DeleteMLEndpointOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Cancels the creation of a Neptune ML inference endpoint. See Managing inference endpoints using the endpoints command.

    ", + "idempotent":true + }, + "DeletePropertygraphStatistics":{ + "name":"DeletePropertygraphStatistics", + "http":{ + "method":"DELETE", + "requestUri":"/propertygraph/statistics", + "responseCode":200 + }, + "output":{"shape":"DeletePropertygraphStatisticsOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"StatisticsNotAvailableException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    Deletes statistics for Gremlin and openCypher (property graph) data.

    ", + "idempotent":true + }, + "DeleteSparqlStatistics":{ + "name":"DeleteSparqlStatistics", + "http":{ + "method":"DELETE", + "requestUri":"/sparql/statistics", + "responseCode":200 + }, + "output":{"shape":"DeleteSparqlStatisticsOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"StatisticsNotAvailableException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    Deletes SPARQL statistics.

    ", + "idempotent":true + }, + "ExecuteFastReset":{ + "name":"ExecuteFastReset", + "http":{ + "method":"POST", + "requestUri":"/system", + "responseCode":200 + }, + "input":{"shape":"ExecuteFastResetInput"}, + "output":{"shape":"ExecuteFastResetOutput"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"ServerShutdownException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"MethodNotAllowedException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    The fast reset REST API lets you reset a Neptune graph quickly and easily, removing all of its data.

    Neptune fast reset is a two-step process. First you call ExecuteFastReset with action set to initiateDatabaseReset. This returns a UUID token which you then include when calling ExecuteFastReset again with action set to performDatabaseReset. See Empty an Amazon Neptune DB cluster using the fast reset API.
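    A sketch of that two-step flow with the Java client this module should generate. All generated names here (NeptunedataClient, Action, the executeFastReset request members, payload().token()) are assumptions based on this service model and the SDK's usual code generation conventions; the cluster endpoint is a placeholder, since neptune-db is reached through your DB cluster endpoint rather than a public regional endpoint.

    import java.net.URI;
    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.neptunedata.NeptunedataClient;
    import software.amazon.awssdk.services.neptunedata.model.Action;
    import software.amazon.awssdk.services.neptunedata.model.ExecuteFastResetResponse;

    public class FastResetSketch {
        public static void main(String[] args) {
            // neptune-db is served by your own cluster, so the client is pointed at the
            // cluster endpoint directly (placeholder host below).
            try (NeptunedataClient client = NeptunedataClient.builder()
                    .endpointOverride(URI.create("https://my-cluster.cluster-abc123.us-east-1.neptune.amazonaws.com:8182"))
                    .region(Region.US_EAST_1)
                    .build()) {

                // Step 1: ask the database for a one-time reset token.
                ExecuteFastResetResponse initiate = client.executeFastReset(r -> r
                        .action(Action.INITIATE_DATABASE_RESET));
                String token = initiate.payload().token();

                // Step 2: perform the reset, passing back the token from step 1.
                ExecuteFastResetResponse perform = client.executeFastReset(r -> r
                        .action(Action.PERFORM_DATABASE_RESET)
                        .token(token));
                System.out.println("Reset status: " + perform.status());
            }
        }
    }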

    ", + "idempotent":true + }, + "ExecuteGremlinExplainQuery":{ + "name":"ExecuteGremlinExplainQuery", + "http":{ + "method":"POST", + "requestUri":"/gremlin/explain", + "responseCode":200 + }, + "input":{"shape":"ExecuteGremlinExplainQueryInput"}, + "output":{"shape":"ExecuteGremlinExplainQueryOutput"}, + "errors":[ + {"shape":"QueryTooLargeException"}, + {"shape":"BadRequestException"}, + {"shape":"QueryLimitExceededException"}, + {"shape":"InvalidParameterException"}, + {"shape":"QueryLimitException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"CancelledByUserException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"FailureByQueryException"}, + {"shape":"MemoryLimitExceededException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"MalformedQueryException"}, + {"shape":"ParsingException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"TimeLimitExceededException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    Executes a Gremlin Explain query.

    Amazon Neptune has added a Gremlin feature named explain that provides a self-service tool for understanding the execution approach being taken by the Neptune engine for the query. You invoke it by adding an explain parameter to an HTTP call that submits a Gremlin query.

    The explain feature provides information about the logical structure of query execution plans. You can use this information to identify potential evaluation and execution bottlenecks and to tune your query, as explained in Tuning Gremlin queries. You can also use query hints to improve query execution plans.

    " + }, + "ExecuteGremlinProfileQuery":{ + "name":"ExecuteGremlinProfileQuery", + "http":{ + "method":"POST", + "requestUri":"/gremlin/profile", + "responseCode":200 + }, + "input":{"shape":"ExecuteGremlinProfileQueryInput"}, + "output":{"shape":"ExecuteGremlinProfileQueryOutput"}, + "errors":[ + {"shape":"QueryTooLargeException"}, + {"shape":"BadRequestException"}, + {"shape":"QueryLimitExceededException"}, + {"shape":"InvalidParameterException"}, + {"shape":"QueryLimitException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"CancelledByUserException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"FailureByQueryException"}, + {"shape":"MemoryLimitExceededException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"MalformedQueryException"}, + {"shape":"ParsingException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"TimeLimitExceededException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    Executes a Gremlin Profile query, which runs a specified traversal, collects various metrics about the run, and produces a profile report as output. See Gremlin profile API in Neptune for details.

    " + }, + "ExecuteGremlinQuery":{ + "name":"ExecuteGremlinQuery", + "http":{ + "method":"POST", + "requestUri":"/gremlin", + "responseCode":200 + }, + "input":{"shape":"ExecuteGremlinQueryInput"}, + "output":{"shape":"ExecuteGremlinQueryOutput"}, + "errors":[ + {"shape":"QueryTooLargeException"}, + {"shape":"BadRequestException"}, + {"shape":"QueryLimitExceededException"}, + {"shape":"InvalidParameterException"}, + {"shape":"QueryLimitException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"CancelledByUserException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"FailureByQueryException"}, + {"shape":"MemoryLimitExceededException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"MalformedQueryException"}, + {"shape":"ParsingException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"TimeLimitExceededException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    This command executes a Gremlin query. Amazon Neptune is compatible with Apache TinkerPop3 and Gremlin, so you can use the Gremlin traversal language to query the graph, as described under The Graph in the Apache TinkerPop3 documentation. More details can also be found in Accessing a Neptune graph with Gremlin.
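    A short sketch, assuming a NeptunedataClient wired to the cluster endpoint as in the fast-reset sketch above, and assuming the generated request exposes the query text as gremlinQuery and the payload as result():

    // Assumes 'client' is the NeptunedataClient from the fast-reset sketch above.
    ExecuteGremlinQueryResponse gremlin = client.executeGremlinQuery(r -> r
            .gremlinQuery("g.V().hasLabel('person').limit(5).valueMap(true)")); // assumed member name for the query text
    System.out.println(gremlin.result()); // untyped JSON document with the traversal results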

    " + }, + "ExecuteOpenCypherExplainQuery":{ + "name":"ExecuteOpenCypherExplainQuery", + "http":{ + "method":"POST", + "requestUri":"/opencypher/explain", + "responseCode":200 + }, + "input":{"shape":"ExecuteOpenCypherExplainQueryInput"}, + "output":{"shape":"ExecuteOpenCypherExplainQueryOutput"}, + "errors":[ + {"shape":"QueryTooLargeException"}, + {"shape":"InvalidNumericDataException"}, + {"shape":"BadRequestException"}, + {"shape":"QueryLimitExceededException"}, + {"shape":"InvalidParameterException"}, + {"shape":"QueryLimitException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"CancelledByUserException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"FailureByQueryException"}, + {"shape":"MemoryLimitExceededException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"MalformedQueryException"}, + {"shape":"ParsingException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"TimeLimitExceededException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    Executes an openCypher explain request. See The openCypher explain feature for more information.

    " + }, + "ExecuteOpenCypherQuery":{ + "name":"ExecuteOpenCypherQuery", + "http":{ + "method":"POST", + "requestUri":"/opencypher", + "responseCode":200 + }, + "input":{"shape":"ExecuteOpenCypherQueryInput"}, + "output":{"shape":"ExecuteOpenCypherQueryOutput"}, + "errors":[ + {"shape":"QueryTooLargeException"}, + {"shape":"InvalidNumericDataException"}, + {"shape":"BadRequestException"}, + {"shape":"QueryLimitExceededException"}, + {"shape":"InvalidParameterException"}, + {"shape":"QueryLimitException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"CancelledByUserException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"FailureByQueryException"}, + {"shape":"MemoryLimitExceededException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"MalformedQueryException"}, + {"shape":"ParsingException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"TimeLimitExceededException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    Executes an openCypher query. See Accessing the Neptune Graph with openCypher for more information.

    Neptune supports building graph applications using openCypher, which is currently one of the most popular query languages among developers working with graph databases. Developers, business analysts, and data scientists like openCypher's declarative, SQL-inspired syntax because it provides a familiar structure in which to query property graphs.

    The openCypher language was originally developed by Neo4j, then open-sourced in 2015 and contributed to the openCypher project under an Apache 2 open-source license.
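    A short sketch, under the same client assumption as above; openCypherQuery, parameters, and results() are assumptions about the generated request and response members, and the parameters value is a placeholder JSON document:

    // Assumes 'client' is the NeptunedataClient from the fast-reset sketch above.
    ExecuteOpenCypherQueryResponse cypher = client.executeOpenCypherQuery(r -> r
            .openCypherQuery("MATCH (p:person) WHERE p.name = $name RETURN p LIMIT 10")
            .parameters("{\"name\": \"alice\"}")); // query parameters passed as a JSON document string (assumed)
    System.out.println(cypher.results());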

    " + }, + "GetEngineStatus":{ + "name":"GetEngineStatus", + "http":{ + "method":"GET", + "requestUri":"/status", + "responseCode":200 + }, + "output":{"shape":"GetEngineStatusOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"InternalFailureException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Checks the status of the graph database on the host.
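    A short sketch, under the same client assumption as above. Because GetEngineStatus has no input, the generator is expected to emit a no-argument convenience overload (the kind of "simple method" governed by the excludedSimpleMethods customization seen elsewhere in this patch):

    // Assumes 'client' is the NeptunedataClient from the fast-reset sketch above.
    // No-argument "simple method" overload, assumed to be generated because the
    // operation has no required input; handy for verifying connectivity to the cluster.
    GetEngineStatusResponse engineStatus = client.getEngineStatus();
    System.out.println(engineStatus);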

    " + }, + "GetGremlinQueryStatus":{ + "name":"GetGremlinQueryStatus", + "http":{ + "method":"GET", + "requestUri":"/gremlin/status/{queryId}", + "responseCode":200 + }, + "input":{"shape":"GetGremlinQueryStatusInput"}, + "output":{"shape":"GetGremlinQueryStatusOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"FailureByQueryException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ParsingException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"TimeLimitExceededException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    Gets the status of a specified Gremlin query.

    " + }, + "GetLoaderJobStatus":{ + "name":"GetLoaderJobStatus", + "http":{ + "method":"GET", + "requestUri":"/loader/{loadId}", + "responseCode":200 + }, + "input":{"shape":"GetLoaderJobStatusInput"}, + "output":{"shape":"GetLoaderJobStatusOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"BulkLoadIdNotFoundException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"LoadUrlAccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"InternalFailureException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    Gets status information about a specified load job. Neptune keeps track of the most recent 1,024 bulk load jobs, and stores the last 10,000 error details per job.

    See Neptune Loader Get-Status API for more information.
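    A short sketch, under the same client assumption as above; the loadId is the identifier returned when the bulk load was started, and the details/errors flags are assumed to mirror the loader REST API's query parameters:

    // Assumes 'client' is the NeptunedataClient from the fast-reset sketch above.
    GetLoaderJobStatusResponse loadStatus = client.getLoaderJobStatus(r -> r
            .loadId("your-load-id")   // placeholder load id from a previously started bulk load
            .details(true)            // assumed flag: include per-load details
            .errors(true));           // assumed flag: include error records
    System.out.println(loadStatus);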

    " + }, + "GetMLDataProcessingJob":{ + "name":"GetMLDataProcessingJob", + "http":{ + "method":"GET", + "requestUri":"/ml/dataprocessing/{id}", + "responseCode":200 + }, + "input":{"shape":"GetMLDataProcessingJobInput"}, + "output":{"shape":"GetMLDataProcessingJobOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Retrieves information about a specified data processing job. See The dataprocessing command.

    " + }, + "GetMLEndpoint":{ + "name":"GetMLEndpoint", + "http":{ + "method":"GET", + "requestUri":"/ml/endpoints/{id}", + "responseCode":200 + }, + "input":{"shape":"GetMLEndpointInput"}, + "output":{"shape":"GetMLEndpointOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Retrieves details about an inference endpoint. See Managing inference endpoints using the endpoints command.

    " + }, + "GetMLModelTrainingJob":{ + "name":"GetMLModelTrainingJob", + "http":{ + "method":"GET", + "requestUri":"/ml/modeltraining/{id}", + "responseCode":200 + }, + "input":{"shape":"GetMLModelTrainingJobInput"}, + "output":{"shape":"GetMLModelTrainingJobOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Retrieves information about a Neptune ML model training job. See Model training using the modeltraining command.

    " + }, + "GetMLModelTransformJob":{ + "name":"GetMLModelTransformJob", + "http":{ + "method":"GET", + "requestUri":"/ml/modeltransform/{id}", + "responseCode":200 + }, + "input":{"shape":"GetMLModelTransformJobInput"}, + "output":{"shape":"GetMLModelTransformJobOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Gets information about a specified model transform job. See Use a trained model to generate new model artifacts.

    " + }, + "GetOpenCypherQueryStatus":{ + "name":"GetOpenCypherQueryStatus", + "http":{ + "method":"GET", + "requestUri":"/opencypher/status/{queryId}", + "responseCode":200 + }, + "input":{"shape":"GetOpenCypherQueryStatusInput"}, + "output":{"shape":"GetOpenCypherQueryStatusOutput"}, + "errors":[ + {"shape":"InvalidNumericDataException"}, + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"FailureByQueryException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ParsingException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"TimeLimitExceededException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    Retrieves the status of a specified openCypher query.

    " + }, + "GetPropertygraphStatistics":{ + "name":"GetPropertygraphStatistics", + "http":{ + "method":"GET", + "requestUri":"/propertygraph/statistics", + "responseCode":200 + }, + "output":{"shape":"GetPropertygraphStatisticsOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"StatisticsNotAvailableException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    Gets property graph statistics (Gremlin and openCypher).

    " + }, + "GetPropertygraphStream":{ + "name":"GetPropertygraphStream", + "http":{ + "method":"GET", + "requestUri":"/propertygraph/stream", + "responseCode":200 + }, + "input":{"shape":"GetPropertygraphStreamInput"}, + "output":{"shape":"GetPropertygraphStreamOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"ExpiredStreamException"}, + {"shape":"InvalidParameterException"}, + {"shape":"MemoryLimitExceededException"}, + {"shape":"StreamRecordsNotFoundException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Gets a stream for a property graph.

    With the Neptune Streams feature, you can generate a complete sequence of change-log entries that record every change made to your graph data as it happens. GetPropertygraphStream lets you collect these change-log entries for a property graph.

    The Neptune streams feature needs to be enabled on your Neptune DB cluster. To enable streams, set the neptune_streams DB cluster parameter to 1.

    See Capturing graph changes in real time using Neptune streams.
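    A short sketch, under the same client assumption as above and with neptune_streams already set to 1 on the cluster; iteratorType, limit, and records() are assumed to mirror the stream REST API's parameters and payload:

    // Assumes 'client' is the NeptunedataClient from the fast-reset sketch above.
    GetPropertygraphStreamResponse stream = client.getPropertygraphStream(r -> r
            .iteratorType("TRIM_HORIZON")   // start from the oldest change record still retained
            .limit(10L));                   // read at most 10 change-log records
    System.out.println(stream.records());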

    " + }, + "GetPropertygraphSummary":{ + "name":"GetPropertygraphSummary", + "http":{ + "method":"GET", + "requestUri":"/propertygraph/statistics/summary", + "responseCode":200 + }, + "input":{"shape":"GetPropertygraphSummaryInput"}, + "output":{"shape":"GetPropertygraphSummaryOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"StatisticsNotAvailableException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    Gets a graph summary for a property graph.

    " + }, + "GetRDFGraphSummary":{ + "name":"GetRDFGraphSummary", + "http":{ + "method":"GET", + "requestUri":"/rdf/statistics/summary", + "responseCode":200 + }, + "input":{"shape":"GetRDFGraphSummaryInput"}, + "output":{"shape":"GetRDFGraphSummaryOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"StatisticsNotAvailableException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    Gets a graph summary for an RDF graph.

    " + }, + "GetSparqlStatistics":{ + "name":"GetSparqlStatistics", + "http":{ + "method":"GET", + "requestUri":"/sparql/statistics", + "responseCode":200 + }, + "output":{"shape":"GetSparqlStatisticsOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"StatisticsNotAvailableException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    Gets RDF statistics (SPARQL).

    " + }, + "GetSparqlStream":{ + "name":"GetSparqlStream", + "http":{ + "method":"GET", + "requestUri":"/sparql/stream", + "responseCode":200 + }, + "input":{"shape":"GetSparqlStreamInput"}, + "output":{"shape":"GetSparqlStreamOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"ExpiredStreamException"}, + {"shape":"InvalidParameterException"}, + {"shape":"MemoryLimitExceededException"}, + {"shape":"StreamRecordsNotFoundException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Gets a stream for an RDF graph.

    With the Neptune Streams feature, you can generate a complete sequence of change-log entries that record every change made to your graph data as it happens. GetSparqlStream lets you collect these change-log entries for an RDF graph.

    The Neptune streams feature needs to be enabled on your Neptune DB cluster. To enable streams, set the neptune_streams DB cluster parameter to 1.

    See Capturing graph changes in real time using Neptune streams.

    " + }, + "ListGremlinQueries":{ + "name":"ListGremlinQueries", + "http":{ + "method":"GET", + "requestUri":"/gremlin/status", + "responseCode":200 + }, + "input":{"shape":"ListGremlinQueriesInput"}, + "output":{"shape":"ListGremlinQueriesOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"FailureByQueryException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ParsingException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"TimeLimitExceededException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    Lists active Gremlin queries. See Gremlin query status API for details about the output.
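    A short sketch of the query-administration loop (list, inspect, cancel), under the same client assumption as above; includeWaiting, queries(), queryId(), and queryString() are assumed names on the generated request and status objects:

    // Assumes 'client' is the NeptunedataClient from the fast-reset sketch above.
    ListGremlinQueriesResponse running = client.listGremlinQueries(r -> r.includeWaiting(true));
    running.queries().forEach(q ->
            System.out.println(q.queryId() + " : " + q.queryString()));

    if (!running.queries().isEmpty()) {
        String queryId = running.queries().get(0).queryId();
        System.out.println(client.getGremlinQueryStatus(r -> r.queryId(queryId))); // check its status
        client.cancelGremlinQuery(r -> r.queryId(queryId));                        // then cancel it
    }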

    " + }, + "ListLoaderJobs":{ + "name":"ListLoaderJobs", + "http":{ + "method":"GET", + "requestUri":"/loader", + "responseCode":200 + }, + "input":{"shape":"ListLoaderJobsInput"}, + "output":{"shape":"ListLoaderJobsOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"BulkLoadIdNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"LoadUrlAccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Retrieves a list of the loadIds for all active loader jobs.

    " + }, + "ListMLDataProcessingJobs":{ + "name":"ListMLDataProcessingJobs", + "http":{ + "method":"GET", + "requestUri":"/ml/dataprocessing", + "responseCode":200 + }, + "input":{"shape":"ListMLDataProcessingJobsInput"}, + "output":{"shape":"ListMLDataProcessingJobsOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Returns a list of Neptune ML data processing jobs. See Listing active data-processing jobs using the Neptune ML dataprocessing command.

    " + }, + "ListMLEndpoints":{ + "name":"ListMLEndpoints", + "http":{ + "method":"GET", + "requestUri":"/ml/endpoints", + "responseCode":200 + }, + "input":{"shape":"ListMLEndpointsInput"}, + "output":{"shape":"ListMLEndpointsOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Lists existing inference endpoints. See Managing inference endpoints using the endpoints command.

    " + }, + "ListMLModelTrainingJobs":{ + "name":"ListMLModelTrainingJobs", + "http":{ + "method":"GET", + "requestUri":"/ml/modeltraining", + "responseCode":200 + }, + "input":{"shape":"ListMLModelTrainingJobsInput"}, + "output":{"shape":"ListMLModelTrainingJobsOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Lists Neptune ML model-training jobs. See Model training using the modeltraining command.

    " + }, + "ListMLModelTransformJobs":{ + "name":"ListMLModelTransformJobs", + "http":{ + "method":"GET", + "requestUri":"/ml/modeltransform", + "responseCode":200 + }, + "input":{"shape":"ListMLModelTransformJobsInput"}, + "output":{"shape":"ListMLModelTransformJobsOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Returns a list of model transform job IDs. See Use a trained model to generate new model artifacts.

    " + }, + "ListOpenCypherQueries":{ + "name":"ListOpenCypherQueries", + "http":{ + "method":"GET", + "requestUri":"/opencypher/status", + "responseCode":200 + }, + "input":{"shape":"ListOpenCypherQueriesInput"}, + "output":{"shape":"ListOpenCypherQueriesOutput"}, + "errors":[ + {"shape":"InvalidNumericDataException"}, + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"FailureByQueryException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ParsingException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"TimeLimitExceededException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    Lists active openCypher queries. See Neptune openCypher status endpoint for more information.

    " + }, + "ManagePropertygraphStatistics":{ + "name":"ManagePropertygraphStatistics", + "http":{ + "method":"POST", + "requestUri":"/propertygraph/statistics", + "responseCode":200 + }, + "input":{"shape":"ManagePropertygraphStatisticsInput"}, + "output":{"shape":"ManagePropertygraphStatisticsOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"StatisticsNotAvailableException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    Manages the generation and use of property graph statistics.

    ", + "idempotent":true + }, + "ManageSparqlStatistics":{ + "name":"ManageSparqlStatistics", + "http":{ + "method":"POST", + "requestUri":"/sparql/statistics", + "responseCode":200 + }, + "input":{"shape":"ManageSparqlStatisticsInput"}, + "output":{"shape":"ManageSparqlStatisticsOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"StatisticsNotAvailableException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ReadOnlyViolationException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    Manages the generation and use of RDF graph statistics.

    ", + "idempotent":true + }, + "StartLoaderJob":{ + "name":"StartLoaderJob", + "http":{ + "method":"POST", + "requestUri":"/loader", + "responseCode":200 + }, + "input":{"shape":"StartLoaderJobInput"}, + "output":{"shape":"StartLoaderJobOutput"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"BulkLoadIdNotFoundException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"LoadUrlAccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"InternalFailureException"}, + {"shape":"S3Exception"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"} + ], + "documentation":"

    Starts a Neptune bulk loader job to load data from an Amazon S3 bucket into a Neptune DB instance. See Using the Amazon Neptune Bulk Loader to Ingest Data.

    ", + "idempotent":true + }, + "StartMLDataProcessingJob":{ + "name":"StartMLDataProcessingJob", + "http":{ + "method":"POST", + "requestUri":"/ml/dataprocessing", + "responseCode":200 + }, + "input":{"shape":"StartMLDataProcessingJobInput"}, + "output":{"shape":"StartMLDataProcessingJobOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Creates a new Neptune ML data processing job for processing the graph data exported from Neptune for training. See The dataprocessing command.

    " + }, + "StartMLModelTrainingJob":{ + "name":"StartMLModelTrainingJob", + "http":{ + "method":"POST", + "requestUri":"/ml/modeltraining", + "responseCode":200 + }, + "input":{"shape":"StartMLModelTrainingJobInput"}, + "output":{"shape":"StartMLModelTrainingJobOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Creates a new Neptune ML model training job. See Model training using the modeltraining command.

    " + }, + "StartMLModelTransformJob":{ + "name":"StartMLModelTransformJob", + "http":{ + "method":"POST", + "requestUri":"/ml/modeltransform", + "responseCode":200 + }, + "input":{"shape":"StartMLModelTransformJobInput"}, + "output":{"shape":"StartMLModelTransformJobOutput"}, + "errors":[ + {"shape":"UnsupportedOperationException"}, + {"shape":"BadRequestException"}, + {"shape":"MLResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientTimeoutException"}, + {"shape":"PreconditionsFailedException"}, + {"shape":"ConstraintViolationException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"MissingParameterException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Creates a new model transform job. See Use a trained model to generate new model artifacts.

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised in case of an authentication or authorization failure.

    ", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "Action":{ + "type":"string", + "enum":[ + "initiateDatabaseReset", + "performDatabaseReset" + ] + }, + "BadRequestException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the bad request.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when a request is submitted that cannot be processed.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Blob":{"type":"blob"}, + "Boolean":{ + "type":"boolean", + "box":true + }, + "BulkLoadIdNotFoundException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The bulk-load job ID that could not be found.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when a specified bulk-load job ID cannot be found.

    ", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":false} + }, + "CancelGremlinQueryInput":{ + "type":"structure", + "required":["queryId"], + "members":{ + "queryId":{ + "shape":"String", + "documentation":"

    The unique identifier of the query to be canceled.

    ", + "location":"uri", + "locationName":"queryId" + } + } + }, + "CancelGremlinQueryOutput":{ + "type":"structure", + "members":{ + "status":{ + "shape":"String", + "documentation":"

    The status of the cancellation.

    " + } + } + }, + "CancelLoaderJobInput":{ + "type":"structure", + "required":["loadId"], + "members":{ + "loadId":{ + "shape":"String", + "documentation":"

    The ID of the load job to be deleted.

    ", + "location":"uri", + "locationName":"loadId" + } + } + }, + "CancelLoaderJobOutput":{ + "type":"structure", + "members":{ + "status":{ + "shape":"String", + "documentation":"

    The cancellation status.

    " + } + } + }, + "CancelMLDataProcessingJobInput":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"String", + "documentation":"

    The unique identifier of the data-processing job.

    ", + "location":"uri", + "locationName":"id" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"

    The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.

    ", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + }, + "clean":{ + "shape":"Boolean", + "documentation":"

    If set to TRUE, this flag specifies that all Neptune ML S3 artifacts should be deleted when the job is stopped. The default is FALSE.

    ", + "location":"querystring", + "locationName":"clean" + } + } + }, + "CancelMLDataProcessingJobOutput":{ + "type":"structure", + "members":{ + "status":{ + "shape":"String", + "documentation":"

    The status of the cancellation request.

    " + } + } + }, + "CancelMLModelTrainingJobInput":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"String", + "documentation":"

    The unique identifier of the model-training job to be canceled.

    ", + "location":"uri", + "locationName":"id" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"

    The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.

    ", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + }, + "clean":{ + "shape":"Boolean", + "documentation":"

    If set to TRUE, this flag specifies that all Amazon S3 artifacts should be deleted when the job is stopped. The default is FALSE.

    ", + "location":"querystring", + "locationName":"clean" + } + } + }, + "CancelMLModelTrainingJobOutput":{ + "type":"structure", + "members":{ + "status":{ + "shape":"String", + "documentation":"

    The status of the cancellation.

    " + } + } + }, + "CancelMLModelTransformJobInput":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"String", + "documentation":"

    The unique ID of the model transform job to be canceled.

    ", + "location":"uri", + "locationName":"id" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"

    The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.

    ", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + }, + "clean":{ + "shape":"Boolean", + "documentation":"

    If this flag is set to TRUE, all Neptune ML S3 artifacts should be deleted when the job is stopped. The default is FALSE.

    ", + "location":"querystring", + "locationName":"clean" + } + } + }, + "CancelMLModelTransformJobOutput":{ + "type":"structure", + "members":{ + "status":{ + "shape":"String", + "documentation":"

    The status of the cancellation.

    " + } + } + }, + "CancelOpenCypherQueryInput":{ + "type":"structure", + "required":["queryId"], + "members":{ + "queryId":{ + "shape":"String", + "documentation":"

    The unique ID of the openCypher query to cancel.

    ", + "location":"uri", + "locationName":"queryId" + }, + "silent":{ + "shape":"Boolean", + "documentation":"

    If set to TRUE, the openCypher query is cancelled silently.

    ", + "location":"querystring", + "locationName":"silent" + } + } + }, + "CancelOpenCypherQueryOutput":{ + "type":"structure", + "members":{ + "status":{ + "shape":"String", + "documentation":"

    The cancellation status of the openCypher query.

    " + }, + "payload":{ + "shape":"Boolean", + "documentation":"

    The cancellation payload for the openCypher query.

    " + } + } + }, + "CancelledByUserException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when a request is cancelled by the user.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "Classes":{ + "type":"list", + "member":{"shape":"String"} + }, + "ClientTimeoutException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when a request timed out in the client.

    ", + "error":{ + "httpStatusCode":408, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":false} + }, + "ConcurrentModificationException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when a request attempts to modify data that is concurrently being modified by another process.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "ConstraintViolationException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when a value in a request field did not satisfy required constraints.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":false} + }, + "CreateMLEndpointInput":{ + "type":"structure", + "members":{ + "id":{ + "shape":"String", + "documentation":"

    A unique identifier for the new inference endpoint. The default is an autogenerated timestamped name.

    " + }, + "mlModelTrainingJobId":{ + "shape":"String", + "documentation":"

    The job Id of the completed model-training job that has created the model that the inference endpoint will point to. You must supply either the mlModelTrainingJobId or the mlModelTransformJobId.

    " + }, + "mlModelTransformJobId":{ + "shape":"String", + "documentation":"

    The job Id of the completed model-transform job. You must supply either the mlModelTrainingJobId or the mlModelTransformJobId.

    " + }, + "update":{ + "shape":"Boolean", + "documentation":"

    If set to true, update indicates that this is an update request. The default is false. You must supply either the mlModelTrainingJobId or the mlModelTransformJobId.

    " + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"

    The ARN of an IAM role providing Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will be thrown.

    " + }, + "modelName":{ + "shape":"String", + "documentation":"

    Model type for training. By default the Neptune ML model is automatically based on the modelType used in data processing, but you can specify a different model type here. The default is rgcn for heterogeneous graphs and kge for knowledge graphs. The only valid value for heterogeneous graphs is rgcn. Valid values for knowledge graphs are: kge, transe, distmult, and rotate.

    " + }, + "instanceType":{ + "shape":"String", + "documentation":"

    The type of Neptune ML instance to use for online servicing. The default is ml.m5.xlarge. Choosing the ML instance for an inference endpoint depends on the task type, the graph size, and your budget.

    " + }, + "instanceCount":{ + "shape":"Integer", + "documentation":"

    The minimum number of Amazon EC2 instances to deploy to an endpoint for prediction. The default is 1.

    " + }, + "volumeEncryptionKMSKey":{ + "shape":"String", + "documentation":"

    The Amazon Key Management Service (Amazon KMS) key that SageMaker uses to encrypt data on the storage volume attached to the ML compute instances that run the training job. The default is None.

    " + } + } + }, + "CreateMLEndpointOutput":{ + "type":"structure", + "members":{ + "id":{ + "shape":"String", + "documentation":"

    The unique ID of the new inference endpoint.

    " + }, + "arn":{ + "shape":"String", + "documentation":"

    The ARN for the new inference endpoint.

    " + }, + "creationTimeInMillis":{ + "shape":"Long", + "documentation":"

    The endpoint creation time, in milliseconds.

    " + } + } + }, + "CustomModelTrainingParameters":{ + "type":"structure", + "required":["sourceS3DirectoryPath"], + "members":{ + "sourceS3DirectoryPath":{ + "shape":"String", + "documentation":"

    The path to the Amazon S3 location where the Python module implementing your model is located. This must point to a valid existing Amazon S3 location that contains, at a minimum, a training script, a transform script, and a model-hpo-configuration.json file.

    " + }, + "trainingEntryPointScript":{ + "shape":"String", + "documentation":"

    The name of the entry point in your module of a script that performs model training and takes hyperparameters as command-line arguments, including fixed hyperparameters. The default is training.py.

    " + }, + "transformEntryPointScript":{ + "shape":"String", + "documentation":"

    The name of the entry point in your module of a script that should be run after the best model from the hyperparameter search has been identified, to compute the model artifacts necessary for model deployment. It should be able to run with no command-line arguments. The default is transform.py.

    " + } + }, + "documentation":"

    Contains custom model training parameters. See Custom models in Neptune ML.

    " + }, + "CustomModelTransformParameters":{ + "type":"structure", + "required":["sourceS3DirectoryPath"], + "members":{ + "sourceS3DirectoryPath":{ + "shape":"String", + "documentation":"

    The path to the Amazon S3 location where the Python module implementing your model is located. This must point to a valid existing Amazon S3 location that contains, at a minimum, a training script, a transform script, and a model-hpo-configuration.json file.

    " + }, + "transformEntryPointScript":{ + "shape":"String", + "documentation":"

    The name of the entry point in your module of a script that should be run after the best model from the hyperparameter search has been identified, to compute the model artifacts necessary for model deployment. It should be able to run with no command-line arguments. The default is transform.py.

    " + } + }, + "documentation":"

    Contains custom model transform parameters. See Use a trained model to generate new model artifacts.

    " + }, + "DeleteMLEndpointInput":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"String", + "documentation":"

    The unique identifier of the inference endpoint.

    ", + "location":"uri", + "locationName":"id" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"

    The ARN of an IAM role providing Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will be thrown.

    ", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + }, + "clean":{ + "shape":"Boolean", + "documentation":"

    If this flag is set to TRUE, all Neptune ML S3 artifacts should be deleted when the job is stopped. The default is FALSE.

    ", + "location":"querystring", + "locationName":"clean" + } + } + }, + "DeleteMLEndpointOutput":{ + "type":"structure", + "members":{ + "status":{ + "shape":"String", + "documentation":"

    The status of the cancellation.

    " + } + } + }, + "DeletePropertygraphStatisticsOutput":{ + "type":"structure", + "members":{ + "statusCode":{ + "shape":"Integer", + "documentation":"

    The HTTP response code: 200 if the delete was successful, or 204 if there were no statistics to delete.

    ", + "location":"statusCode" + }, + "status":{ + "shape":"String", + "documentation":"

    The cancel status.

    " + }, + "payload":{ + "shape":"DeleteStatisticsValueMap", + "documentation":"

    The deletion payload.

    " + } + } + }, + "DeleteSparqlStatisticsOutput":{ + "type":"structure", + "members":{ + "statusCode":{ + "shape":"Integer", + "documentation":"

    The HTTP response code: 200 if the delete was successful, or 204 if there were no statistics to delete.

    ", + "location":"statusCode" + }, + "status":{ + "shape":"String", + "documentation":"

    The cancel status.

    " + }, + "payload":{ + "shape":"DeleteStatisticsValueMap", + "documentation":"

    The deletion payload.

    " + } + } + }, + "DeleteStatisticsValueMap":{ + "type":"structure", + "members":{ + "active":{ + "shape":"Boolean", + "documentation":"

    The current status of the statistics.

    " + }, + "statisticsId":{ + "shape":"String", + "documentation":"

    The ID of the statistics generation run that is currently occurring.

    " + } + }, + "documentation":"

    The payload for DeleteStatistics.

    " + }, + "Document":{ + "type":"structure", + "members":{ + }, + "document":true + }, + "DocumentValuedMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"Document"} + }, + "EdgeLabels":{ + "type":"list", + "member":{"shape":"String"} + }, + "EdgeProperties":{ + "type":"list", + "member":{"shape":"String"} + }, + "EdgeStructure":{ + "type":"structure", + "members":{ + "count":{ + "shape":"Long", + "documentation":"

    The number of edges that have this specific structure.

    " + }, + "edgeProperties":{ + "shape":"EdgeProperties", + "documentation":"

    A list of edge properties present in this specific structure.

    " + } + }, + "documentation":"

    An edge structure.

    " + }, + "EdgeStructures":{ + "type":"list", + "member":{"shape":"EdgeStructure"} + }, + "Encoding":{ + "type":"string", + "enum":["gzip"] + }, + "ExecuteFastResetInput":{ + "type":"structure", + "required":["action"], + "members":{ + "action":{ + "shape":"Action", + "documentation":"

    The fast reset action. One of the following values:

    • initiateDatabaseReset   –   This action generates a unique token needed to actually perform the fast reset.

    • performDatabaseReset   –   This action uses the token generated by the initiateDatabaseReset action to actually perform the fast reset.

    " + }, + "token":{ + "shape":"String", + "documentation":"

    The fast-reset token to initiate the reset.

    " + } + } + }, + "ExecuteFastResetOutput":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"String", + "documentation":"

    The status is only returned for the performDatabaseReset action, and indicates whether or not the fast reset request is accepted.

    " + }, + "payload":{ + "shape":"FastResetToken", + "documentation":"

    The payload is only returned by the initiateDatabaseReset action, and contains the unique token to use with the performDatabaseReset action to make the reset occur.

    " + } + } + }, + "ExecuteGremlinExplainQueryInput":{ + "type":"structure", + "required":["gremlinQuery"], + "members":{ + "gremlinQuery":{ + "shape":"String", + "documentation":"

    The Gremlin explain query string.

    ", + "locationName":"gremlin" + } + } + }, + "ExecuteGremlinExplainQueryOutput":{ + "type":"structure", + "members":{ + "output":{ + "shape":"ReportAsText", + "documentation":"

    A text blob containing the Gremlin explain result, as described in Tuning Gremlin queries.

    " + } + }, + "payload":"output" + }, + "ExecuteGremlinProfileQueryInput":{ + "type":"structure", + "required":["gremlinQuery"], + "members":{ + "gremlinQuery":{ + "shape":"String", + "documentation":"

    The Gremlin query string to profile.

    ", + "locationName":"gremlin" + }, + "results":{ + "shape":"Boolean", + "documentation":"

    If this flag is set to TRUE, the query results are gathered and displayed as part of the profile report. If FALSE, only the result count is displayed.

    ", + "locationName":"profile.results" + }, + "chop":{ + "shape":"Integer", + "documentation":"

    If non-zero, causes the results string to be truncated at that number of characters. If set to zero, the string contains all the results.

    ", + "locationName":"profile.chop" + }, + "serializer":{ + "shape":"String", + "documentation":"

    If non-null, the gathered results are returned in a serialized response message in the format specified by this parameter. See Gremlin profile API in Neptune for more information.

    ", + "locationName":"profile.serializer" + }, + "indexOps":{ + "shape":"Boolean", + "documentation":"

    If this flag is set to TRUE, the results include a detailed report of all index operations that took place during query execution and serialization.

    ", + "locationName":"profile.indexOps" + } + } + }, + "ExecuteGremlinProfileQueryOutput":{ + "type":"structure", + "members":{ + "output":{ + "shape":"ReportAsText", + "documentation":"

    A text blob containing the Gremlin Profile result. See Gremlin profile API in Neptune for details.

    " + } + }, + "payload":"output" + }, + "ExecuteGremlinQueryInput":{ + "type":"structure", + "required":["gremlinQuery"], + "members":{ + "gremlinQuery":{ + "shape":"String", + "documentation":"

    Using this API, you can run Gremlin queries in string format much as you can using the HTTP endpoint. The interface is compatible with whatever Gremlin version your DB cluster is using (see the Tinkerpop client section to determine which Gremlin releases your engine version supports).

    ", + "locationName":"gremlin" + }, + "serializer":{ + "shape":"String", + "documentation":"

    If non-null, the query results are returned in a serialized response message in the format specified by this parameter. See the GraphSON section in the TinkerPop documentation for a list of the formats that are currently supported.

    ", + "location":"header", + "locationName":"accept" + } + } + }, + "ExecuteGremlinQueryOutput":{ + "type":"structure", + "members":{ + "requestId":{ + "shape":"String", + "documentation":"

    The unique identifier of the Gremlin query.

    " + }, + "status":{ + "shape":"GremlinQueryStatusAttributes", + "documentation":"

    The status of the Gremlin query.

    " + }, + "result":{ + "shape":"Document", + "documentation":"

    The Gremlin query output from the server.

    " + }, + "meta":{ + "shape":"Document", + "documentation":"

    Metadata about the Gremlin query.

    " + } + } + }, + "ExecuteOpenCypherExplainQueryInput":{ + "type":"structure", + "required":[ + "openCypherQuery", + "explainMode" + ], + "members":{ + "openCypherQuery":{ + "shape":"String", + "documentation":"

    The openCypher query string.

    ", + "locationName":"query" + }, + "parameters":{ + "shape":"String", + "documentation":"

    The openCypher query parameters.

    " + }, + "explainMode":{ + "shape":"OpenCypherExplainMode", + "documentation":"

    The openCypher explain mode. Can be one of: static, dynamic, or details.

    ", + "locationName":"explain" + } + } + }, + "ExecuteOpenCypherExplainQueryOutput":{ + "type":"structure", + "required":["results"], + "members":{ + "results":{ + "shape":"Blob", + "documentation":"

    A text blob containing the openCypher explain results.

    " + } + }, + "payload":"results" + }, + "ExecuteOpenCypherQueryInput":{ + "type":"structure", + "required":["openCypherQuery"], + "members":{ + "openCypherQuery":{ + "shape":"String", + "documentation":"

    The openCypher query string to be executed.

    ", + "locationName":"query" + }, + "parameters":{ + "shape":"String", + "documentation":"

    The openCypher query parameters for query execution. See Examples of openCypher parameterized queries for more information.

    " + } + } + }, + "ExecuteOpenCypherQueryOutput":{ + "type":"structure", + "required":["results"], + "members":{ + "results":{ + "shape":"Document", + "documentation":"

    The openCypher query results.

    " + } + } + }, + "ExpiredStreamException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when a request attempts to access a stream that has expired.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "FailureByQueryException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when a request fails.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "FastResetToken":{ + "type":"structure", + "members":{ + "token":{ + "shape":"String", + "documentation":"

    A UUID generated by the database in the initiateDatabaseReset action, and then consumed by the performDatabaseReset action to reset the database.

    " + } + }, + "documentation":"

    A structure containing the fast reset token used to initiate a fast reset.

    " + }, + "Format":{ + "type":"string", + "enum":[ + "csv", + "opencypher", + "ntriples", + "nquads", + "rdfxml", + "turtle" + ] + }, + "GetEngineStatusOutput":{ + "type":"structure", + "members":{ + "status":{ + "shape":"String", + "documentation":"

    Set to healthy if the instance is not experiencing problems. If the instance is recovering from a crash or from being rebooted and there are active transactions running from the latest server shutdown, status is set to recovery.

    " + }, + "startTime":{ + "shape":"String", + "documentation":"

    Set to the UTC time at which the current server process started.

    " + }, + "dbEngineVersion":{ + "shape":"String", + "documentation":"

    Set to the Neptune engine version running on your DB cluster. If this engine version has been manually patched since it was released, the version number is prefixed by Patch-.

    " + }, + "role":{ + "shape":"String", + "documentation":"

    Set to reader if the instance is a read-replica, or to writer if the instance is the primary instance.

    " + }, + "dfeQueryEngine":{ + "shape":"String", + "documentation":"

    Set to enabled if the DFE engine is fully enabled, or to viaQueryHint (the default) if the DFE engine is only used with queries that have the useDFE query hint set to true.

    " + }, + "gremlin":{ + "shape":"QueryLanguageVersion", + "documentation":"

    Contains information about the Gremlin query language available on your cluster. Specifically, it contains a version field that specifies the current TinkerPop version being used by the engine.

    " + }, + "sparql":{ + "shape":"QueryLanguageVersion", + "documentation":"

    Contains information about the SPARQL query language available on your cluster. Specifically, it contains a version field that specifies the current SPARQL version being used by the engine.

    " + }, + "opencypher":{ + "shape":"QueryLanguageVersion", + "documentation":"

    Contains information about the openCypher query language available on your cluster. Specifically, it contains a version field that specifies the current openCypher version being used by the engine.

    " + }, + "labMode":{ + "shape":"StringValuedMap", + "documentation":"

    Contains Lab Mode settings being used by the engine.

    " + }, + "rollingBackTrxCount":{ + "shape":"Integer", + "documentation":"

    If there are transactions being rolled back, this field is set to the number of such transactions. If there are none, the field doesn't appear at all.

    " + }, + "rollingBackTrxEarliestStartTime":{ + "shape":"String", + "documentation":"

    Set to the start time of the earliest transaction being rolled back. If no transactions are being rolled back, the field doesn't appear at all.

    " + }, + "features":{ + "shape":"DocumentValuedMap", + "documentation":"

    Contains status information about the features enabled on your DB cluster.

    " + }, + "settings":{ + "shape":"StringValuedMap", + "documentation":"

    Contains information about the current settings on your DB cluster. For example, contains the current cluster query timeout setting (clusterQueryTimeoutInMs).

    " + } + } + }, + "GetGremlinQueryStatusInput":{ + "type":"structure", + "required":["queryId"], + "members":{ + "queryId":{ + "shape":"String", + "documentation":"

    The unique identifier of the Gremlin query.

    ", + "location":"uri", + "locationName":"queryId" + } + } + }, + "GetGremlinQueryStatusOutput":{ + "type":"structure", + "members":{ + "queryId":{ + "shape":"String", + "documentation":"

    The ID of the query for which status is being returned.

    " + }, + "queryString":{ + "shape":"String", + "documentation":"

    The Gremlin query string.

    " + }, + "queryEvalStats":{ + "shape":"QueryEvalStats", + "documentation":"

    The evaluation status of the Gremlin query.

    " + } + } + }, + "GetLoaderJobStatusInput":{ + "type":"structure", + "required":["loadId"], + "members":{ + "loadId":{ + "shape":"String", + "documentation":"

    The load ID of the load job to get the status of.

    ", + "location":"uri", + "locationName":"loadId" + }, + "details":{ + "shape":"Boolean", + "documentation":"

    Flag indicating whether or not to include details beyond the overall status (TRUE or FALSE; the default is FALSE).

    ", + "location":"querystring", + "locationName":"details" + }, + "errors":{ + "shape":"Boolean", + "documentation":"

    Flag indicating whether or not to include a list of errors encountered (TRUE or FALSE; the default is FALSE).

    The list of errors is paged. The page and errorsPerPage parameters allow you to page through all the errors.

    ", + "location":"querystring", + "locationName":"errors" + }, + "page":{ + "shape":"PositiveInteger", + "documentation":"

    The error page number (a positive integer; the default is 1). Only valid when the errors parameter is set to TRUE.

    ", + "location":"querystring", + "locationName":"page" + }, + "errorsPerPage":{ + "shape":"PositiveInteger", + "documentation":"

    The number of errors returned in each page (a positive integer; the default is 10). Only valid when the errors parameter is set to TRUE.

    ", + "location":"querystring", + "locationName":"errorsPerPage" + } + } + }, + "GetLoaderJobStatusOutput":{ + "type":"structure", + "required":[ + "status", + "payload" + ], + "members":{ + "status":{ + "shape":"String", + "documentation":"

    The HTTP response code for the request.

    " + }, + "payload":{ + "shape":"Document", + "documentation":"

    Status information about the load job, in a layout that could look like this:

    " + } + } + }, + "GetMLDataProcessingJobInput":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"String", + "documentation":"

    The unique identifier of the data-processing job to be retrieved.

    ", + "location":"uri", + "locationName":"id" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"

    The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.

    ", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + } + } + }, + "GetMLDataProcessingJobOutput":{ + "type":"structure", + "members":{ + "status":{ + "shape":"String", + "documentation":"

    Status of the data processing job.

    " + }, + "id":{ + "shape":"String", + "documentation":"

    The unique identifier of this data-processing job.

    " + }, + "processingJob":{ + "shape":"MlResourceDefinition", + "documentation":"

    Definition of the data processing job.

    " + } + } + }, + "GetMLEndpointInput":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"String", + "documentation":"

    The unique identifier of the inference endpoint.

    ", + "location":"uri", + "locationName":"id" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"

    The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.

    ", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + } + } + }, + "GetMLEndpointOutput":{ + "type":"structure", + "members":{ + "status":{ + "shape":"String", + "documentation":"

    The status of the inference endpoint.

    " + }, + "id":{ + "shape":"String", + "documentation":"

    The unique identifier of the inference endpoint.

    " + }, + "endpoint":{ + "shape":"MlResourceDefinition", + "documentation":"

    The endpoint definition.

    " + }, + "endpointConfig":{ + "shape":"MlConfigDefinition", + "documentation":"

    The endpoint configuration.

    " + } + } + }, + "GetMLModelTrainingJobInput":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"String", + "documentation":"

    The unique identifier of the model-training job to retrieve.

    ", + "location":"uri", + "locationName":"id" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"

    The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.

    ", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + } + } + }, + "GetMLModelTrainingJobOutput":{ + "type":"structure", + "members":{ + "status":{ + "shape":"String", + "documentation":"

    The status of the model training job.

    " + }, + "id":{ + "shape":"String", + "documentation":"

    The unique identifier of this model-training job.

    " + }, + "processingJob":{ + "shape":"MlResourceDefinition", + "documentation":"

    The data processing job.

    " + }, + "hpoJob":{ + "shape":"MlResourceDefinition", + "documentation":"

    The HPO job.

    " + }, + "modelTransformJob":{ + "shape":"MlResourceDefinition", + "documentation":"

    The model transform job.

    " + }, + "mlModels":{ + "shape":"MlModels", + "documentation":"

    A list of the configurations of the ML models being used.

    " + } + } + }, + "GetMLModelTransformJobInput":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"String", + "documentation":"

    The unique identifier of the model-transform job to be retrieved.

    ", + "location":"uri", + "locationName":"id" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"

    The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.

    ", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + } + } + }, + "GetMLModelTransformJobOutput":{ + "type":"structure", + "members":{ + "status":{ + "shape":"String", + "documentation":"

    The status of the model-transform job.

    " + }, + "id":{ + "shape":"String", + "documentation":"

    The unique identifier of the model-transform job to be retrieved.

    " + }, + "baseProcessingJob":{ + "shape":"MlResourceDefinition", + "documentation":"

    The base data processing job.

    " + }, + "remoteModelTransformJob":{ + "shape":"MlResourceDefinition", + "documentation":"

    The remote model transform job.

    " + }, + "models":{ + "shape":"Models", + "documentation":"

    A list of the configuration information for the models being used.

    " + } + } + }, + "GetOpenCypherQueryStatusInput":{ + "type":"structure", + "required":["queryId"], + "members":{ + "queryId":{ + "shape":"String", + "documentation":"

    The unique ID of the openCypher query for which to retrieve the query status.

    ", + "location":"uri", + "locationName":"queryId" + } + } + }, + "GetOpenCypherQueryStatusOutput":{ + "type":"structure", + "members":{ + "queryId":{ + "shape":"String", + "documentation":"

    The unique ID of the query for which status is being returned.

    " + }, + "queryString":{ + "shape":"String", + "documentation":"

    The openCypher query string.

    " + }, + "queryEvalStats":{ + "shape":"QueryEvalStats", + "documentation":"

    The openCypher query evaluation status.

    " + } + } + }, + "GetPropertygraphStatisticsOutput":{ + "type":"structure", + "required":[ + "status", + "payload" + ], + "members":{ + "status":{ + "shape":"String", + "documentation":"

    The HTTP return code of the request. If the request succeeded, the code is 200. See Common error codes for DFE statistics request for a list of common errors.

    " + }, + "payload":{ + "shape":"Statistics", + "documentation":"

    Statistics for property-graph data.

    " + } + } + }, + "GetPropertygraphStreamInput":{ + "type":"structure", + "members":{ + "limit":{ + "shape":"GetPropertygraphStreamInputLimitLong", + "documentation":"

    Specifies the maximum number of records to return. There is also a size limit of 10 MB on the response that can't be modified and that takes precedence over the number of records specified in the limit parameter. The response does include a threshold-breaching record if the 10 MB limit was reached.

    The range for limit is 1 to 100,000, with a default of 10.

    ", + "location":"querystring", + "locationName":"limit" + }, + "iteratorType":{ + "shape":"IteratorType", + "documentation":"

    Can be one of:

    • AT_SEQUENCE_NUMBER   –   Indicates that reading should start from the event sequence number specified jointly by the commitNum and opNum parameters.

    • AFTER_SEQUENCE_NUMBER   –   Indicates that reading should start right after the event sequence number specified jointly by the commitNum and opNum parameters.

    • TRIM_HORIZON   –   Indicates that reading should start at the last untrimmed record in the system, which is the oldest unexpired (not yet deleted) record in the change-log stream.

    • LATEST   –   Indicates that reading should start at the most recent record in the system, which is the latest unexpired (not yet deleted) record in the change-log stream.

    ", + "location":"querystring", + "locationName":"iteratorType" + }, + "commitNum":{ + "shape":"Long", + "documentation":"

    The commit number of the starting record to read from the change-log stream. This parameter is required when iteratorType is AT_SEQUENCE_NUMBER or AFTER_SEQUENCE_NUMBER, and ignored when iteratorType is TRIM_HORIZON or LATEST.

    ", + "location":"querystring", + "locationName":"commitNum" + }, + "opNum":{ + "shape":"Long", + "documentation":"

    The operation sequence number within the specified commit to start reading from in the change-log stream data. The default is 1.

    ", + "location":"querystring", + "locationName":"opNum" + }, + "encoding":{ + "shape":"Encoding", + "documentation":"

    If set to TRUE, Neptune compresses the response using gzip encoding.

    ", + "location":"header", + "locationName":"Accept-Encoding" + } + } + }, + "GetPropertygraphStreamInputLimitLong":{ + "type":"long", + "box":true, + "max":100000, + "min":1 + }, + "GetPropertygraphStreamOutput":{ + "type":"structure", + "required":[ + "lastEventId", + "lastTrxTimestampInMillis", + "format", + "records", + "totalRecords" + ], + "members":{ + "lastEventId":{ + "shape":"StringValuedMap", + "documentation":"

    Sequence identifier of the last change in the stream response.

    An event ID is composed of two fields: a commitNum, which identifies a transaction that changed the graph, and an opNum, which identifies a specific operation within that transaction:

    " + }, + "lastTrxTimestampInMillis":{ + "shape":"Long", + "documentation":"

    The time at which the commit for the transaction was requested, in milliseconds from the Unix epoch.

    ", + "locationName":"lastTrxTimestamp" + }, + "format":{ + "shape":"String", + "documentation":"

    Serialization format for the change records being returned. Currently, the only supported value is PG_JSON.

    " + }, + "records":{ + "shape":"PropertygraphRecordsList", + "documentation":"

    An array of serialized change-log stream records included in the response.

    " + }, + "totalRecords":{ + "shape":"Integer", + "documentation":"

    The total number of records in the response.

    " + } + } + }, + "GetPropertygraphSummaryInput":{ + "type":"structure", + "members":{ + "mode":{ + "shape":"GraphSummaryType", + "documentation":"

    Mode can take one of two values: BASIC (the default), and DETAILED.

    ", + "location":"querystring", + "locationName":"mode" + } + } + }, + "GetPropertygraphSummaryOutput":{ + "type":"structure", + "members":{ + "statusCode":{ + "shape":"Integer", + "documentation":"

    The HTTP return code of the request. If the request succeeded, the code is 200.

    ", + "location":"statusCode" + }, + "payload":{ + "shape":"PropertygraphSummaryValueMap", + "documentation":"

    Payload containing the property graph summary response.

    " + } + } + }, + "GetRDFGraphSummaryInput":{ + "type":"structure", + "members":{ + "mode":{ + "shape":"GraphSummaryType", + "documentation":"

    Mode can take one of two values: BASIC (the default), and DETAILED.

    ", + "location":"querystring", + "locationName":"mode" + } + } + }, + "GetRDFGraphSummaryOutput":{ + "type":"structure", + "members":{ + "statusCode":{ + "shape":"Integer", + "documentation":"

    The HTTP return code of the request. If the request succeeded, the code is 200.

    ", + "location":"statusCode" + }, + "payload":{ + "shape":"RDFGraphSummaryValueMap", + "documentation":"

    Payload for an RDF graph summary response.

    " + } + } + }, + "GetSparqlStatisticsOutput":{ + "type":"structure", + "required":[ + "status", + "payload" + ], + "members":{ + "status":{ + "shape":"String", + "documentation":"

    The HTTP return code of the request. If the request succeeded, the code is 200. See Common error codes for DFE statistics request for a list of common errors.

    " + }, + "payload":{ + "shape":"Statistics", + "documentation":"

    Statistics for RDF data.

    " + } + } + }, + "GetSparqlStreamInput":{ + "type":"structure", + "members":{ + "limit":{ + "shape":"GetSparqlStreamInputLimitLong", + "documentation":"

    Specifies the maximum number of records to return. There is also a size limit of 10 MB on the response that can't be modified and that takes precedence over the number of records specified in the limit parameter. The response does include a threshold-breaching record if the 10 MB limit was reached.

    The range for limit is 1 to 100,000, with a default of 10.

    ", + "location":"querystring", + "locationName":"limit" + }, + "iteratorType":{ + "shape":"IteratorType", + "documentation":"

    Can be one of:

    • AT_SEQUENCE_NUMBER   –   Indicates that reading should start from the event sequence number specified jointly by the commitNum and opNum parameters.

    • AFTER_SEQUENCE_NUMBER   –   Indicates that reading should start right after the event sequence number specified jointly by the commitNum and opNum parameters.

    • TRIM_HORIZON   –   Indicates that reading should start at the last untrimmed record in the system, which is the oldest unexpired (not yet deleted) record in the change-log stream.

    • LATEST   –   Indicates that reading should start at the most recent record in the system, which is the latest unexpired (not yet deleted) record in the change-log stream.

    ", + "location":"querystring", + "locationName":"iteratorType" + }, + "commitNum":{ + "shape":"Long", + "documentation":"

    The commit number of the starting record to read from the change-log stream. This parameter is required when iteratorType is AT_SEQUENCE_NUMBER or AFTER_SEQUENCE_NUMBER, and ignored when iteratorType is TRIM_HORIZON or LATEST.

    ", + "location":"querystring", + "locationName":"commitNum" + }, + "opNum":{ + "shape":"Long", + "documentation":"

    The operation sequence number within the specified commit to start reading from in the change-log stream data. The default is 1.

    ", + "location":"querystring", + "locationName":"opNum" + }, + "encoding":{ + "shape":"Encoding", + "documentation":"

    If set to TRUE, Neptune compresses the response using gzip encoding.

    ", + "location":"header", + "locationName":"Accept-Encoding" + } + } + }, + "GetSparqlStreamInputLimitLong":{ + "type":"long", + "box":true, + "max":100000, + "min":1 + }, + "GetSparqlStreamOutput":{ + "type":"structure", + "required":[ + "lastEventId", + "lastTrxTimestampInMillis", + "format", + "records", + "totalRecords" + ], + "members":{ + "lastEventId":{ + "shape":"StringValuedMap", + "documentation":"

    Sequence identifier of the last change in the stream response.

    An event ID is composed of two fields: a commitNum, which identifies a transaction that changed the graph, and an opNum, which identifies a specific operation within that transaction:

    " + }, + "lastTrxTimestampInMillis":{ + "shape":"Long", + "documentation":"

    The time at which the commit for the transaction was requested, in milliseconds from the Unix epoch.

    ", + "locationName":"lastTrxTimestamp" + }, + "format":{ + "shape":"String", + "documentation":"

    Serialization format for the change records being returned. Currently, the only supported value is NQUADS.

    " + }, + "records":{ + "shape":"SparqlRecordsList", + "documentation":"

    An array of serialized change-log stream records included in the response.

    " + }, + "totalRecords":{ + "shape":"Integer", + "documentation":"

    The total number of records in the response.

    " + } + } + }, + "GraphSummaryType":{ + "type":"string", + "enum":[ + "basic", + "detailed" + ] + }, + "GremlinQueries":{ + "type":"list", + "member":{"shape":"GremlinQueryStatus"} + }, + "GremlinQueryStatus":{ + "type":"structure", + "members":{ + "queryId":{ + "shape":"String", + "documentation":"

    The ID of the Gremlin query.

    " + }, + "queryString":{ + "shape":"String", + "documentation":"

    The query string of the Gremlin query.

    " + }, + "queryEvalStats":{ + "shape":"QueryEvalStats", + "documentation":"

    The query statistics of the Gremlin query.

    " + } + }, + "documentation":"

    Captures the status of a Gremlin query (see the Gremlin query status API page).

    " + }, + "GremlinQueryStatusAttributes":{ + "type":"structure", + "members":{ + "message":{ + "shape":"String", + "documentation":"

    The status message.

    " + }, + "code":{ + "shape":"Integer", + "documentation":"

    The HTTP response code returned from the Gremlin query request.

    " + }, + "attributes":{ + "shape":"Document", + "documentation":"

    Attributes of the Gremlin query status.

    " + } + }, + "documentation":"

    Contains status components of a Gremlin query.

    " + }, + "IllegalArgumentException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when an argument in a request is not supported.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalFailureException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when the processing of the request failed unexpectedly.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "InvalidArgumentException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when an argument in a request has an invalid value.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidNumericDataException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when invalid numerical data is encountered when servicing a request.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InvalidParameterException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request that includes an invalid parameter.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when a parameter value is not valid.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "IteratorType":{ + "type":"string", + "enum":[ + "AT_SEQUENCE_NUMBER", + "AFTER_SEQUENCE_NUMBER", + "TRIM_HORIZON", + "LATEST" + ] + }, + "ListGremlinQueriesInput":{ + "type":"structure", + "members":{ + "includeWaiting":{ + "shape":"Boolean", + "documentation":"

    If set to TRUE, the list returned includes waiting queries. The default is FALSE.

    ", + "location":"querystring", + "locationName":"includeWaiting" + } + } + }, + "ListGremlinQueriesOutput":{ + "type":"structure", + "members":{ + "acceptedQueryCount":{ + "shape":"Integer", + "documentation":"

    The number of queries that have been accepted but not yet completed, including queries in the queue.

    " + }, + "runningQueryCount":{ + "shape":"Integer", + "documentation":"

    The number of Gremlin queries currently running.

    " + }, + "queries":{ + "shape":"GremlinQueries", + "documentation":"

    A list of the current queries.

    " + } + } + }, + "ListLoaderJobsInput":{ + "type":"structure", + "members":{ + "limit":{ + "shape":"ListLoaderJobsInputLimitInteger", + "documentation":"

    The number of load IDs to list. Must be a positive integer from 1 to 100. The default is 100.

    ", + "location":"querystring", + "locationName":"limit" + }, + "includeQueuedLoads":{ + "shape":"Boolean", + "documentation":"

    An optional parameter that, when set to FALSE, excludes the load IDs of queued load requests from the returned list. The default value is TRUE.

    ", + "location":"querystring", + "locationName":"includeQueuedLoads" + } + } + }, + "ListLoaderJobsInputLimitInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListLoaderJobsOutput":{ + "type":"structure", + "required":[ + "status", + "payload" + ], + "members":{ + "status":{ + "shape":"String", + "documentation":"

    Returns the status of the job list request.

    " + }, + "payload":{ + "shape":"LoaderIdResult", + "documentation":"

    The requested list of job IDs.

    " + } + } + }, + "ListMLDataProcessingJobsInput":{ + "type":"structure", + "members":{ + "maxItems":{ + "shape":"ListMLDataProcessingJobsInputMaxItemsInteger", + "documentation":"

    The maximum number of items to return (from 1 to 1024; the default is 10).

    ", + "location":"querystring", + "locationName":"maxItems" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"

    The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.

    ", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + } + } + }, + "ListMLDataProcessingJobsInputMaxItemsInteger":{ + "type":"integer", + "box":true, + "max":1024, + "min":1 + }, + "ListMLDataProcessingJobsOutput":{ + "type":"structure", + "members":{ + "ids":{ + "shape":"StringList", + "documentation":"

    A page listing data processing job IDs.

    " + } + } + }, + "ListMLEndpointsInput":{ + "type":"structure", + "members":{ + "maxItems":{ + "shape":"ListMLEndpointsInputMaxItemsInteger", + "documentation":"

    The maximum number of items to return (from 1 to 1024; the default is 10).

    ", + "location":"querystring", + "locationName":"maxItems" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"

    The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.

    ", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + } + } + }, + "ListMLEndpointsInputMaxItemsInteger":{ + "type":"integer", + "box":true, + "max":1024, + "min":1 + }, + "ListMLEndpointsOutput":{ + "type":"structure", + "members":{ + "ids":{ + "shape":"StringList", + "documentation":"

    A page from the list of inference endpoint IDs.

    " + } + } + }, + "ListMLModelTrainingJobsInput":{ + "type":"structure", + "members":{ + "maxItems":{ + "shape":"ListMLModelTrainingJobsInputMaxItemsInteger", + "documentation":"

    The maximum number of items to return (from 1 to 1024; the default is 10).

    ", + "location":"querystring", + "locationName":"maxItems" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"

    The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.

    ", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + } + } + }, + "ListMLModelTrainingJobsInputMaxItemsInteger":{ + "type":"integer", + "box":true, + "max":1024, + "min":1 + }, + "ListMLModelTrainingJobsOutput":{ + "type":"structure", + "members":{ + "ids":{ + "shape":"StringList", + "documentation":"

    A page of the list of model training job IDs.

    " + } + } + }, + "ListMLModelTransformJobsInput":{ + "type":"structure", + "members":{ + "maxItems":{ + "shape":"ListMLModelTransformJobsInputMaxItemsInteger", + "documentation":"

    The maximum number of items to return (from 1 to 1024; the default is 10).

    ", + "location":"querystring", + "locationName":"maxItems" + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"

    The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.

    ", + "location":"querystring", + "locationName":"neptuneIamRoleArn" + } + } + }, + "ListMLModelTransformJobsInputMaxItemsInteger":{ + "type":"integer", + "box":true, + "max":1024, + "min":1 + }, + "ListMLModelTransformJobsOutput":{ + "type":"structure", + "members":{ + "ids":{ + "shape":"StringList", + "documentation":"

    A page from the list of model transform IDs.

    " + } + } + }, + "ListOpenCypherQueriesInput":{ + "type":"structure", + "members":{ + "includeWaiting":{ + "shape":"Boolean", + "documentation":"

    When set to TRUE and no other parameters are present, status information is returned for waiting queries as well as for running queries.

    ", + "location":"querystring", + "locationName":"includeWaiting" + } + } + }, + "ListOpenCypherQueriesOutput":{ + "type":"structure", + "members":{ + "acceptedQueryCount":{ + "shape":"Integer", + "documentation":"

    The number of queries that have been accepted but not yet completed, including queries in the queue.

    " + }, + "runningQueryCount":{ + "shape":"Integer", + "documentation":"

    The number of currently running openCypher queries.

    " + }, + "queries":{ + "shape":"OpenCypherQueries", + "documentation":"

    A list of current openCypher queries.

    " + } + } + }, + "LoadUrlAccessDeniedException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when access is denied to a specified load URL.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LoaderIdResult":{ + "type":"structure", + "members":{ + "loadIds":{ + "shape":"StringList", + "documentation":"

    A list of load IDs.

    " + } + }, + "documentation":"

    Contains a list of load IDs.

    " + }, + "Long":{ + "type":"long", + "box":true + }, + "LongValuedMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"Long"} + }, + "LongValuedMapList":{ + "type":"list", + "member":{"shape":"LongValuedMap"} + }, + "MLResourceNotFoundException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when a specified machine-learning resource could not be found.

    ", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "MalformedQueryException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the malformed query request.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when a query is submitted that is syntactically incorrect or does not pass additional validation.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ManagePropertygraphStatisticsInput":{ + "type":"structure", + "members":{ + "mode":{ + "shape":"StatisticsAutoGenerationMode", + "documentation":"

    The statistics generation mode. One of: DISABLE_AUTOCOMPUTE, ENABLE_AUTOCOMPUTE, or REFRESH, the last of which manually triggers DFE statistics generation.

    " + } + } + }, + "ManagePropertygraphStatisticsOutput":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"String", + "documentation":"

    The HTTP return code of the request. If the request succeeded, the code is 200.

    " + }, + "payload":{ + "shape":"RefreshStatisticsIdMap", + "documentation":"

    This is only returned for refresh mode.

    " + } + } + }, + "ManageSparqlStatisticsInput":{ + "type":"structure", + "members":{ + "mode":{ + "shape":"StatisticsAutoGenerationMode", + "documentation":"

    The statistics generation mode. One of: DISABLE_AUTOCOMPUTE, ENABLE_AUTOCOMPUTE, or REFRESH, the last of which manually triggers DFE statistics generation.

    " + } + } + }, + "ManageSparqlStatisticsOutput":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"String", + "documentation":"

    The HTTP return code of the request. If the request succeeded, the code is 200.

    " + }, + "payload":{ + "shape":"RefreshStatisticsIdMap", + "documentation":"

    This is only returned for refresh mode.

    " + } + } + }, + "MemoryLimitExceededException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request that failed.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when a request fails because of insufficient memory resources. The request can be retried.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "MethodNotAllowedException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when the HTTP method used by a request is not supported by the endpoint being used.

    ", + "error":{ + "httpStatusCode":405, + "senderFault":true + }, + "exception":true + }, + "MissingParameterException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in which the parameter is missing.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when a required parameter is missing.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "MlConfigDefinition":{ + "type":"structure", + "members":{ + "name":{ + "shape":"String", + "documentation":"

    The configuration name.

    " + }, + "arn":{ + "shape":"String", + "documentation":"

    The ARN for the configuration.

    " + } + }, + "documentation":"

    Contains a Neptune ML configuration.

    " + }, + "MlModels":{ + "type":"list", + "member":{"shape":"MlConfigDefinition"} + }, + "MlResourceDefinition":{ + "type":"structure", + "members":{ + "name":{ + "shape":"String", + "documentation":"

    The resource name.

    " + }, + "arn":{ + "shape":"String", + "documentation":"

    The resource ARN.

    " + }, + "status":{ + "shape":"String", + "documentation":"

    The resource status.

    " + }, + "outputLocation":{ + "shape":"String", + "documentation":"

    The output location.

    " + }, + "failureReason":{ + "shape":"String", + "documentation":"

    The failure reason, in case of a failure.

    " + }, + "cloudwatchLogUrl":{ + "shape":"String", + "documentation":"

    The CloudWatch log URL for the resource.

    " + } + }, + "documentation":"

    Defines a Neptune ML resource.

    " + }, + "Mode":{ + "type":"string", + "enum":[ + "RESUME", + "NEW", + "AUTO" + ] + }, + "Models":{ + "type":"list", + "member":{"shape":"MlConfigDefinition"} + }, + "NodeLabels":{ + "type":"list", + "member":{"shape":"String"} + }, + "NodeProperties":{ + "type":"list", + "member":{"shape":"String"} + }, + "NodeStructure":{ + "type":"structure", + "members":{ + "count":{ + "shape":"Long", + "documentation":"

    Number of nodes that have this specific structure.

    " + }, + "nodeProperties":{ + "shape":"NodeProperties", + "documentation":"

    A list of the node properties present in this specific structure.

    " + }, + "distinctOutgoingEdgeLabels":{ + "shape":"OutgoingEdgeLabels", + "documentation":"

    A list of distinct outgoing edge labels present in this specific structure.

    " + } + }, + "documentation":"

    A node structure.

    " + }, + "NodeStructures":{ + "type":"list", + "member":{"shape":"NodeStructure"} + }, + "OpenCypherExplainMode":{ + "type":"string", + "enum":[ + "static", + "dynamic", + "details" + ] + }, + "OpenCypherQueries":{ + "type":"list", + "member":{"shape":"GremlinQueryStatus"} + }, + "OutgoingEdgeLabels":{ + "type":"list", + "member":{"shape":"String"} + }, + "Parallelism":{ + "type":"string", + "enum":[ + "LOW", + "MEDIUM", + "HIGH", + "OVERSUBSCRIBE" + ] + }, + "ParsingException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when a parsing issue is encountered.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "PositiveInteger":{ + "type":"integer", + "box":true, + "min":1 + }, + "PreconditionsFailedException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when a precondition for processing a request is not satisfied.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "Predicates":{ + "type":"list", + "member":{"shape":"String"} + }, + "PropertygraphData":{ + "type":"structure", + "required":[ + "id", + "type", + "key", + "value" + ], + "members":{ + "id":{ + "shape":"String", + "documentation":"

    The ID of the Gremlin or openCypher element.

    " + }, + "type":{ + "shape":"String", + "documentation":"

    The type of this Gremlin or openCypher element. Must be one of:

    • vl   -   Vertex label for Gremlin, or node label for openCypher.

    • vp   -   Vertex properties for Gremlin, or node properties for openCypher.

    • e   -   Edge and edge label for Gremlin, or relationship and relationship type for openCypher.

    • ep   -   Edge properties for Gremlin, or relationship properties for openCypher.

    " + }, + "key":{ + "shape":"String", + "documentation":"

    The property name. For element labels, this is label.

    " + }, + "value":{ + "shape":"Document", + "documentation":"

    This is a JSON object that contains a value field for the value itself, and a datatype field for the JSON data type of that value:

    " + }, + "from":{ + "shape":"String", + "documentation":"

    If this is an edge (type = e), the ID of the corresponding from vertex or source node.

    " + }, + "to":{ + "shape":"String", + "documentation":"

    If this is an edge (type = e), the ID of the corresponding to vertex or target node.

    " + } + }, + "documentation":"

    A Gremlin or openCypher change record.
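    For illustration only, the sketch below prints a made-up change-record data object using the fields documented above (id, type, key, value, from, to); the nested value/datatype shape follows the description of the value member, and the concrete values are invented.

        public class ChangeRecordDataExample {
            public static void main(String[] args) {
                // Illustrative only: a made-up Gremlin/openCypher change-record data
                // object. The value object carries the value itself plus a datatype
                // field, as described for the value member above.
                String changeData = """
                        {
                          "id": "e-1",
                          "type": "e",
                          "key": "label",
                          "value": { "value": "knows", "datatype": "String" },
                          "from": "v-1",
                          "to": "v-2"
                        }
                        """;
                System.out.println(changeData);
            }
        }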

    " + }, + "PropertygraphRecord":{ + "type":"structure", + "required":[ + "commitTimestampInMillis", + "eventId", + "data", + "op" + ], + "members":{ + "commitTimestampInMillis":{ + "shape":"Long", + "documentation":"

    The time at which the commit for the transaction was requested, in milliseconds from the Unix epoch.

    ", + "locationName":"commitTimestamp" + }, + "eventId":{ + "shape":"StringValuedMap", + "documentation":"

    The sequence identifier of the stream change record.

    " + }, + "data":{ + "shape":"PropertygraphData", + "documentation":"

    The serialized Gremlin or openCypher change record.

    " + }, + "op":{ + "shape":"String", + "documentation":"

    The operation that created the change.

    " + }, + "isLastOp":{ + "shape":"Boolean", + "documentation":"

    Only present if this operation is the last one in its transaction. If present, it is set to true. It is useful for ensuring that an entire transaction is consumed.

    " + } + }, + "documentation":"

    Structure of a property graph record.

    " + }, + "PropertygraphRecordsList":{ + "type":"list", + "member":{"shape":"PropertygraphRecord"} + }, + "PropertygraphSummary":{ + "type":"structure", + "members":{ + "numNodes":{ + "shape":"Long", + "documentation":"

    The number of nodes in the graph.

    " + }, + "numEdges":{ + "shape":"Long", + "documentation":"

    The number of edges in the graph.

    " + }, + "numNodeLabels":{ + "shape":"Long", + "documentation":"

    The number of distinct node labels in the graph.

    " + }, + "numEdgeLabels":{ + "shape":"Long", + "documentation":"

    The number of distinct edge labels in the graph.

    " + }, + "nodeLabels":{ + "shape":"NodeLabels", + "documentation":"

    A list of the distinct node labels in the graph.

    " + }, + "edgeLabels":{ + "shape":"EdgeLabels", + "documentation":"

    A list of the distinct edge labels in the graph.

    " + }, + "numNodeProperties":{ + "shape":"Long", + "documentation":"

    The number of distinct node properties in the graph.

    " + }, + "numEdgeProperties":{ + "shape":"Long", + "documentation":"

    The number of distinct edge properties in the graph.

    " + }, + "nodeProperties":{ + "shape":"LongValuedMapList", + "documentation":"

    A list of the distinct node properties in the graph, along with the count of nodes where each property is used.

    " + }, + "edgeProperties":{ + "shape":"LongValuedMapList", + "documentation":"

    A list of the distinct edge properties in the graph, along with the count of edges where each property is used.

    " + }, + "totalNodePropertyValues":{ + "shape":"Long", + "documentation":"

    The total number of usages of all node properties.

    " + }, + "totalEdgePropertyValues":{ + "shape":"Long", + "documentation":"

    The total number of usages of all edge properties.

    " + }, + "nodeStructures":{ + "shape":"NodeStructures", + "documentation":"

    This field is only present when the requested mode is DETAILED. It contains a list of node structures.

    " + }, + "edgeStructures":{ + "shape":"EdgeStructures", + "documentation":"

    This field is only present when the requested mode is DETAILED. It contains a list of edge structures.

    " + } + }, + "documentation":"

    The graph summary API returns a read-only list of node and edge labels and property keys, along with counts of nodes, edges, and properties. See Graph summary response for a property graph (PG).

    " + }, + "PropertygraphSummaryValueMap":{ + "type":"structure", + "members":{ + "version":{ + "shape":"String", + "documentation":"

    The version of this graph summary response.

    " + }, + "lastStatisticsComputationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

    The timestamp, in ISO 8601 format, of the time at which Neptune last computed statistics.

    " + }, + "graphSummary":{ + "shape":"PropertygraphSummary", + "documentation":"

    The graph summary.

    " + } + }, + "documentation":"

    Payload for the property graph summary response.

    " + }, + "QueryEvalStats":{ + "type":"structure", + "members":{ + "waited":{ + "shape":"Integer", + "documentation":"

    Indicates how long the query waited, in milliseconds.

    " + }, + "elapsed":{ + "shape":"Integer", + "documentation":"

    The number of milliseconds the query has been running so far.

    " + }, + "cancelled":{ + "shape":"Boolean", + "documentation":"

    Set to TRUE if the query was cancelled, or FALSE otherwise.

    " + }, + "subqueries":{ + "shape":"Document", + "documentation":"

    The number of subqueries in this query.

    " + } + }, + "documentation":"

    Structure to capture query statistics, such as how many queries are running, accepted, or waiting, and their details.

    " + }, + "QueryLanguageVersion":{ + "type":"structure", + "required":["version"], + "members":{ + "version":{ + "shape":"String", + "documentation":"

    The version of the query language.

    " + } + }, + "documentation":"

    Structure for expressing the query language version.

    " + }, + "QueryLimitExceededException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request which exceeded the limit.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when the number of active queries exceeds what the server can process. The query in question can be retried when the system is less busy.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "QueryLimitException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request that exceeded the limit.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when the size of a query exceeds the system limit.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "QueryTooLargeException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request that is too large.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when the body of a query is too large.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "RDFGraphSummary":{ + "type":"structure", + "members":{ + "numDistinctSubjects":{ + "shape":"Long", + "documentation":"

    The number of distinct subjects in the graph.

    " + }, + "numDistinctPredicates":{ + "shape":"Long", + "documentation":"

    The number of distinct predicates in the graph.

    " + }, + "numQuads":{ + "shape":"Long", + "documentation":"

    The number of quads in the graph.

    " + }, + "numClasses":{ + "shape":"Long", + "documentation":"

    The number of classes in the graph.

    " + }, + "classes":{ + "shape":"Classes", + "documentation":"

    A list of the classes in the graph.

    " + }, + "predicates":{ + "shape":"LongValuedMapList", + "documentation":"

    A list of predicates in the graph, along with the predicate counts.

    " + }, + "subjectStructures":{ + "shape":"SubjectStructures", + "documentation":"

    This field is only present when the requested mode is DETAILED. It contains a list of subject structures.

    " + } + }, + "documentation":"

    The RDF graph summary API returns a read-only list of classes and predicate keys, along with counts of quads, subjects, and predicates.

    " + }, + "RDFGraphSummaryValueMap":{ + "type":"structure", + "members":{ + "version":{ + "shape":"String", + "documentation":"

    The version of this graph summary response.

    " + }, + "lastStatisticsComputationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

    The timestamp, in ISO 8601 format, of the time at which Neptune last computed statistics.

    " + }, + "graphSummary":{ + "shape":"RDFGraphSummary", + "documentation":"

    The graph summary of an RDF graph. See Graph summary response for an RDF graph.

    " + } + }, + "documentation":"

    Payload for an RDF graph summary response.

    " + }, + "ReadOnlyViolationException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when a request attempts to write to a read-only resource.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "RefreshStatisticsIdMap":{ + "type":"structure", + "members":{ + "statisticsId":{ + "shape":"String", + "documentation":"

    The ID of the statistics generation run that is currently occurring.

    " + } + }, + "documentation":"

    Statistics for REFRESH mode.

    " + }, + "ReportAsText":{"type":"blob"}, + "S3BucketRegion":{ + "type":"string", + "enum":[ + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + "ca-central-1", + "sa-east-1", + "eu-north-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "eu-central-1", + "me-south-1", + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-south-1", + "cn-north-1", + "cn-northwest-1", + "us-gov-west-1", + "us-gov-east-1" + ] + }, + "S3Exception":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when there is a problem accessing Amazon S3.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":false} + }, + "ServerShutdownException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when the server shuts down while processing a request.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "SparqlData":{ + "type":"structure", + "required":["stmt"], + "members":{ + "stmt":{ + "shape":"String", + "documentation":"

    Holds an N-QUADS statement expressing the changed quad.

    " + } + }, + "documentation":"

    Neptune logs are converted to SPARQL quads in the graph using the Resource Description Framework (RDF) N-QUADS language defined in the W3C RDF 1.1 N-Quads specification.

    " + }, + "SparqlRecord":{ + "type":"structure", + "required":[ + "commitTimestampInMillis", + "eventId", + "data", + "op" + ], + "members":{ + "commitTimestampInMillis":{ + "shape":"Long", + "documentation":"

    The time at which the commit for the transaction was requested, in milliseconds from the Unix epoch.

    ", + "locationName":"commitTimestamp" + }, + "eventId":{ + "shape":"StringValuedMap", + "documentation":"

    The sequence identifier of the stream change record.

    " + }, + "data":{ + "shape":"SparqlData", + "documentation":"

    The serialized SPARQL change record. The serialization formats of each record are described in more detail in Serialization Formats in Neptune Streams.

    " + }, + "op":{ + "shape":"String", + "documentation":"

    The operation that created the change.

    " + }, + "isLastOp":{ + "shape":"Boolean", + "documentation":"

    Only present if this operation is the last one in its transaction. If present, it is set to true. It is useful for ensuring that an entire transaction is consumed.

    " + } + }, + "documentation":"

    A serialized SPARQL stream record capturing a change-log entry for the RDF graph.

    " + }, + "SparqlRecordsList":{ + "type":"list", + "member":{"shape":"SparqlRecord"} + }, + "StartLoaderJobInput":{ + "type":"structure", + "required":[ + "source", + "format", + "s3BucketRegion", + "iamRoleArn" + ], + "members":{ + "source":{ + "shape":"String", + "documentation":"

    The source parameter accepts an S3 URI that identifies a single file, multiple files, a folder, or multiple folders. Neptune loads every data file in any folder that is specified.

    The URI can be in any of the following formats.

    • s3://(bucket_name)/(object-key-name)

    • https://s3.amazonaws.com/(bucket_name)/(object-key-name)

    • https://s3.us-east-1.amazonaws.com/(bucket_name)/(object-key-name)

    The object-key-name element of the URI is equivalent to the prefix parameter in an S3 ListObjects API call. It identifies all the objects in the specified S3 bucket whose names begin with that prefix. That can be a single file or folder, or multiple files and/or folders.

    The specified folder or folders can contain multiple vertex files and multiple edge files.

    " + }, + "format":{ + "shape":"Format", + "documentation":"

    The format of the data. For more information about data formats for the Neptune Loader command, see Load Data Formats.

    Allowed values

    " + }, + "s3BucketRegion":{ + "shape":"S3BucketRegion", + "documentation":"

    The Amazon Region of the S3 bucket. This must match the Amazon Region of the DB cluster.

    ", + "locationName":"region" + }, + "iamRoleArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) for an IAM role to be assumed by the Neptune DB instance for access to the S3 bucket. The IAM role ARN provided here should be attached to the DB cluster (see Adding the IAM Role to an Amazon Neptune Cluster).

    " + }, + "mode":{ + "shape":"Mode", + "documentation":"

    The load job mode.

    Allowed values: RESUME, NEW, AUTO.

    Default value: AUTO.

    • RESUME   –   In RESUME mode, the loader looks for a previous load from this source, and if it finds one, resumes that load job. If no previous load job is found, the loader stops.

      The loader avoids reloading files that were successfully loaded in a previous job. It only tries to process failed files. If you dropped previously loaded data from your Neptune cluster, that data is not reloaded in this mode. If a previous load job loaded all files from the same source successfully, nothing is reloaded, and the loader returns success.

    • NEW   –   In NEW mode, the loader creates a new load request regardless of any previous loads. You can use this mode to reload all the data from a source after dropping previously loaded data from your Neptune cluster, or to load new data available at the same source.

    • AUTO   –   In AUTO mode, the loader looks for a previous load job from the same source, and if it finds one, resumes that job, just as in RESUME mode.

      If the loader doesn't find a previous load job from the same source, it loads all data from the source, just as in NEW mode.

    " + }, + "failOnError":{ + "shape":"Boolean", + "documentation":"

    failOnError   –   A flag to toggle a complete stop on an error.

    Allowed values: \"TRUE\", \"FALSE\".

    Default value: \"TRUE\".

    When this parameter is set to \"FALSE\", the loader tries to load all the data in the location specified, skipping any entries with errors.

    When this parameter is set to \"TRUE\", the loader stops as soon as it encounters an error. Data loaded up to that point persists.

    " + }, + "parallelism":{ + "shape":"Parallelism", + "documentation":"

    The optional parallelism parameter can be set to reduce the number of threads used by the bulk load process.

    Allowed values:

    • LOW –   The number of threads used is the number of available vCPUs divided by 8.

    • MEDIUM –   The number of threads used is the number of available vCPUs divided by 2.

    • HIGH –   The number of threads used is the same as the number of available vCPUs.

    • OVERSUBSCRIBE –   The number of threads used is the number of available vCPUs multiplied by 2. If this value is used, the bulk loader takes up all available resources.

      This does not mean, however, that the OVERSUBSCRIBE setting results in 100% CPU utilization. Because the load operation is I/O bound, the highest CPU utilization to expect is in the 60% to 70% range.

    Default value: HIGH

    The parallelism setting can sometimes result in a deadlock between threads when loading openCypher data. When this happens, Neptune returns the LOAD_DATA_DEADLOCK error. You can generally fix the issue by setting parallelism to a lower setting and retrying the load command.
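    As a quick worked example of the divisors and multiplier above (purely illustrative), an instance reporting 16 available vCPUs would use the following thread counts:

        public class ParallelismExample {
            public static void main(String[] args) {
                int vcpus = 16; // example instance size
                System.out.println("LOW           -> " + (vcpus / 8)); // 2 threads
                System.out.println("MEDIUM        -> " + (vcpus / 2)); // 8 threads
                System.out.println("HIGH          -> " + vcpus);       // 16 threads
                System.out.println("OVERSUBSCRIBE -> " + (vcpus * 2)); // 32 threads
            }
        }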

    " + }, + "parserConfiguration":{ + "shape":"StringValuedMap", + "documentation":"

    parserConfiguration   –   An optional object with additional parser configuration values. Each of the child parameters is also optional:

    • namedGraphUri   –   The default graph for all RDF formats when no graph is specified (for non-quads formats and NQUAD entries with no graph).

      The default is https://aws.amazon.com/neptune/vocab/v01/DefaultNamedGraph.

    • baseUri   –   The base URI for RDF/XML and Turtle formats.

      The default is https://aws.amazon.com/neptune/default.

    • allowEmptyStrings   –   Gremlin users need to be able to pass empty string values (\"\") as node and edge properties when loading CSV data. If allowEmptyStrings is set to false (the default), such empty strings are treated as nulls and are not loaded.

      If allowEmptyStrings is set to true, the loader treats empty strings as valid property values and loads them accordingly.

    " + }, + "updateSingleCardinalityProperties":{ + "shape":"Boolean", + "documentation":"

    updateSingleCardinalityProperties is an optional parameter that controls how the bulk loader treats a new value for single-cardinality vertex or edge properties. This is not supported for loading openCypher data.

    Allowed values: \"TRUE\", \"FALSE\".

    Default value: \"FALSE\".

    By default, or when updateSingleCardinalityProperties is explicitly set to \"FALSE\", the loader treats a new value as an error, because it violates single cardinality.

    When updateSingleCardinalityProperties is set to \"TRUE\", on the other hand, the bulk loader replaces the existing value with the new one. If multiple edge or single-cardinality vertex property values are provided in the source file(s) being loaded, the final value at the end of the bulk load could be any one of those new values. The loader only guarantees that the existing value has been replaced by one of the new ones.

    " + }, + "queueRequest":{ + "shape":"Boolean", + "documentation":"

    This is an optional flag parameter that indicates whether the load request can be queued up or not.

    You don't have to wait for one load job to complete before issuing the next one, because Neptune can queue up as many as 64 jobs at a time, provided that their queueRequest parameters are all set to \"TRUE\".

    If the queueRequest parameter is omitted or set to \"FALSE\", the load request will fail if another load job is already running.

    Allowed values: \"TRUE\", \"FALSE\".

    Default value: \"FALSE\".

    " + }, + "dependencies":{ + "shape":"StringList", + "documentation":"

    This is an optional parameter that can make a queued load request contingent on the successful completion of one or more previous jobs in the queue.

    Neptune can queue up as many as 64 load requests at a time, if their queueRequest parameters are set to \"TRUE\". The dependencies parameter lets you make execution of such a queued request dependent on the successful completion of one or more specified previous requests in the queue.

    For example, if load Job-A and Job-B are independent of each other, but load Job-C needs Job-A and Job-B to be finished before it begins, proceed as follows:

    1. Submit load-job-A and load-job-B one after another in any order, and save their load-ids.

    2. Submit load-job-C with the load-ids of the two jobs in its dependencies field:

    Because of the dependencies parameter, the bulk loader will not start Job-C until Job-A and Job-B have completed successfully. If either one of them fails, Job-C will not be executed, and its status will be set to LOAD_FAILED_BECAUSE_DEPENDENCY_NOT_SATISFIED.

    You can set up multiple levels of dependency in this way, so that the failure of one job will cause all requests that are directly or indirectly dependent on it to be cancelled.
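    Continuing the Job-A/Job-B/Job-C example above, the sketch below illustrates step 2: submitting Job-C as a queued load whose dependencies are the load IDs saved from Job-A and Job-B. The endpoint URL and /loader path, the format value, and the bucket and role ARN are invented for illustration; the JSON field names mirror the loader parameters documented here.

        import java.net.URI;
        import java.net.http.HttpClient;
        import java.net.http.HttpRequest;
        import java.net.http.HttpResponse;

        public class QueuedLoaderJobExample {
            public static void main(String[] args) throws Exception {
                // Load IDs saved from the earlier submissions of Job-A and Job-B.
                String loadIdA = "load-id-of-job-a";
                String loadIdB = "load-id-of-job-b";

                // Field names mirror the loader parameters documented above; the
                // concrete values and the endpoint are invented for illustration.
                String body = """
                        {
                          "source": "s3://my-bucket/job-c/",
                          "format": "csv",
                          "region": "us-east-1",
                          "iamRoleArn": "arn:aws:iam::123456789012:role/NeptuneLoadFromS3",
                          "queueRequest": "TRUE",
                          "dependencies": ["%s", "%s"]
                        }
                        """.formatted(loadIdA, loadIdB);

                HttpRequest request = HttpRequest.newBuilder(
                                URI.create("https://my-neptune-cluster:8182/loader"))
                        .header("Content-Type", "application/json")
                        .POST(HttpRequest.BodyPublishers.ofString(body))
                        .build();

                HttpResponse<String> response = HttpClient.newHttpClient()
                        .send(request, HttpResponse.BodyHandlers.ofString());

                // The response includes a loadId for Job-C; Neptune starts it only
                // after Job-A and Job-B complete successfully.
                System.out.println(response.body());
            }
        }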

    " + }, + "userProvidedEdgeIds":{ + "shape":"Boolean", + "documentation":"

    This parameter is required only when loading openCypher data that contains relationship IDs. It must be included and set to True when openCypher relationship IDs are explicitly provided in the load data (recommended).

    When userProvidedEdgeIds is absent or set to True, an :ID column must be present in every relationship file in the load.

    When userProvidedEdgeIds is present and set to False, relationship files in the load must not contain an :ID column. Instead, the Neptune loader automatically generates an ID for each relationship.

    It's useful to provide relationship IDs explicitly so that the loader can resume loading after errors in the CSV data have been fixed, without having to reload any relationships that have already been loaded. If relationship IDs have not been explicitly assigned, the loader cannot resume a failed load if any relationship file has had to be corrected, and must instead reload all the relationships.

    " + } + } + }, + "StartLoaderJobOutput":{ + "type":"structure", + "required":[ + "status", + "payload" + ], + "members":{ + "status":{ + "shape":"String", + "documentation":"

    The HTTP return code indicating the status of the load job.

    " + }, + "payload":{ + "shape":"StringValuedMap", + "documentation":"

    Contains a loadId name-value pair that provides an identifier for the load operation.

    " + } + } + }, + "StartMLDataProcessingJobInput":{ + "type":"structure", + "required":[ + "inputDataS3Location", + "processedDataS3Location" + ], + "members":{ + "id":{ + "shape":"String", + "documentation":"

    A unique identifier for the new job. The default is an autogenerated UUID.

    " + }, + "previousDataProcessingJobId":{ + "shape":"String", + "documentation":"

    The job ID of a completed data processing job run on an earlier version of the data.

    " + }, + "inputDataS3Location":{ + "shape":"String", + "documentation":"

    The URI of the Amazon S3 location where you want SageMaker to download the data needed to run the data processing job.

    " + }, + "processedDataS3Location":{ + "shape":"String", + "documentation":"

    The URI of the Amazon S3 location where you want SageMaker to save the results of a data processing job.

    " + }, + "sagemakerIamRoleArn":{ + "shape":"String", + "documentation":"

    The ARN of an IAM role for SageMaker execution. This must be listed in your DB cluster parameter group or an error will occur.

    " + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of an IAM role that SageMaker can assume to perform tasks on your behalf. This must be listed in your DB cluster parameter group or an error will occur.

    " + }, + "processingInstanceType":{ + "shape":"String", + "documentation":"

    The type of ML instance used during data processing. Its memory should be large enough to hold the processed dataset. The default is the smallest ml.r5 type whose memory is ten times larger than the size of the exported graph data on disk.

    " + }, + "processingInstanceVolumeSizeInGB":{ + "shape":"Integer", + "documentation":"

    The disk volume size of the processing instance. Both input data and processed data are stored on disk, so the volume size must be large enough to hold both data sets. The default is 0. If not specified or 0, Neptune ML chooses the volume size automatically based on the data size.

    " + }, + "processingTimeOutInSeconds":{ + "shape":"Integer", + "documentation":"

    Timeout in seconds for the data processing job. The default is 86,400 (1 day).

    " + }, + "modelType":{ + "shape":"String", + "documentation":"

    One of the two model types that Neptune ML currently supports: heterogeneous graph models (heterogeneous) and knowledge graph models (kge). The default is none. If not specified, Neptune ML chooses the model type automatically based on the data.

    " + }, + "configFileName":{ + "shape":"String", + "documentation":"

    A data specification file that describes how to load the exported graph data for training. The file is automatically generated by the Neptune export toolkit. The default is training-data-configuration.json.

    " + }, + "subnets":{ + "shape":"StringList", + "documentation":"

    The IDs of the subnets in the Neptune VPC. The default is None.

    " + }, + "securityGroupIds":{ + "shape":"StringList", + "documentation":"

    The VPC security group IDs. The default is None.

    " + }, + "volumeEncryptionKMSKey":{ + "shape":"String", + "documentation":"

    The Amazon Key Management Service (Amazon KMS) key that SageMaker uses to encrypt data on the storage volume attached to the ML compute instances that run the training job. The default is None.

    " + }, + "s3OutputEncryptionKMSKey":{ + "shape":"String", + "documentation":"

    The Amazon Key Management Service (Amazon KMS) key that SageMaker uses to encrypt the output of the processing job. The default is none.

    " + } + } + }, + "StartMLDataProcessingJobOutput":{ + "type":"structure", + "members":{ + "id":{ + "shape":"String", + "documentation":"

    The unique ID of the new data processing job.

    " + }, + "arn":{ + "shape":"String", + "documentation":"

    The ARN of the data processing job.

    " + }, + "creationTimeInMillis":{ + "shape":"Long", + "documentation":"

    The time it took to create the new processing job, in milliseconds.

    " + } + } + }, + "StartMLModelTrainingJobInput":{ + "type":"structure", + "required":[ + "dataProcessingJobId", + "trainModelS3Location" + ], + "members":{ + "id":{ + "shape":"String", + "documentation":"

    A unique identifier for the new job. The default is an autogenerated UUID.

    " + }, + "previousModelTrainingJobId":{ + "shape":"String", + "documentation":"

    The job ID of a completed model-training job that you want to update incrementally based on updated data.

    " + }, + "dataProcessingJobId":{ + "shape":"String", + "documentation":"

    The job ID of the completed data-processing job that has created the data that the training will work with.

    " + }, + "trainModelS3Location":{ + "shape":"String", + "documentation":"

    The location in Amazon S3 where the model artifacts are to be stored.

    " + }, + "sagemakerIamRoleArn":{ + "shape":"String", + "documentation":"

    The ARN of an IAM role for SageMaker execution. This must be listed in your DB cluster parameter group or an error will occur.

    " + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"

    The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.

    " + }, + "baseProcessingInstanceType":{ + "shape":"String", + "documentation":"

    The type of ML instance used in preparing and managing training of ML models. This is a CPU instance chosen based on memory requirements for processing the training data and model.

    " + }, + "trainingInstanceType":{ + "shape":"String", + "documentation":"

    The type of ML instance used for model training. All Neptune ML models support CPU, GPU, and multiGPU training. The default is ml.p3.2xlarge. Choosing the right instance type for training depends on the task type, graph size, and your budget.

    " + }, + "trainingInstanceVolumeSizeInGB":{ + "shape":"Integer", + "documentation":"

    The disk volume size of the training instance. Both input data and the output model are stored on disk, so the volume size must be large enough to hold both data sets. The default is 0. If not specified or 0, Neptune ML selects a disk volume size based on the recommendation generated in the data processing step.

    " + }, + "trainingTimeOutInSeconds":{ + "shape":"Integer", + "documentation":"

    Timeout in seconds for the training job. The default is 86,400 (1 day).

    " + }, + "maxHPONumberOfTrainingJobs":{ + "shape":"Integer", + "documentation":"

    Maximum total number of training jobs to start for the hyperparameter tuning job. The default is 2. Neptune ML automatically tunes the hyperparameters of the machine learning model. To obtain a model that performs well, use at least 10 jobs (in other words, set maxHPONumberOfTrainingJobs to 10). In general, the more tuning runs, the better the results.

    " + }, + "maxHPOParallelTrainingJobs":{ + "shape":"Integer", + "documentation":"

    Maximum number of parallel training jobs to start for the hyperparameter tuning job. The default is 2. The number of parallel jobs you can run is limited by the available resources on your training instance.

    " + }, + "subnets":{ + "shape":"StringList", + "documentation":"

    The IDs of the subnets in the Neptune VPC. The default is None.

    " + }, + "securityGroupIds":{ + "shape":"StringList", + "documentation":"

    The VPC security group IDs. The default is None.

    " + }, + "volumeEncryptionKMSKey":{ + "shape":"String", + "documentation":"

    The Amazon Key Management Service (KMS) key that SageMaker uses to encrypt data on the storage volume attached to the ML compute instances that run the training job. The default is None.

    " + }, + "s3OutputEncryptionKMSKey":{ + "shape":"String", + "documentation":"

    The Amazon Key Management Service (KMS) key that SageMaker uses to encrypt the output of the processing job. The default is none.

    " + }, + "enableManagedSpotTraining":{ + "shape":"Boolean", + "documentation":"

    Optimizes the cost of training machine-learning models by using Amazon Elastic Compute Cloud spot instances. The default is False.

    " + }, + "customModelTrainingParameters":{ + "shape":"CustomModelTrainingParameters", + "documentation":"

    The configuration for custom model training. This is a JSON object.

    " + } + } + }, + "StartMLModelTrainingJobOutput":{ + "type":"structure", + "members":{ + "id":{ + "shape":"String", + "documentation":"

    The unique ID of the new model training job.

    " + }, + "arn":{ + "shape":"String", + "documentation":"

    The ARN of the new model training job.

    " + }, + "creationTimeInMillis":{ + "shape":"Long", + "documentation":"

    The model training job creation time, in milliseconds.

    " + } + } + }, + "StartMLModelTransformJobInput":{ + "type":"structure", + "required":["modelTransformOutputS3Location"], + "members":{ + "id":{ + "shape":"String", + "documentation":"

    A unique identifier for the new job. The default is an autogenerated UUID.

    " + }, + "dataProcessingJobId":{ + "shape":"String", + "documentation":"

    The job ID of a completed data-processing job. You must include either a dataProcessingJobId and an mlModelTrainingJobId, or a trainingJobName.

    " + }, + "mlModelTrainingJobId":{ + "shape":"String", + "documentation":"

    The job ID of a completed model-training job. You must include either a dataProcessingJobId and an mlModelTrainingJobId, or a trainingJobName.

    " + }, + "trainingJobName":{ + "shape":"String", + "documentation":"

    The name of a completed SageMaker training job. You must include either a dataProcessingJobId and an mlModelTrainingJobId, or a trainingJobName.

    " + }, + "modelTransformOutputS3Location":{ + "shape":"String", + "documentation":"

    The location in Amazon S3 where the model artifacts are to be stored.

    " + }, + "sagemakerIamRoleArn":{ + "shape":"String", + "documentation":"

    The ARN of an IAM role for SageMaker execution. This must be listed in your DB cluster parameter group or an error will occur.

    " + }, + "neptuneIamRoleArn":{ + "shape":"String", + "documentation":"

    The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 resources. This must be listed in your DB cluster parameter group or an error will occur.

    " + }, + "customModelTransformParameters":{ + "shape":"CustomModelTransformParameters", + "documentation":"

    Configuration information for a model transform using a custom model. The customModelTransformParameters object contains the following fields, which must have values compatible with the saved model parameters from the training job:

    " + }, + "baseProcessingInstanceType":{ + "shape":"String", + "documentation":"

    The type of ML instance used in preparing and managing training of ML models. This is an ML compute instance chosen based on memory requirements for processing the training data and model.

    " + }, + "baseProcessingInstanceVolumeSizeInGB":{ + "shape":"Integer", + "documentation":"

    The disk volume size of the training instance in gigabytes. The default is 0. Both input data and the output model are stored on disk, so the volume size must be large enough to hold both data sets. If not specified or 0, Neptune ML selects a disk volume size based on the recommendation generated in the data processing step.

    " + }, + "subnets":{ + "shape":"StringList", + "documentation":"

    The IDs of the subnets in the Neptune VPC. The default is None.

    " + }, + "securityGroupIds":{ + "shape":"StringList", + "documentation":"

    The VPC security group IDs. The default is None.

    " + }, + "volumeEncryptionKMSKey":{ + "shape":"String", + "documentation":"

    The Amazon Key Management Service (KMS) key that SageMaker uses to encrypt data on the storage volume attached to the ML compute instances that run the training job. The default is None.

    " + }, + "s3OutputEncryptionKMSKey":{ + "shape":"String", + "documentation":"

    The Amazon Key Management Service (KMS) key that SageMaker uses to encrypt the output of the processing job. The default is none.

    " + } + } + }, + "StartMLModelTransformJobOutput":{ + "type":"structure", + "members":{ + "id":{ + "shape":"String", + "documentation":"

    The unique ID of the new model transform job.

    " + }, + "arn":{ + "shape":"String", + "documentation":"

    The ARN of the model transform job.

    " + }, + "creationTimeInMillis":{ + "shape":"Long", + "documentation":"

    The creation time of the model transform job, in milliseconds.

    " + } + } + }, + "Statistics":{ + "type":"structure", + "members":{ + "autoCompute":{ + "shape":"Boolean", + "documentation":"

    Indicates whether or not automatic statistics generation is enabled.

    " + }, + "active":{ + "shape":"Boolean", + "documentation":"

    Indicates whether or not DFE statistics generation is enabled at all.

    " + }, + "statisticsId":{ + "shape":"String", + "documentation":"

    Reports the ID of the current statistics generation run. A value of -1 indicates that no statistics have been generated.

    " + }, + "date":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

    The UTC time at which DFE statistics have most recently been generated.

    " + }, + "note":{ + "shape":"String", + "documentation":"

    A note about problems in the case where statistics are invalid.

    " + }, + "signatureInfo":{ + "shape":"StatisticsSummary", + "documentation":"

    A StatisticsSummary structure that contains:

    • signatureCount - The total number of signatures across all characteristic sets.

    • instanceCount - The total number of characteristic-set instances.

    • predicateCount - The total number of unique predicates.

    " + } + }, + "documentation":"

    Contains statistics information. The DFE engine uses information about the data in your Neptune graph to make effective trade-offs when planning query execution. This information takes the form of statistics that include so-called characteristic sets and predicate statistics that can guide query planning. See Managing statistics for the Neptune DFE to use.

    " + }, + "StatisticsAutoGenerationMode":{ + "type":"string", + "enum":[ + "disableAutoCompute", + "enableAutoCompute", + "refresh" + ] + }, + "StatisticsNotAvailableException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when statistics needed to satisfy a request are not available.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "StatisticsSummary":{ + "type":"structure", + "members":{ + "signatureCount":{ + "shape":"Integer", + "documentation":"

    The total number of signatures across all characteristic sets.

    " + }, + "instanceCount":{ + "shape":"Integer", + "documentation":"

    The total number of characteristic-set instances.

    " + }, + "predicateCount":{ + "shape":"Integer", + "documentation":"

    The total number of unique predicates.

    " + } + }, + "documentation":"

    Information about the characteristic sets generated in the statistics.

    " + }, + "StreamRecordsNotFoundException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when stream records requested by a query cannot be found.

    ", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "StringValuedMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "SubjectStructure":{ + "type":"structure", + "members":{ + "count":{ + "shape":"Long", + "documentation":"

    Number of occurrences of this specific structure.

    " + }, + "predicates":{ + "shape":"Predicates", + "documentation":"

    A list of predicates present in this specific structure.

    " + } + }, + "documentation":"

    A subject structure.

    " + }, + "SubjectStructures":{ + "type":"list", + "member":{"shape":"SubjectStructure"} + }, + "SyntheticTimestamp_date_time":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "ThrottlingException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request that could not be processed for this reason.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when the rate of requests exceeds the maximum throughput. Requests can be retried after encountering this exception.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "TimeLimitExceededException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request that could not be processed for this reason.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

Raised when an operation exceeds the time limit allowed for it.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "TooManyRequestsException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request that could not be processed for this reason.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when the number of requests being processed exceeds the limit.

    ", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":false} + }, + "UnsupportedOperationException":{ + "type":"structure", + "required":[ + "detailedMessage", + "requestId", + "code" + ], + "members":{ + "detailedMessage":{ + "shape":"String", + "documentation":"

    A detailed message describing the problem.

    " + }, + "requestId":{ + "shape":"String", + "documentation":"

    The ID of the request in question.

    " + }, + "code":{ + "shape":"String", + "documentation":"

    The HTTP status code returned with the exception.

    " + } + }, + "documentation":"

    Raised when a request attempts to initiate an operation that is not supported.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + }, + "documentation":"

    Neptune Data API

    The Amazon Neptune data API provides SDK support for more than 40 of Neptune's data operations, including data loading, query execution, data inquiry, and machine learning. It supports the Gremlin and openCypher query languages, and is available in all SDK languages. It automatically signs API requests and greatly simplifies integrating Neptune into your applications.

    " +} diff --git a/services/networkfirewall/pom.xml b/services/networkfirewall/pom.xml index e4f0b6981b08..33b46d51825f 100644 --- a/services/networkfirewall/pom.xml +++ b/services/networkfirewall/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT networkfirewall AWS Java SDK :: Services :: Network Firewall diff --git a/services/networkfirewall/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/networkfirewall/src/main/resources/codegen-resources/endpoint-rule-set.json index d7fab30461fe..233c4205f82c 100644 --- a/services/networkfirewall/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/networkfirewall/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://network-firewall-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://network-firewall-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": 
[], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://network-firewall-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://network-firewall-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://network-firewall.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://network-firewall.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://network-firewall.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://network-firewall.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/networkfirewall/src/main/resources/codegen-resources/service-2.json b/services/networkfirewall/src/main/resources/codegen-resources/service-2.json index 1c4d6ab4c787..e4764213633c 100644 --- a/services/networkfirewall/src/main/resources/codegen-resources/service-2.json +++ 
b/services/networkfirewall/src/main/resources/codegen-resources/service-2.json @@ -114,9 +114,11 @@ "errors":[ {"shape":"InvalidRequestException"}, {"shape":"ThrottlingException"}, - {"shape":"InternalServerError"} + {"shape":"InternalServerError"}, + {"shape":"LimitExceededException"}, + {"shape":"InsufficientCapacityException"} ], - "documentation":"

    Creates an Network Firewall TLS inspection configuration. A TLS inspection configuration contains the Certificate Manager certificate references that Network Firewall uses to decrypt and re-encrypt inbound traffic.

    After you create a TLS inspection configuration, you associate it with a firewall policy.

    To update the settings for a TLS inspection configuration, use UpdateTLSInspectionConfiguration.

    To manage a TLS inspection configuration's tags, use the standard Amazon Web Services resource tagging operations, ListTagsForResource, TagResource, and UntagResource.

    To retrieve information about TLS inspection configurations, use ListTLSInspectionConfigurations and DescribeTLSInspectionConfiguration.

    For more information about TLS inspection configurations, see Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.

    " + "documentation":"

Creates a Network Firewall TLS inspection configuration. A TLS inspection configuration contains the Certificate Manager certificate references that Network Firewall uses to decrypt and re-encrypt inbound traffic.

    After you create a TLS inspection configuration, you associate it with a new firewall policy.

    To update the settings for a TLS inspection configuration, use UpdateTLSInspectionConfiguration.

    To manage a TLS inspection configuration's tags, use the standard Amazon Web Services resource tagging operations, ListTagsForResource, TagResource, and UntagResource.

    To retrieve information about TLS inspection configurations, use ListTLSInspectionConfigurations and DescribeTLSInspectionConfiguration.

    For more information about TLS inspection configurations, see Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.

    " }, "DeleteFirewall":{ "name":"DeleteFirewall", @@ -1029,7 +1031,7 @@ }, "TLSInspectionConfiguration":{ "shape":"TLSInspectionConfiguration", - "documentation":"

    The object that defines a TLS inspection configuration. This, along with TLSInspectionConfigurationResponse, define the TLS inspection configuration. You can retrieve all objects for a TLS inspection configuration by calling DescribeTLSInspectionConfiguration.

    Network Firewall uses a TLS inspection configuration to decrypt traffic. Network Firewall re-encrypts the traffic before sending it to its destination.

    To use a TLS inspection configuration, you add it to a Network Firewall firewall policy, then you apply the firewall policy to a firewall. Network Firewall acts as a proxy service to decrypt and inspect inbound traffic. You can reference a TLS inspection configuration from more than one firewall policy, and you can use a firewall policy in more than one firewall. For more information about using TLS inspection configurations, see Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.

    " + "documentation":"

The object that defines a TLS inspection configuration. This, along with TLSInspectionConfigurationResponse, defines the TLS inspection configuration. You can retrieve all objects for a TLS inspection configuration by calling DescribeTLSInspectionConfiguration.

    Network Firewall uses a TLS inspection configuration to decrypt traffic. Network Firewall re-encrypts the traffic before sending it to its destination.

    To use a TLS inspection configuration, you add it to a new Network Firewall firewall policy, then you apply the firewall policy to a firewall. Network Firewall acts as a proxy service to decrypt and inspect inbound traffic. You can reference a TLS inspection configuration from more than one firewall policy, and you can use a firewall policy in more than one firewall. For more information about using TLS inspection configurations, see Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.

    " }, "Description":{ "shape":"Description", @@ -1410,7 +1412,7 @@ }, "TLSInspectionConfiguration":{ "shape":"TLSInspectionConfiguration", - "documentation":"

    The object that defines a TLS inspection configuration. This, along with TLSInspectionConfigurationResponse, define the TLS inspection configuration. You can retrieve all objects for a TLS inspection configuration by calling DescribeTLSInspectionConfiguration.

    Network Firewall uses a TLS inspection configuration to decrypt traffic. Network Firewall re-encrypts the traffic before sending it to its destination.

    To use a TLS inspection configuration, you add it to a Network Firewall firewall policy, then you apply the firewall policy to a firewall. Network Firewall acts as a proxy service to decrypt and inspect inbound traffic. You can reference a TLS inspection configuration from more than one firewall policy, and you can use a firewall policy in more than one firewall. For more information about using TLS inspection configurations, see Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.

    " + "documentation":"

The object that defines a TLS inspection configuration. This, along with TLSInspectionConfigurationResponse, defines the TLS inspection configuration. You can retrieve all objects for a TLS inspection configuration by calling DescribeTLSInspectionConfiguration.

    Network Firewall uses a TLS inspection configuration to decrypt traffic. Network Firewall re-encrypts the traffic before sending it to its destination.

    To use a TLS inspection configuration, you add it to a new Network Firewall firewall policy, then you apply the firewall policy to a firewall. Network Firewall acts as a proxy service to decrypt and inspect inbound traffic. You can reference a TLS inspection configuration from more than one firewall policy, and you can use a firewall policy in more than one firewall. For more information about using TLS inspection configurations, see Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.

    " }, "TLSInspectionConfigurationResponse":{ "shape":"TLSInspectionConfigurationResponse", @@ -2205,7 +2207,7 @@ }, "PaginationToken":{ "type":"string", - "max":2048, + "max":4096, "min":1, "pattern":"[0-9A-Za-z:\\/+=]+$" }, @@ -2547,11 +2549,11 @@ "members":{ "Keyword":{ "shape":"Keyword", - "documentation":"

    " + "documentation":"

    The keyword for the Suricata compatible rule option. You must include a sid (signature ID), and can optionally include other keywords. For information about Suricata compatible keywords, see Rule options in the Suricata documentation.

    " }, "Settings":{ "shape":"Settings", - "documentation":"

    " + "documentation":"

    The settings of the Suricata compatible rule option. Rule options have zero or more setting values, and the number of possible and required settings depends on the Keyword. For more information about the settings for specific options, see Rule options.

    " } }, "documentation":"

    Additional settings for a stateful rule. This is part of the StatefulRule configuration.

    " @@ -2604,7 +2606,7 @@ }, "StatefulRules":{ "shape":"StatefulRules", - "documentation":"

    An array of individual stateful rules inspection criteria to be used together in a stateful rule group. Use this option to specify simple Suricata rules with protocol, source and destination, ports, direction, and rule options. For information about the Suricata Rules format, see Rules Format.

    " + "documentation":"

    An array of individual stateful rules inspection criteria to be used together in a stateful rule group. Use this option to specify simple Suricata rules with protocol, source and destination, ports, direction, and rule options. For information about the Suricata Rules format, see Rules Format.

    " }, "StatelessRulesAndCustomActions":{ "shape":"StatelessRulesAndCustomActions", @@ -2770,7 +2772,7 @@ "members":{ "Action":{ "shape":"StatefulAction", - "documentation":"

    Defines what Network Firewall should do with the packets in a traffic flow when the flow matches the stateful rule criteria. For all actions, Network Firewall performs the specified action and discontinues stateful inspection of the traffic flow.

    The actions for a stateful rule are defined as follows:

    • PASS - Permits the packets to go to the intended destination.

    • DROP - Blocks the packets from going to the intended destination and sends an alert log message, if alert logging is configured in the Firewall LoggingConfiguration.

    • ALERT - Permits the packets to go to the intended destination and sends an alert log message, if alert logging is configured in the Firewall LoggingConfiguration.

      You can use this action to test a rule that you intend to use to drop traffic. You can enable the rule with ALERT action, verify in the logs that the rule is filtering as you want, then change the action to DROP.

    • REJECT - Drops TCP traffic that matches the conditions of the stateful rule, and sends a TCP reset packet back to sender of the packet. A TCP reset packet is a packet with no payload and a RST bit contained in the TCP header flags. Also sends an alert log mesage if alert logging is configured in the Firewall LoggingConfiguration.

      REJECT isn't currently available for use with IMAP and FTP protocols.

    " + "documentation":"

    Defines what Network Firewall should do with the packets in a traffic flow when the flow matches the stateful rule criteria. For all actions, Network Firewall performs the specified action and discontinues stateful inspection of the traffic flow.

    The actions for a stateful rule are defined as follows:

    • PASS - Permits the packets to go to the intended destination.

    • DROP - Blocks the packets from going to the intended destination and sends an alert log message, if alert logging is configured in the Firewall LoggingConfiguration.

    • ALERT - Permits the packets to go to the intended destination and sends an alert log message, if alert logging is configured in the Firewall LoggingConfiguration.

      You can use this action to test a rule that you intend to use to drop traffic. You can enable the rule with ALERT action, verify in the logs that the rule is filtering as you want, then change the action to DROP.

    " }, "Header":{ "shape":"Header", @@ -2781,7 +2783,7 @@ "documentation":"

    Additional options for the rule. These are the Suricata RuleOptions settings.

    " } }, - "documentation":"

    A single Suricata rules specification, for use in a stateful rule group. Use this option to specify a simple Suricata rule with protocol, source and destination, ports, direction, and rule options. For information about the Suricata Rules format, see Rules Format.

    " + "documentation":"

    A single Suricata rules specification, for use in a stateful rule group. Use this option to specify a simple Suricata rule with protocol, source and destination, ports, direction, and rule options. For information about the Suricata Rules format, see Rules Format.

    " }, "StatefulRuleDirection":{ "type":"string", @@ -3023,7 +3025,7 @@ "documentation":"

    Lists the server certificate configurations that are associated with the TLS configuration.

    " } }, - "documentation":"

    The object that defines a TLS inspection configuration. This, along with TLSInspectionConfigurationResponse, define the TLS inspection configuration. You can retrieve all objects for a TLS inspection configuration by calling DescribeTLSInspectionConfiguration.

    Network Firewall uses a TLS inspection configuration to decrypt traffic. Network Firewall re-encrypts the traffic before sending it to its destination.

    To use a TLS inspection configuration, you add it to a Network Firewall firewall policy, then you apply the firewall policy to a firewall. Network Firewall acts as a proxy service to decrypt and inspect inbound traffic. You can reference a TLS inspection configuration from more than one firewall policy, and you can use a firewall policy in more than one firewall. For more information about using TLS inspection configurations, see Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.

    " + "documentation":"

The object that defines a TLS inspection configuration. This, along with TLSInspectionConfigurationResponse, defines the TLS inspection configuration. You can retrieve all objects for a TLS inspection configuration by calling DescribeTLSInspectionConfiguration.

    Network Firewall uses a TLS inspection configuration to decrypt traffic. Network Firewall re-encrypts the traffic before sending it to its destination.

    To use a TLS inspection configuration, you add it to a new Network Firewall firewall policy, then you apply the firewall policy to a firewall. Network Firewall acts as a proxy service to decrypt and inspect inbound traffic. You can reference a TLS inspection configuration from more than one firewall policy, and you can use a firewall policy in more than one firewall. For more information about using TLS inspection configurations, see Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.

    " }, "TLSInspectionConfigurationMetadata":{ "type":"structure", @@ -3419,7 +3421,7 @@ }, "FirewallPolicy":{ "shape":"FirewallPolicy", - "documentation":"

    The updated firewall policy to use for the firewall.

    " + "documentation":"

    The updated firewall policy to use for the firewall. You can't add or remove a TLSInspectionConfiguration after you create a firewall policy. However, you can replace an existing TLS inspection configuration with another TLSInspectionConfiguration.

    " }, "Description":{ "shape":"Description", @@ -3606,7 +3608,7 @@ }, "TLSInspectionConfiguration":{ "shape":"TLSInspectionConfiguration", - "documentation":"

    The object that defines a TLS inspection configuration. This, along with TLSInspectionConfigurationResponse, define the TLS inspection configuration. You can retrieve all objects for a TLS inspection configuration by calling DescribeTLSInspectionConfiguration.

    Network Firewall uses a TLS inspection configuration to decrypt traffic. Network Firewall re-encrypts the traffic before sending it to its destination.

    To use a TLS inspection configuration, you add it to a Network Firewall firewall policy, then you apply the firewall policy to a firewall. Network Firewall acts as a proxy service to decrypt and inspect inbound traffic. You can reference a TLS inspection configuration from more than one firewall policy, and you can use a firewall policy in more than one firewall. For more information about using TLS inspection configurations, see Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.

    " + "documentation":"

The object that defines a TLS inspection configuration. This, along with TLSInspectionConfigurationResponse, defines the TLS inspection configuration. You can retrieve all objects for a TLS inspection configuration by calling DescribeTLSInspectionConfiguration.

    Network Firewall uses a TLS inspection configuration to decrypt traffic. Network Firewall re-encrypts the traffic before sending it to its destination.

    To use a TLS inspection configuration, you add it to a new Network Firewall firewall policy, then you apply the firewall policy to a firewall. Network Firewall acts as a proxy service to decrypt and inspect inbound traffic. You can reference a TLS inspection configuration from more than one firewall policy, and you can use a firewall policy in more than one firewall. For more information about using TLS inspection configurations, see Decrypting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.

    " }, "Description":{ "shape":"Description", diff --git a/services/networkmanager/pom.xml b/services/networkmanager/pom.xml index 9664a1374555..d79a618c29d4 100644 --- a/services/networkmanager/pom.xml +++ b/services/networkmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT networkmanager AWS Java SDK :: Services :: NetworkManager diff --git a/services/nimble/pom.xml b/services/nimble/pom.xml index b94f402c2679..8b0d5ec040c0 100644 --- a/services/nimble/pom.xml +++ b/services/nimble/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT nimble AWS Java SDK :: Services :: Nimble diff --git a/services/oam/pom.xml b/services/oam/pom.xml index b596e7b8b4b9..c6333209d6fa 100644 --- a/services/oam/pom.xml +++ b/services/oam/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT oam AWS Java SDK :: Services :: OAM diff --git a/services/omics/pom.xml b/services/omics/pom.xml index 988db6969270..31c579134609 100644 --- a/services/omics/pom.xml +++ b/services/omics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT omics AWS Java SDK :: Services :: Omics diff --git a/services/omics/src/main/resources/codegen-resources/paginators-1.json b/services/omics/src/main/resources/codegen-resources/paginators-1.json index 596e4bd7c24d..0254c8f78faa 100644 --- a/services/omics/src/main/resources/codegen-resources/paginators-1.json +++ b/services/omics/src/main/resources/codegen-resources/paginators-1.json @@ -6,6 +6,12 @@ "limit_key": "maxResults", "result_key": "annotationImportJobs" }, + "ListAnnotationStoreVersions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "annotationStoreVersions" + }, "ListAnnotationStores": { "input_token": "nextToken", "output_token": "nextToken", @@ -90,6 +96,12 @@ "limit_key": "maxResults", "result_key": "sequenceStores" }, + "ListShares": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "shares" + }, "ListVariantImportJobs": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/services/omics/src/main/resources/codegen-resources/service-2.json b/services/omics/src/main/resources/codegen-resources/service-2.json index 1d25521ae1b3..ce387de2ec31 100644 --- a/services/omics/src/main/resources/codegen-resources/service-2.json +++ b/services/omics/src/main/resources/codegen-resources/service-2.json @@ -35,6 +35,28 @@ "authtype":"v4", "endpoint":{"hostPrefix":"control-storage-"} }, + "AcceptShare":{ + "name":"AcceptShare", + "http":{ + "method":"POST", + "requestUri":"/share/{shareId}", + "responseCode":200 + }, + "input":{"shape":"AcceptShareRequest"}, + "output":{"shape":"AcceptShareResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Accepts a share for an analytics store.

    ", + "authtype":"v4", + "endpoint":{"hostPrefix":"analytics-"} + }, "BatchDeleteReadSet":{ "name":"BatchDeleteReadSet", "http":{ @@ -166,6 +188,28 @@ "authtype":"v4", "endpoint":{"hostPrefix":"analytics-"} }, + "CreateAnnotationStoreVersion":{ + "name":"CreateAnnotationStoreVersion", + "http":{ + "method":"POST", + "requestUri":"/annotationStore/{name}/version", + "responseCode":200 + }, + "input":{"shape":"CreateAnnotationStoreVersionRequest"}, + "output":{"shape":"CreateAnnotationStoreVersionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Creates a new version of an annotation store.

    ", + "authtype":"v4", + "endpoint":{"hostPrefix":"analytics-"} + }, "CreateMultipartReadSetUpload":{ "name":"CreateMultipartReadSetUpload", "http":{ @@ -254,6 +298,28 @@ "authtype":"v4", "endpoint":{"hostPrefix":"control-storage-"} }, + "CreateShare":{ + "name":"CreateShare", + "http":{ + "method":"POST", + "requestUri":"/share", + "responseCode":200 + }, + "input":{"shape":"CreateShareRequest"}, + "output":{"shape":"CreateShareResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Creates a share offer that can be accepted outside the account by a subscriber. The share is created by the owner and accepted by the principal subscriber.

    ", + "authtype":"v4", + "endpoint":{"hostPrefix":"analytics-"} + }, "CreateVariantStore":{ "name":"CreateVariantStore", "http":{ @@ -321,6 +387,28 @@ "endpoint":{"hostPrefix":"analytics-"}, "idempotent":true }, + "DeleteAnnotationStoreVersions":{ + "name":"DeleteAnnotationStoreVersions", + "http":{ + "method":"POST", + "requestUri":"/annotationStore/{name}/versions/delete", + "responseCode":200 + }, + "input":{"shape":"DeleteAnnotationStoreVersionsRequest"}, + "output":{"shape":"DeleteAnnotationStoreVersionsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Deletes one or multiple versions of an annotation store.

    ", + "authtype":"v4", + "endpoint":{"hostPrefix":"analytics-"}, + "idempotent":true + }, "DeleteReference":{ "name":"DeleteReference", "http":{ @@ -436,6 +524,29 @@ "endpoint":{"hostPrefix":"control-storage-"}, "idempotent":true }, + "DeleteShare":{ + "name":"DeleteShare", + "http":{ + "method":"DELETE", + "requestUri":"/share/{shareId}", + "responseCode":200 + }, + "input":{"shape":"DeleteShareRequest"}, + "output":{"shape":"DeleteShareResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Deletes a share of an analytics store.

    ", + "authtype":"v4", + "endpoint":{"hostPrefix":"analytics-"}, + "idempotent":true + }, "DeleteVariantStore":{ "name":"DeleteVariantStore", "http":{ @@ -521,6 +632,26 @@ "authtype":"v4", "endpoint":{"hostPrefix":"analytics-"} }, + "GetAnnotationStoreVersion":{ + "name":"GetAnnotationStoreVersion", + "http":{ + "method":"GET", + "requestUri":"/annotationStore/{name}/version/{versionName}", + "responseCode":200 + }, + "input":{"shape":"GetAnnotationStoreVersionRequest"}, + "output":{"shape":"GetAnnotationStoreVersionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Retrieves the metadata for an annotation store version.

    ", + "authtype":"v4", + "endpoint":{"hostPrefix":"analytics-"} + }, "GetReadSet":{ "name":"GetReadSet", "http":{ @@ -803,6 +934,28 @@ "authtype":"v4", "endpoint":{"hostPrefix":"control-storage-"} }, + "GetShare":{ + "name":"GetShare", + "http":{ + "method":"GET", + "requestUri":"/share/{shareId}", + "responseCode":200 + }, + "input":{"shape":"GetShareRequest"}, + "output":{"shape":"GetShareResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Retrieves the metadata for a share.

    ", + "authtype":"v4", + "endpoint":{"hostPrefix":"analytics-"} + }, "GetVariantImportJob":{ "name":"GetVariantImportJob", "http":{ @@ -886,6 +1039,26 @@ "authtype":"v4", "endpoint":{"hostPrefix":"analytics-"} }, + "ListAnnotationStoreVersions":{ + "name":"ListAnnotationStoreVersions", + "http":{ + "method":"POST", + "requestUri":"/annotationStore/{name}/versions", + "responseCode":200 + }, + "input":{"shape":"ListAnnotationStoreVersionsRequest"}, + "output":{"shape":"ListAnnotationStoreVersionsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Lists the versions of an annotation store.

    ", + "authtype":"v4", + "endpoint":{"hostPrefix":"analytics-"} + }, "ListAnnotationStores":{ "name":"ListAnnotationStores", "http":{ @@ -1187,6 +1360,28 @@ "authtype":"v4", "endpoint":{"hostPrefix":"control-storage-"} }, + "ListShares":{ + "name":"ListShares", + "http":{ + "method":"POST", + "requestUri":"/shares", + "responseCode":200 + }, + "input":{"shape":"ListSharesRequest"}, + "output":{"shape":"ListSharesResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Lists all shares associated with an account.

    ", + "authtype":"v4", + "endpoint":{"hostPrefix":"analytics-"} + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -1401,7 +1596,7 @@ {"shape":"AccessDeniedException"}, {"shape":"RequestTimeoutException"} ], - "documentation":"

    Starts a run.

    ", + "documentation":"

    Starts a workflow run. To duplicate a run, specify the run's ID and a role ARN. The remaining parameters are copied from the previous run.

The total number of runs in your account is subject to a quota per Region. To avoid needing to delete runs manually, you can set the retention mode to REMOVE. Runs with this setting are deleted automatically when the run quota is exceeded.

    ", "authtype":"v4", "endpoint":{"hostPrefix":"workflows-"} }, @@ -1493,6 +1688,26 @@ "authtype":"v4", "endpoint":{"hostPrefix":"analytics-"} }, + "UpdateAnnotationStoreVersion":{ + "name":"UpdateAnnotationStoreVersion", + "http":{ + "method":"POST", + "requestUri":"/annotationStore/{name}/version/{versionName}", + "responseCode":200 + }, + "input":{"shape":"UpdateAnnotationStoreVersionRequest"}, + "output":{"shape":"UpdateAnnotationStoreVersionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Updates the description of an annotation store version.

    ", + "authtype":"v4", + "endpoint":{"hostPrefix":"analytics-"} + }, "UpdateRunGroup":{ "name":"UpdateRunGroup", "http":{ @@ -1614,6 +1829,27 @@ "max":64, "min":1 }, + "AcceptShareRequest":{ + "type":"structure", + "required":["shareId"], + "members":{ + "shareId":{ + "shape":"String", + "documentation":"

    The ID for a share offer for analytics store data.

    ", + "location":"uri", + "locationName":"shareId" + } + } + }, + "AcceptShareResponse":{ + "type":"structure", + "members":{ + "status":{ + "shape":"ShareStatus", + "documentation":"

    The status of an analytics store share.

    " + } + } + }, "AccessDeniedException":{ "type":"structure", "required":["message"], @@ -1773,6 +2009,7 @@ "required":[ "id", "destinationName", + "versionName", "roleArn", "status", "creationTime", @@ -1787,6 +2024,10 @@ "shape":"String", "documentation":"

    The job's destination annotation store.

    " }, + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name of the annotation store version.

    " + }, "roleArn":{ "shape":"Arn", "documentation":"

    The job's service role ARN.

    " @@ -1864,7 +2105,7 @@ "documentation":"

    The store's file format.

    " }, "description":{ - "shape":"StoreDescription", + "shape":"Description", "documentation":"

    The store's description.

    " }, "sseConfig":{ @@ -1894,6 +2135,73 @@ "type":"list", "member":{"shape":"AnnotationStoreItem"} }, + "AnnotationStoreVersionItem":{ + "type":"structure", + "required":[ + "storeId", + "id", + "status", + "versionArn", + "name", + "versionName", + "description", + "creationTime", + "updateTime", + "statusMessage", + "versionSizeBytes" + ], + "members":{ + "storeId":{ + "shape":"ResourceId", + "documentation":"

    The store ID for an annotation store version.

    " + }, + "id":{ + "shape":"ResourceId", + "documentation":"

    The annotation store version ID.

    " + }, + "status":{ + "shape":"VersionStatus", + "documentation":"

    The status of an annotation store version.

    " + }, + "versionArn":{ + "shape":"Arn", + "documentation":"

The ARN for an annotation store version.

    " + }, + "name":{ + "shape":"StoreName", + "documentation":"

    A name given to an annotation store version to distinguish it from others.

    " + }, + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name of an annotation store version.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of an annotation store version.

    " + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"

    The time stamp for when an annotation store version was created.

    " + }, + "updateTime":{ + "shape":"UpdateTime", + "documentation":"

    The time stamp for when an annotation store version was updated.

    " + }, + "statusMessage":{ + "shape":"StatusMessage", + "documentation":"

The status message for an annotation store version.

    " + }, + "versionSizeBytes":{ + "shape":"Long", + "documentation":"

The size of an annotation store version, in bytes.

    " + } + }, + "documentation":"

    Annotation store versions.

    " + }, + "AnnotationStoreVersionItems":{ + "type":"list", + "member":{"shape":"AnnotationStoreVersionItem"} + }, "AnnotationType":{ "type":"string", "enum":[ @@ -1912,6 +2220,12 @@ "min":20, "pattern":"arn:([^: ]*):([^: ]*):([^: ]*):([0-9]{12}):([^: ]*)" }, + "ArnList":{ + "type":"list", + "member":{"shape":"String"}, + "max":10, + "min":1 + }, "BatchDeleteReadSetRequest":{ "type":"structure", "required":[ @@ -2097,17 +2411,21 @@ "documentation":"

    The genome reference for the store's annotations.

    " }, "name":{ - "shape":"CreateAnnotationStoreRequestNameString", + "shape":"StoreName", "documentation":"

    A name for the store.

    " }, "description":{ - "shape":"StoreDescription", + "shape":"Description", "documentation":"

    A description for the store.

    " }, "tags":{ "shape":"TagMap", "documentation":"

    Tags for the store.

    " }, + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name given to an annotation store version to distinguish it from other versions.

    " + }, "sseConfig":{ "shape":"SseConfig", "documentation":"

    Server-side encryption (SSE) settings for the store.

    " @@ -2122,16 +2440,13 @@ } } }, - "CreateAnnotationStoreRequestNameString":{ - "type":"string", - "pattern":"([a-z]){1}([a-z0-9_]){2,254}" - }, "CreateAnnotationStoreResponse":{ "type":"structure", "required":[ "id", "status", "name", + "versionName", "creationTime" ], "members":{ @@ -2159,12 +2474,88 @@ "shape":"String", "documentation":"

    The store's name.

    " }, + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name given to an annotation store version to distinguish it from other versions.

    " + }, "creationTime":{ "shape":"CreationTime", "documentation":"

    When the store was created.

    " } } }, + "CreateAnnotationStoreVersionRequest":{ + "type":"structure", + "required":[ + "name", + "versionName" + ], + "members":{ + "name":{ + "shape":"StoreName", + "documentation":"

The name of the annotation store from which versions are being created.

    ", + "location":"uri", + "locationName":"name" + }, + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name given to an annotation store version to distinguish it from other versions.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of an annotation store version.

    " + }, + "versionOptions":{ + "shape":"VersionOptions", + "documentation":"

    The options for an annotation store version.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    Any tags added to annotation store version.

    " + } + } + }, + "CreateAnnotationStoreVersionResponse":{ + "type":"structure", + "required":[ + "id", + "versionName", + "storeId", + "name", + "status", + "creationTime" + ], + "members":{ + "id":{ + "shape":"ResourceId", + "documentation":"

A generated ID for the annotation store version.

    " + }, + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name given to an annotation store version to distinguish it from other versions.

    " + }, + "storeId":{ + "shape":"ResourceId", + "documentation":"

    The ID for the annotation store from which new versions are being created.

    " + }, + "versionOptions":{ + "shape":"VersionOptions", + "documentation":"

    The options for an annotation store version.

    " + }, + "name":{ + "shape":"StoreName", + "documentation":"

    The name given to an annotation store version to distinguish it from other versions.

    " + }, + "status":{ + "shape":"VersionStatus", + "documentation":"

The status of an annotation store version.

    " + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"

    The time stamp for the creation of an annotation store version.

    " + } + } + }, "CreateMultipartReadSetUploadRequest":{ "type":"structure", "required":[ @@ -2482,6 +2873,44 @@ } } }, + "CreateShareRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "principalSubscriber" + ], + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

    The resource ARN for the analytics store to be shared.

    " + }, + "principalSubscriber":{ + "shape":"String", + "documentation":"

    The principal subscriber is the account being given access to the analytics store data through the share offer.

    " + }, + "shareName":{ + "shape":"ShareName", + "documentation":"

    A name given to the share.

    " + } + } + }, + "CreateShareResponse":{ + "type":"structure", + "members":{ + "shareId":{ + "shape":"String", + "documentation":"

    An ID generated for the share.

    " + }, + "status":{ + "shape":"ShareStatus", + "documentation":"

    The status of a share.

    " + }, + "shareName":{ + "shape":"ShareName", + "documentation":"

    A name given to the share.

    " + } + } + }, "CreateVariantStoreRequest":{ "type":"structure", "required":["reference"], @@ -2491,11 +2920,11 @@ "documentation":"

    The genome reference for the store's variants.

    " }, "name":{ - "shape":"CreateVariantStoreRequestNameString", + "shape":"StoreName", "documentation":"

    A name for the store.

    " }, "description":{ - "shape":"StoreDescription", + "shape":"Description", "documentation":"

    A description for the store.

    " }, "tags":{ @@ -2508,10 +2937,6 @@ } } }, - "CreateVariantStoreRequestNameString":{ - "type":"string", - "pattern":"([a-z]){1}([a-z0-9_]){2,254}" - }, "CreateVariantStoreResponse":{ "type":"structure", "required":[ @@ -2660,6 +3085,40 @@ } } }, + "DeleteAnnotationStoreVersionsRequest":{ + "type":"structure", + "required":[ + "name", + "versions" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"

    The name of the annotation store from which versions are being deleted.

    ", + "location":"uri", + "locationName":"name" + }, + "versions":{ + "shape":"VersionList", + "documentation":"

    The versions of an annotation store to be deleted.

    " + }, + "force":{ + "shape":"PrimitiveBoolean", + "documentation":"

Forces the deletion of an annotation store version when imports are in progress.

    ", + "location":"querystring", + "locationName":"force" + } + } + }, + "DeleteAnnotationStoreVersionsResponse":{ + "type":"structure", + "members":{ + "errors":{ + "shape":"VersionDeleteErrorList", + "documentation":"

    Any errors that occur when attempting to delete an annotation store version.

    " + } + } + }, "DeleteReferenceRequest":{ "type":"structure", "required":[ @@ -2744,6 +3203,27 @@ "members":{ } }, + "DeleteShareRequest":{ + "type":"structure", + "required":["shareId"], + "members":{ + "shareId":{ + "shape":"String", + "documentation":"

    The ID for the share request to be deleted.

    ", + "location":"uri", + "locationName":"shareId" + } + } + }, + "DeleteShareResponse":{ + "type":"structure", + "members":{ + "status":{ + "shape":"ShareStatus", + "documentation":"

    The status of the share being deleted.

    " + } + } + }, "DeleteVariantStoreRequest":{ "type":"structure", "required":["name"], @@ -2784,6 +3264,11 @@ } } }, + "Description":{ + "type":"string", + "max":500, + "min":0 + }, "Encoding":{ "type":"string", "max":20, @@ -2945,6 +3430,20 @@ "CRAM" ] }, + "Filter":{ + "type":"structure", + "members":{ + "resourceArns":{ + "shape":"ArnList", + "documentation":"

The Amazon Resource Name (ARN) for an analytics store.

    " + }, + "status":{ + "shape":"StatusList", + "documentation":"

The status of a share.

    " + } + }, + "documentation":"

Use filters to focus the returned shares on a specific parameter, such as the resource ARN or the status of the share.

    " + }, "FormatOptions":{ "type":"structure", "members":{ @@ -3004,6 +3503,7 @@ "required":[ "id", "destinationName", + "versionName", "roleArn", "status", "statusMessage", @@ -3023,6 +3523,10 @@ "shape":"StoreName", "documentation":"

    The job's destination annotation store.

    " }, + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name of the annotation store version.

    " + }, "roleArn":{ "shape":"Arn", "documentation":"

    The job's service role ARN.

    " @@ -3088,7 +3592,8 @@ "updateTime", "tags", "statusMessage", - "storeSizeBytes" + "storeSizeBytes", + "numVersions" ], "members":{ "id":{ @@ -3112,7 +3617,7 @@ "documentation":"

    The store's name.

    " }, "description":{ - "shape":"StoreDescription", + "shape":"Description", "documentation":"

    The store's description.

    " }, "sseConfig":{ @@ -3135,17 +3640,113 @@ "shape":"StoreOptions", "documentation":"

    The store's parsing options.

    " }, - "storeFormat":{ - "shape":"StoreFormat", - "documentation":"

    The store's annotation file format.

    " + "storeFormat":{ + "shape":"StoreFormat", + "documentation":"

    The store's annotation file format.

    " + }, + "statusMessage":{ + "shape":"StatusMessage", + "documentation":"

    A status message.

    " + }, + "storeSizeBytes":{ + "shape":"Long", + "documentation":"

    The store's size in bytes.

    " + }, + "numVersions":{ + "shape":"Integer", + "documentation":"

    An integer indicating how many versions of an annotation store exist.

    " + } + } + }, + "GetAnnotationStoreVersionRequest":{ + "type":"structure", + "required":[ + "name", + "versionName" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"

    The name given to an annotation store version to distinguish it from others.

    ", + "location":"uri", + "locationName":"name" + }, + "versionName":{ + "shape":"String", + "documentation":"

    The name given to an annotation store version to distinguish it from others.

    ", + "location":"uri", + "locationName":"versionName" + } + } + }, + "GetAnnotationStoreVersionResponse":{ + "type":"structure", + "required":[ + "storeId", + "id", + "status", + "versionArn", + "name", + "versionName", + "description", + "creationTime", + "updateTime", + "tags", + "statusMessage", + "versionSizeBytes" + ], + "members":{ + "storeId":{ + "shape":"ResourceId", + "documentation":"

The store ID for the annotation store version.

    " + }, + "id":{ + "shape":"ResourceId", + "documentation":"

    The annotation store version ID.

    " + }, + "status":{ + "shape":"VersionStatus", + "documentation":"

    The status of an annotation store version.

    " + }, + "versionArn":{ + "shape":"Arn", + "documentation":"

The ARN for the annotation store version.

    " + }, + "name":{ + "shape":"StoreName", + "documentation":"

    The name of the annotation store.

    " + }, + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name given to an annotation store version to distinguish it from others.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description for an annotation store version.

    " + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"

    The time stamp for when an annotation store version was created.

    " + }, + "updateTime":{ + "shape":"UpdateTime", + "documentation":"

    The time stamp for when an annotation store version was updated.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    Any tags associated with an annotation store version.

    " + }, + "versionOptions":{ + "shape":"VersionOptions", + "documentation":"

    The options for an annotation store version.

    " }, "statusMessage":{ "shape":"StatusMessage", - "documentation":"

    A status message.

    " + "documentation":"

    The status message for an annotation store version.

    " }, - "storeSizeBytes":{ + "versionSizeBytes":{ "shape":"Long", - "documentation":"

    The store's size in bytes.

    " + "documentation":"

    The size of the annotation store version in bytes.

    " } } }, @@ -3919,6 +4520,10 @@ "accelerators":{ "shape":"Accelerators", "documentation":"

    The computational accelerator used to run the workflow.

    " + }, + "retentionMode":{ + "shape":"RunRetentionMode", + "documentation":"

    The run's retention mode.

    " } } }, @@ -4073,6 +4678,27 @@ } } }, + "GetShareRequest":{ + "type":"structure", + "required":["shareId"], + "members":{ + "shareId":{ + "shape":"String", + "documentation":"

    The generated ID for a share.

    ", + "location":"uri", + "locationName":"shareId" + } + } + }, + "GetShareResponse":{ + "type":"structure", + "members":{ + "share":{ + "shape":"ShareDetails", + "documentation":"

    A share details object for an analytics store. It contains the status, resourceArn, ownerId, and other details.

    " + } + } + }, "GetVariantImportRequest":{ "type":"structure", "required":["jobId"], @@ -4195,7 +4821,7 @@ "documentation":"

    The store's name.

    " }, "description":{ - "shape":"StoreDescription", + "shape":"Description", "documentation":"

    The store's description.

    " }, "sseConfig":{ @@ -4548,6 +5174,10 @@ "type":"list", "member":{"shape":"ImportReferenceSourceItem"} }, + "Integer":{ + "type":"integer", + "box":true + }, "InternalServerException":{ "type":"structure", "required":["message"], @@ -4652,6 +5282,68 @@ } } }, + "ListAnnotationStoreVersionsFilter":{ + "type":"structure", + "members":{ + "status":{ + "shape":"VersionStatus", + "documentation":"

    The status of an annotation store version.

    " + } + }, + "documentation":"

    Use filters to focus the returned annotation store versions on a specific parameter, such as the status of the annotation store version.

    " + }, + "ListAnnotationStoreVersionsRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"String", + "documentation":"

    The name of an annotation store.

    ", + "location":"uri", + "locationName":"name" + }, + "maxResults":{ + "shape":"ListAnnotationStoreVersionsRequestMaxResultsInteger", + "documentation":"

    The maximum number of annotation store versions to return in one page of results.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"ListAnnotationStoreVersionsRequestNextTokenString", + "documentation":"

    Specifies the pagination token from a previous request to retrieve the next page of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "filter":{ + "shape":"ListAnnotationStoreVersionsFilter", + "documentation":"

    A filter to apply to the list of annotation store versions.

    " + } + } + }, + "ListAnnotationStoreVersionsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListAnnotationStoreVersionsRequestNextTokenString":{ + "type":"string", + "max":10000, + "min":1 + }, + "ListAnnotationStoreVersionsResponse":{ + "type":"structure", + "members":{ + "annotationStoreVersions":{ + "shape":"AnnotationStoreVersionItems", + "documentation":"

    Lists all versions of an annotation store.

    " + }, + "nextToken":{ + "shape":"String", + "documentation":"

    Specifies the pagination token from a previous request to retrieve the next page of results.

    " + } + } + }, "ListAnnotationStoresFilter":{ "type":"structure", "members":{ @@ -5332,6 +6024,46 @@ } } }, + "ListSharesRequest":{ + "type":"structure", + "required":["resourceOwner"], + "members":{ + "resourceOwner":{ + "shape":"ResourceOwner", + "documentation":"

    The account that owns the analytics store being shared.

    " + }, + "filter":{ + "shape":"Filter", + "documentation":"

    Attributes used to filter for a specific subset of shares.

    " + }, + "nextToken":{ + "shape":"String", + "documentation":"

    Next token returned in the response of a previous ListShares call. Used to get the next page of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"Integer", + "documentation":"

    The maximum number of shares to return in one page of results.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListSharesResponse":{ + "type":"structure", + "required":["shares"], + "members":{ + "shares":{ + "shape":"ShareDetailsList", + "documentation":"

    The available shares and their details.

    " + }, + "nextToken":{ + "shape":"String", + "documentation":"

    Next token returned in the response of a previous ListShares call. Used to get the next page of results.

    " + } + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["resourceArn"], @@ -6338,6 +7070,13 @@ }, "exception":true }, + "ResourceOwner":{ + "type":"string", + "enum":[ + "SELF", + "OTHER" + ] + }, "RoleArn":{ "type":"string", "max":2048, @@ -6589,6 +7328,15 @@ "key":{"shape":"RunResourceDigestKey"}, "value":{"shape":"RunResourceDigest"} }, + "RunRetentionMode":{ + "type":"string", + "enum":[ + "RETAIN", + "REMOVE" + ], + "max":64, + "min":1 + }, "RunRoleArn":{ "type":"string", "max":128, @@ -6784,6 +7532,69 @@ }, "exception":true }, + "ShareDetails":{ + "type":"structure", + "members":{ + "shareId":{ + "shape":"String", + "documentation":"

    The ID of a share offer for an analytics store.

    " + }, + "resourceArn":{ + "shape":"String", + "documentation":"

    The resource ARN of the analytics store being shared.

    " + }, + "principalSubscriber":{ + "shape":"String", + "documentation":"

    The principal subscriber is the account the analytics store data is being shared with.

    " + }, + "ownerId":{ + "shape":"String", + "documentation":"

    The account ID for the data owner. The owner creates the share offer.

    " + }, + "status":{ + "shape":"ShareStatus", + "documentation":"

    The status of a share.

    " + }, + "statusMessage":{ + "shape":"StatusMessage", + "documentation":"

    The status message for a share. It provides more details on the status of the share.

    " + }, + "shareName":{ + "shape":"ShareName", + "documentation":"

    The name of the share.

    " + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"

    The timestamp for when the share was created.

    " + }, + "updateTime":{ + "shape":"UpdateTime", + "documentation":"

    The timestamp for when the share was last updated.

    " + } + }, + "documentation":"

    The details of a share.

    " + }, + "ShareDetailsList":{ + "type":"list", + "member":{"shape":"ShareDetails"} + }, + "ShareName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, + "ShareStatus":{ + "type":"string", + "enum":[ + "PENDING", + "ACTIVATING", + "ACTIVE", + "DELETING", + "DELETED", + "FAILED" + ] + }, "SourceFiles":{ "type":"structure", "required":["source1"], @@ -6840,6 +7651,10 @@ "shape":"AnnotationImportItemSources", "documentation":"

    Items to import.

    " }, + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name of the annotation store version.

    " + }, "formatOptions":{ "shape":"FormatOptions", "documentation":"

    Formatting options for the annotation file.

    " @@ -7222,11 +8037,11 @@ }, "workflowType":{ "shape":"WorkflowType", - "documentation":"

    The run's workflows type.

    " + "documentation":"

    The run's workflow type.

    " }, "runId":{ "shape":"RunId", - "documentation":"

    The run's ID.

    " + "documentation":"

    The ID of a run to duplicate.

    " }, "roleArn":{ "shape":"RunRoleArn", @@ -7268,6 +8083,10 @@ "shape":"RunRequestId", "documentation":"

    To ensure that requests don't run multiple times, specify a unique ID for each request.

    ", "idempotencyToken":true + }, + "retentionMode":{ + "shape":"RunRetentionMode", + "documentation":"

    The retention mode for the run.

    " } } }, @@ -7344,16 +8163,15 @@ } } }, + "StatusList":{ + "type":"list", + "member":{"shape":"ShareStatus"} + }, "StatusMessage":{ "type":"string", "max":1000, "min":0 }, - "StoreDescription":{ - "type":"string", - "max":500, - "min":0 - }, "StoreFormat":{ "type":"string", "enum":[ @@ -7618,6 +8436,30 @@ "max":5000, "min":1 }, + "TsvVersionOptions":{ + "type":"structure", + "members":{ + "annotationType":{ + "shape":"AnnotationType", + "documentation":"

    The store version's annotation type.

    " + }, + "formatToHeader":{ + "shape":"FormatToHeader", + "documentation":"

    The annotation store version's header key to column name mapping.

    " + }, + "schema":{ + "shape":"TsvVersionOptionsSchemaList", + "documentation":"

    The TSV schema for an annotation store version.

    " + } + }, + "documentation":"

    The options for a TSV file.

    " + }, + "TsvVersionOptionsSchemaList":{ + "type":"list", + "member":{"shape":"SchemaItem"}, + "max":5000, + "min":1 + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -7655,7 +8497,7 @@ "locationName":"name" }, "description":{ - "shape":"StoreDescription", + "shape":"Description", "documentation":"

    A description for the store.

    " } } @@ -7689,7 +8531,7 @@ "documentation":"

    The store's name.

    " }, "description":{ - "shape":"StoreDescription", + "shape":"Description", "documentation":"

    The store's description.

    " }, "creationTime":{ @@ -7710,6 +8552,78 @@ } } }, + "UpdateAnnotationStoreVersionRequest":{ + "type":"structure", + "required":[ + "name", + "versionName" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"

    The name of an annotation store.

    ", + "location":"uri", + "locationName":"name" + }, + "versionName":{ + "shape":"String", + "documentation":"

    The name of an annotation store version.

    ", + "location":"uri", + "locationName":"versionName" + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of an annotation store version.

    " + } + } + }, + "UpdateAnnotationStoreVersionResponse":{ + "type":"structure", + "required":[ + "storeId", + "id", + "status", + "name", + "versionName", + "description", + "creationTime", + "updateTime" + ], + "members":{ + "storeId":{ + "shape":"ResourceId", + "documentation":"

    The annotation store ID.

    " + }, + "id":{ + "shape":"ResourceId", + "documentation":"

    The annotation store version ID.

    " + }, + "status":{ + "shape":"VersionStatus", + "documentation":"

    The status of an annotation store version.

    " + }, + "name":{ + "shape":"StoreName", + "documentation":"

    The name of an annotation store.

    " + }, + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name of an annotation store version.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of an annotation store version.

    " + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"

    The time stamp for when an annotation store version was created.

    " + }, + "updateTime":{ + "shape":"UpdateTime", + "documentation":"

    The time stamp for when an annotation store version was updated.

    " + } + } + }, "UpdateRunGroupRequest":{ "type":"structure", "required":["id"], @@ -7781,7 +8695,7 @@ "locationName":"name" }, "description":{ - "shape":"StoreDescription", + "shape":"Description", "documentation":"

    A description for the store.

    " } } @@ -7815,7 +8729,7 @@ "documentation":"

    The store's name.

    " }, "description":{ - "shape":"StoreDescription", + "shape":"Description", "documentation":"

    The store's description.

    " }, "creationTime":{ @@ -8058,7 +8972,7 @@ "documentation":"

    The store's name.

    " }, "description":{ - "shape":"StoreDescription", + "shape":"Description", "documentation":"

    The store's description.

    " }, "sseConfig":{ @@ -8102,6 +9016,61 @@ }, "documentation":"

    Formatting options for a VCF file.

    " }, + "VersionDeleteError":{ + "type":"structure", + "required":[ + "versionName", + "message" + ], + "members":{ + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name given to an annotation store version.

    " + }, + "message":{ + "shape":"String", + "documentation":"

    The message explaining the error in annotation store version deletion.

    " + } + }, + "documentation":"

    The error preventing deletion of the annotation store version.

    " + }, + "VersionDeleteErrorList":{ + "type":"list", + "member":{"shape":"VersionDeleteError"} + }, + "VersionList":{ + "type":"list", + "member":{"shape":"VersionName"}, + "max":10, + "min":1 + }, + "VersionName":{ + "type":"string", + "max":255, + "min":3, + "pattern":"([a-z]){1}([a-z0-9_]){2,254}" + }, + "VersionOptions":{ + "type":"structure", + "members":{ + "tsvVersionOptions":{ + "shape":"TsvVersionOptions", + "documentation":"

    File settings for a version of a TSV store.

    " + } + }, + "documentation":"

    The options for an annotation store version.

    ", + "union":true + }, + "VersionStatus":{ + "type":"string", + "enum":[ + "CREATING", + "UPDATING", + "DELETING", + "ACTIVE", + "FAILED" + ] + }, "WorkflowArn":{ "type":"string", "max":128, diff --git a/services/omics/src/main/resources/codegen-resources/waiters-2.json b/services/omics/src/main/resources/codegen-resources/waiters-2.json index db1de32eedd3..9e82e101dcbd 100644 --- a/services/omics/src/main/resources/codegen-resources/waiters-2.json +++ b/services/omics/src/main/resources/codegen-resources/waiters-2.json @@ -81,6 +81,54 @@ "expected" : "DELETING" } ] }, + "AnnotationStoreVersionCreated" : { + "description" : "Wait until an annotation store version is created", + "delay" : 30, + "maxAttempts" : 20, + "operation" : "GetAnnotationStoreVersion", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "ACTIVE" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "CREATING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "UPDATING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "FAILED" + } ] + }, + "AnnotationStoreVersionDeleted" : { + "description" : "Wait until an annotation store version is deleted.", + "delay" : 30, + "maxAttempts" : 20, + "operation" : "GetAnnotationStoreVersion", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "DELETED" + }, { + "matcher" : "error", + "state" : "success", + "expected" : "ResourceNotFoundException" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "DELETING" + } ] + }, "ReadSetActivationJobCompleted" : { "description" : "Wait until a job is completed.", "delay" : 30, diff --git a/services/opensearch/pom.xml b/services/opensearch/pom.xml index b1436890b8b4..6a46b59b1f4c 100644 --- a/services/opensearch/pom.xml +++ b/services/opensearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT opensearch AWS Java SDK :: Services :: Open Search diff --git a/services/opensearchserverless/pom.xml b/services/opensearchserverless/pom.xml index 4151ba0b6d09..0b8a240a3c46 100644 --- a/services/opensearchserverless/pom.xml +++ b/services/opensearchserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT opensearchserverless AWS Java SDK :: Services :: Open Search Serverless diff --git a/services/opsworks/pom.xml b/services/opsworks/pom.xml index 87bcec4cbbc5..4b88e6dcb81f 100644 --- a/services/opsworks/pom.xml +++ b/services/opsworks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT opsworks AWS Java SDK :: Services :: AWS OpsWorks diff --git a/services/opsworks/src/main/resources/codegen-resources/customization.config b/services/opsworks/src/main/resources/codegen-resources/customization.config index 197b17d901a2..322f97faeedf 100644 --- a/services/opsworks/src/main/resources/codegen-resources/customization.config +++ b/services/opsworks/src/main/resources/codegen-resources/customization.config @@ -6,7 +6,7 @@ "describeStacks", "describeUserProfiles" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "updateMyUserProfile", "describeAgentVersions", "describeApps", diff --git a/services/opsworkscm/pom.xml b/services/opsworkscm/pom.xml index 081f6a9cee4a..71e64188d940 100644 --- a/services/opsworkscm/pom.xml +++ 
b/services/opsworkscm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT opsworkscm AWS Java SDK :: Services :: AWS OpsWorks for Chef Automate diff --git a/services/organizations/pom.xml b/services/organizations/pom.xml index 48ff8ed2127a..0397590f81c7 100644 --- a/services/organizations/pom.xml +++ b/services/organizations/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT organizations AWS Java SDK :: Services :: AWS Organizations diff --git a/services/organizations/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/organizations/src/main/resources/codegen-resources/endpoint-rule-set.json index b31c53546631..af8bfbf83c1b 100644 --- a/services/organizations/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/organizations/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,1048 +115,455 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "stringEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", + "fn": "getAttr", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] + "ref": "PartitionResult" }, - "aws" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://organizations-fips.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } + "name" ] }, + "aws" + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] 
- } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://organizations-fips.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "organizations", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] + "ref": "UseFIPS" }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://organizations.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] + "ref": "UseDualStack" }, + false + ] + } + ], + "endpoint": { + "url": "https://organizations.us-east-1.amazonaws.com", + "properties": { + "authSchemes": [ { - "conditions": [], - "endpoint": { - "url": "https://organizations.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "organizations", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" + "name": "sigv4", + "signingName": "organizations", + "signingRegion": "us-east-1" } ] }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [ + "fn": "stringEquals", + "argv": [ { - "fn": "stringEquals", + "fn": "getAttr", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] + "ref": "PartitionResult" }, - "aws-cn" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://organizations-fips.{Region}.api.amazonwebservices.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } + "name" ] }, + "aws" + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": 
"PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://organizations-fips.{Region}.amazonaws.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] + "ref": "UseFIPS" }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://organizations.{Region}.api.amazonwebservices.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] + "ref": "UseDualStack" }, + false + ] + } + ], + "endpoint": { + "url": "https://organizations-fips.us-east-1.amazonaws.com", + "properties": { + "authSchemes": [ { - "conditions": [], - "endpoint": { - "url": "https://organizations.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "organizations", - "signingRegion": "cn-northwest-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" + "name": "sigv4", + "signingName": "organizations", + "signingRegion": "us-east-1" } ] }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [ + "fn": "stringEquals", + "argv": [ { - "fn": "stringEquals", + "fn": "getAttr", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws-us-gov" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] + "ref": "PartitionResult" }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://organizations-fips.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } + "name" ] }, + "aws-cn" + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://organizations.us-gov-west-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - 
"signingName": "organizations", - "signingRegion": "us-gov-west-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] + "ref": "UseFIPS" }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://organizations.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] + "ref": "UseDualStack" }, + false + ] + } + ], + "endpoint": { + "url": "https://organizations.cn-northwest-1.amazonaws.com.cn", + "properties": { + "authSchemes": [ { - "conditions": [], - "endpoint": { - "url": "https://organizations.us-gov-west-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "organizations", - "signingRegion": "us-gov-west-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" + "name": "sigv4", + "signingName": "organizations", + "signingRegion": "cn-northwest-1" } ] }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [ + "fn": "stringEquals", + "argv": [ { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ { - "ref": "UseFIPS" + "ref": "PartitionResult" }, - true + "name" ] }, + "aws-us-gov" + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://organizations-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "ref": "UseDualStack" }, + false + ] + } + ], + "endpoint": { + "url": "https://organizations.us-gov-west-1.amazonaws.com", + "properties": { + "authSchemes": [ { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "name": "sigv4", + "signingName": "organizations", + "signingRegion": "us-gov-west-1" } ] }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [ + "fn": "stringEquals", + "argv": [ { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ { - "ref": "UseFIPS" + "ref": "PartitionResult" }, - true + "name" ] - } - ], - "type": "tree", - "rules": [ + }, + "aws-us-gov" + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - 
"ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-global" - ] - } - ], - "endpoint": { - "url": "https://organizations-fips.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "organizations", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-us-gov-global" - ] - } - ], - "endpoint": { - "url": "https://organizations.us-gov-west-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "organizations", - "signingRegion": "us-gov-west-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://organizations-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" }, + false + ] + } + ], + "endpoint": { + "url": "https://organizations.us-gov-west-1.amazonaws.com", + "properties": { + "authSchemes": [ { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "name": "sigv4", + "signingName": "organizations", + "signingRegion": "us-gov-west-1" } ] }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://organizations.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] }, { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ + "fn": "booleanEquals", + "argv": [ + true, { - "fn": "stringEquals", + "fn": "getAttr", "argv": [ { - "ref": "Region" + "ref": "PartitionResult" }, - "aws-global" + "supportsDualStack" ] } - ], + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], "endpoint": { - "url": "https://organizations.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "organizations", - "signingRegion": "us-east-1" - } - ] - }, + "url": "https://organizations-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, "headers": {} }, "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": 
"booleanEquals", + "argv": [ + { + "ref": "UseFIPS" }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ + true, { - "fn": "stringEquals", + "fn": "getAttr", "argv": [ { - "ref": "Region" + "ref": "PartitionResult" }, - "aws-cn-global" + "supportsFIPS" ] } - ], + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], "endpoint": { - "url": "https://organizations.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "organizations", - "signingRegion": "cn-northwest-1" - } - ] - }, + "url": "https://organizations-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, "headers": {} }, "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ + true, { - "fn": "stringEquals", + "fn": "getAttr", "argv": [ { - "ref": "Region" + "ref": "PartitionResult" }, - "aws-us-gov-global" + "supportsDualStack" ] } - ], - "endpoint": { - "url": "https://organizations.us-gov-west-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "organizations", - "signingRegion": "us-gov-west-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [], "endpoint": { - "url": "https://organizations.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://organizations.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://organizations.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/organizations/src/main/resources/codegen-resources/endpoint-tests.json b/services/organizations/src/main/resources/codegen-resources/endpoint-tests.json index 9f0b8e8ae1d5..d84bb7c7e76f 100644 --- a/services/organizations/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/organizations/src/main/resources/codegen-resources/endpoint-tests.json @@ -17,9 +17,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "aws-global", "UseFIPS": false, - "Region": "aws-global" + "UseDualStack": false } }, { @@ -39,9 +39,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "aws-global", "UseFIPS": true, - "Region": "aws-global" + "UseDualStack": false } }, { @@ -52,9 +52,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -74,9 +74,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -87,9 +87,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + 
"UseDualStack": true } }, { @@ -109,9 +109,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -131,9 +131,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "aws-cn-global", "UseFIPS": false, - "Region": "aws-cn-global" + "UseDualStack": false } }, { @@ -144,9 +144,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -157,9 +157,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -170,9 +170,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -192,9 +192,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -214,9 +214,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "aws-us-gov-global", "UseFIPS": false, - "Region": "aws-us-gov-global" + "UseDualStack": false } }, { @@ -236,9 +236,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "aws-us-gov-global", "UseFIPS": true, - "Region": "aws-us-gov-global" + "UseDualStack": false } }, { @@ -249,9 +249,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -271,9 +271,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -284,9 +284,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -306,9 +306,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -319,9 +330,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -332,9 +354,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -345,9 +378,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but 
this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -358,9 +402,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -371,9 +415,9 @@ } }, "params": { - "UseDualStack": false, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -385,8 +429,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -396,9 +440,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -408,11 +452,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/organizations/src/main/resources/codegen-resources/service-2.json b/services/organizations/src/main/resources/codegen-resources/service-2.json index 699b8c7b3a89..8cf73c8e9c1b 100644 --- a/services/organizations/src/main/resources/codegen-resources/service-2.json +++ b/services/organizations/src/main/resources/codegen-resources/service-2.json @@ -34,7 +34,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"AccessDeniedForDependencyException"} ], - "documentation":"

    Sends a response to the originator of a handshake agreeing to the action proposed by the handshake request.

    You can only call this operation by the following principals when they also have the relevant IAM permissions:

    • Invitation to join or Approve all features request handshakes: only a principal from the member account.

      The user who calls the API for an invitation to join must have the organizations:AcceptHandshake permission. If you enabled all features in the organization, the user must also have the iam:CreateServiceLinkedRole permission so that Organizations can create the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and Service-Linked Roles in the Organizations User Guide.

    • Enable all features final confirmation handshake: only a principal from the management account.

      For more information about invitations, see Inviting an Amazon Web Services account to join your organization in the Organizations User Guide. For more information about requests to enable all features in the organization, see Enabling all features in your organization in the Organizations User Guide.

    After you accept a handshake, it continues to appear in the results of relevant APIs for only 30 days. After that, it's deleted.

    " + "documentation":"

    Sends a response to the originator of a handshake agreeing to the action proposed by the handshake request.

    You can only call this operation by the following principals when they also have the relevant IAM permissions:

    • Invitation to join or Approve all features request handshakes: only a principal from the member account.

      The user who calls the API for an invitation to join must have the organizations:AcceptHandshake permission. If you enabled all features in the organization, the user must also have the iam:CreateServiceLinkedRole permission so that Organizations can create the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and service-linked roles in the Organizations User Guide.

    • Enable all features final confirmation handshake: only a principal from the management account.

      For more information about invitations, see Inviting an Amazon Web Services account to join your organization in the Organizations User Guide. For more information about requests to enable all features in the organization, see Enabling all features in your organization in the Organizations User Guide.

    After you accept a handshake, it continues to appear in the results of relevant APIs for only 30 days. After that, it's deleted.

    " }, "AttachPolicy":{ "name":"AttachPolicy", @@ -58,7 +58,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

    Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy. Refer to the Organizations User Guide for information about each policy type:

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy. Refer to the Organizations User Guide for information about each policy type:

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    " }, "CancelHandshake":{ "name":"CancelHandshake", @@ -100,7 +100,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

    Closes an Amazon Web Services member account within an organization. You can close an account when all features are enabled . You can't close the management account with this API. This is an asynchronous request that Amazon Web Services performs in the background. Because CloseAccount operates asynchronously, it can return a successful completion message even though account closure might still be in progress. You need to wait a few minutes before the account is fully closed. To check the status of the request, do one of the following:

    • Use the AccountId that you sent in the CloseAccount request to provide as a parameter to the DescribeAccount operation.

      While the close account request is in progress, Account status will indicate PENDING_CLOSURE. When the close account request completes, the status will change to SUSPENDED.

    • Check the CloudTrail log for the CloseAccountResult event that gets published after the account closes successfully. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide.

    • You can close only 10% of member accounts, between 10 and 200, within a rolling 30 day period. This quota is not bound by a calendar month, but starts when you close an account.

      After you reach this limit, you can close additional accounts in the Billing console. For more information, see Closing an account in the Amazon Web Services Billing and Cost Management User Guide.

    • To reinstate a closed account, contact Amazon Web Services Support within the 90-day grace period while the account is in SUSPENDED status.

    • If the Amazon Web Services account you attempt to close is linked to an Amazon Web Services GovCloud (US) account, the CloseAccount request will close both accounts. To learn important pre-closure details, see Closing an Amazon Web Services GovCloud (US) account in the Amazon Web Services GovCloud User Guide.

    For more information about closing accounts, see Closing an Amazon Web Services account in the Organizations User Guide.

    " + "documentation":"

    Closes an Amazon Web Services member account within an organization. You can close an account when all features are enabled . You can't close the management account with this API. This is an asynchronous request that Amazon Web Services performs in the background. Because CloseAccount operates asynchronously, it can return a successful completion message even though account closure might still be in progress. You need to wait a few minutes before the account is fully closed. To check the status of the request, do one of the following:

    • Use the AccountId that you sent in the CloseAccount request to provide as a parameter to the DescribeAccount operation.

      While the close account request is in progress, Account status will indicate PENDING_CLOSURE. When the close account request completes, the status will change to SUSPENDED.

    • Check the CloudTrail log for the CloseAccountResult event that gets published after the account closes successfully. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide.

    • You can close only 10% of member accounts, between 10 and 200, within a rolling 30 day period. This quota is not bound by a calendar month, but starts when you close an account. After you reach this limit, you can close additional accounts. For more information, see Closing a member account in your organization in the Organizations User Guide.

    • To reinstate a closed account, contact Amazon Web Services Support within the 90-day grace period while the account is in SUSPENDED status.

    • If the Amazon Web Services account you attempt to close is linked to an Amazon Web Services GovCloud (US) account, the CloseAccount request will close both accounts. To learn important pre-closure details, see Closing an Amazon Web Services GovCloud (US) account in the Amazon Web Services GovCloud User Guide.

    " }, "CreateAccount":{ "name":"CreateAccount", @@ -121,7 +121,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

    Creates an Amazon Web Services account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that Amazon Web Services performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

    • Use the Id value of the CreateAccountStatus response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation.

    • Check the CloudTrail log for the CreateAccountResult event. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide.

    The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and Service-Linked Roles in the Organizations User Guide.

    If the request includes tags, then the requester must have the organizations:TagResource permission.

    Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the management account administrator permissions in the new member account. Principals in the management account can assume the role. Organizations clones the company name and address information for the new account from the organization's management account.

    This operation can be called only from the organization's management account.

    For more information about creating accounts, see Creating an Amazon Web Services account in Your Organization in the Organizations User Guide.

    • When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method and signing the end user license agreement (EULA) is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization as a member account in the Organizations User Guide.

    • If you get an exception that indicates that you exceeded your account limits for the organization, contact Amazon Web Services Support.

    • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact Amazon Web Services Support.

    • Using CreateAccount to create multiple temporary accounts isn't recommended. You can only close an account from the Billing and Cost Management console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing an Amazon Web Services account in the Organizations User Guide.

    When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting Access to Your Billing Information and Tools.

    " + "documentation":"

    Creates an Amazon Web Services account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that Amazon Web Services performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

    • Use the Id value of the CreateAccountStatus response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation.

    • Check the CloudTrail log for the CreateAccountResult event. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide.

    The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and service-linked roles in the Organizations User Guide.

    If the request includes tags, then the requester must have the organizations:TagResource permission.

    Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the management account administrator permissions in the new member account. Principals in the management account can assume the role. Organizations clones the company name and address information for the new account from the organization's management account.

    This operation can be called only from the organization's management account.

    For more information about creating accounts, see Creating a member account in your organization in the Organizations User Guide.

    • When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method and signing the end user license agreement (EULA), is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

    • If you get an exception that indicates that you exceeded your account limits for the organization, contact Amazon Web Services Support.

    • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact Amazon Web Services Support.

    • Using CreateAccount to create multiple temporary accounts isn't recommended. You can only close an account from the Billing and Cost Management console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing a member account in your organization in the Organizations User Guide.

    When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting access to your billing information and tools.
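
    The asynchronous flow above translates fairly directly into SDK calls. The following is a minimal sketch, assuming the Organizations client generated by this SDK and the usual codegen naming; the email address, account name, and polling interval are placeholders.

        import software.amazon.awssdk.services.organizations.OrganizationsClient;
        import software.amazon.awssdk.services.organizations.model.CreateAccountRequest;
        import software.amazon.awssdk.services.organizations.model.CreateAccountState;
        import software.amazon.awssdk.services.organizations.model.CreateAccountStatus;
        import software.amazon.awssdk.services.organizations.model.DescribeCreateAccountStatusRequest;

        public class CreateMemberAccountSketch {
            public static void main(String[] args) throws InterruptedException {
                OrganizationsClient organizations = OrganizationsClient.create();

                // Start the asynchronous account creation and keep the returned request Id.
                CreateAccountStatus status = organizations.createAccount(
                        CreateAccountRequest.builder()
                                .email("new-member@example.com")        // placeholder
                                .accountName("Example member account")  // placeholder
                                .build())
                        .createAccountStatus();

                // CreateAccount can return before the account is usable, so poll the status
                // with DescribeCreateAccountStatus until it leaves IN_PROGRESS.
                while (status.state() == CreateAccountState.IN_PROGRESS) {
                    Thread.sleep(5_000);
                    status = organizations.describeCreateAccountStatus(
                            DescribeCreateAccountStatusRequest.builder()
                                    .createAccountRequestId(status.id())
                                    .build())
                            .createAccountStatus();
                }
                System.out.println("CreateAccount finished in state " + status.stateAsString());
            }
        }

    Alternatively, the outcome can be confirmed through the CreateAccountResult event in CloudTrail, as noted above.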

    " }, "CreateGovCloudAccount":{ "name":"CreateGovCloudAccount", @@ -142,7 +142,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

    This action is available if all of the following are true:

    • You're authorized to create accounts in the Amazon Web Services GovCloud (US) Region. For more information on the Amazon Web Services GovCloud (US) Region, see the Amazon Web Services GovCloud User Guide.

    • You already have an account in the Amazon Web Services GovCloud (US) Region that is paired with a management account of an organization in the commercial Region.

    • You call this action from the management account of your organization in the commercial Region.

    • You have the organizations:CreateGovCloudAccount permission.

    Organizations automatically creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and Service-Linked Roles in the Organizations User Guide.

    Amazon Web Services automatically enables CloudTrail for Amazon Web Services GovCloud (US) accounts, but you should also do the following:

    • Verify that CloudTrail is enabled to store logs.

    • Create an Amazon S3 bucket for CloudTrail log storage.

      For more information, see Verifying CloudTrail Is Enabled in the Amazon Web Services GovCloud User Guide.

    If the request includes tags, then the requester must have the organizations:TagResource permission. The tags are attached to the commercial account associated with the GovCloud account, rather than the GovCloud account itself. To add tags to the GovCloud account, call the TagResource operation in the GovCloud Region after the new GovCloud account exists.

    You call this action from the management account of your organization in the commercial Region to create a standalone Amazon Web Services account in the Amazon Web Services GovCloud (US) Region. After the account is created, the management account of an organization in the Amazon Web Services GovCloud (US) Region can invite it to that organization. For more information on inviting standalone accounts in the Amazon Web Services GovCloud (US) to join an organization, see Organizations in the Amazon Web Services GovCloud User Guide.

    Calling CreateGovCloudAccount is an asynchronous request that Amazon Web Services performs in the background. Because CreateGovCloudAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

    When you call the CreateGovCloudAccount action, you create two accounts: a standalone account in the Amazon Web Services GovCloud (US) Region and an associated account in the commercial Region for billing and support purposes. The account in the commercial Region is automatically a member of the organization whose credentials made the request. Both accounts are associated with the same email address.

    A role is created in the new account in the commercial Region that allows the management account in the organization in the commercial Region to assume it. An Amazon Web Services GovCloud (US) account is then created and associated with the commercial account that you just created. A role is also created in the new Amazon Web Services GovCloud (US) account that can be assumed by the Amazon Web Services GovCloud (US) account that is associated with the management account of the commercial organization. For more information and to view a diagram that explains how account access works, see Organizations in the Amazon Web Services GovCloud User Guide.

    For more information about creating accounts, see Creating an Amazon Web Services account in Your Organization in the Organizations User Guide.

    • When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account is not automatically collected. This includes a payment method and signing the end user license agreement (EULA). If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization as a member account in the Organizations User Guide.

    • If you get an exception that indicates that you exceeded your account limits for the organization, contact Amazon Web Services Support.

    • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact Amazon Web Services Support.

    • Using CreateGovCloudAccount to create multiple temporary accounts isn't recommended. You can only close an account from the Amazon Web Services Billing and Cost Management console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing an Amazon Web Services account in the Organizations User Guide.

    When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting Access to Your Billing Information and Tools.

    " + "documentation":"

    This action is available if all of the following are true:

    • You're authorized to create accounts in the Amazon Web Services GovCloud (US) Region. For more information on the Amazon Web Services GovCloud (US) Region, see the Amazon Web Services GovCloud User Guide.

    • You already have an account in the Amazon Web Services GovCloud (US) Region that is paired with a management account of an organization in the commercial Region.

    • You call this action from the management account of your organization in the commercial Region.

    • You have the organizations:CreateGovCloudAccount permission.

    Organizations automatically creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and service-linked roles in the Organizations User Guide.

    Amazon Web Services automatically enables CloudTrail for Amazon Web Services GovCloud (US) accounts, but you should also do the following:

    • Verify that CloudTrail is enabled to store logs.

    • Create an Amazon S3 bucket for CloudTrail log storage.

      For more information, see Verifying CloudTrail Is Enabled in the Amazon Web Services GovCloud User Guide.

    If the request includes tags, then the requester must have the organizations:TagResource permission. The tags are attached to the commercial account associated with the GovCloud account, rather than the GovCloud account itself. To add tags to the GovCloud account, call the TagResource operation in the GovCloud Region after the new GovCloud account exists.

    You call this action from the management account of your organization in the commercial Region to create a standalone Amazon Web Services account in the Amazon Web Services GovCloud (US) Region. After the account is created, the management account of an organization in the Amazon Web Services GovCloud (US) Region can invite it to that organization. For more information on inviting standalone accounts in the Amazon Web Services GovCloud (US) to join an organization, see Organizations in the Amazon Web Services GovCloud User Guide.

    Calling CreateGovCloudAccount is an asynchronous request that Amazon Web Services performs in the background. Because CreateGovCloudAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

    When you call the CreateGovCloudAccount action, you create two accounts: a standalone account in the Amazon Web Services GovCloud (US) Region and an associated account in the commercial Region for billing and support purposes. The account in the commercial Region is automatically a member of the organization whose credentials made the request. Both accounts are associated with the same email address.

    A role is created in the new account in the commercial Region that allows the management account in the organization in the commercial Region to assume it. An Amazon Web Services GovCloud (US) account is then created and associated with the commercial account that you just created. A role is also created in the new Amazon Web Services GovCloud (US) account that can be assumed by the Amazon Web Services GovCloud (US) account that is associated with the management account of the commercial organization. For more information and to view a diagram that explains how account access works, see Organizations in the Amazon Web Services GovCloud User Guide.

    For more information about creating accounts, see Creating a member account in your organization in the Organizations User Guide.

    • When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account is not automatically collected. This includes a payment method and signing the end user license agreement (EULA). If you must remove an account from your organization later, you can do so only after you provide the missing information. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

    • If you get an exception that indicates that you exceeded your account limits for the organization, contact Amazon Web Services Support.

    • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact Amazon Web Services Support.

    • Using CreateGovCloudAccount to create multiple temporary accounts isn't recommended. You can only close an account from the Amazon Web Services Billing and Cost Management console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing a member account in your organization in the Organizations User Guide.

    When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting access to your billing information and tools.

    " }, "CreateOrganization":{ "name":"CreateOrganization", @@ -162,7 +162,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"AccessDeniedForDependencyException"} ], - "documentation":"

    Creates an Amazon Web Services organization. The account whose user is calling the CreateOrganization operation automatically becomes the management account of the new organization.

    This operation must be called using credentials from the account that is to become the new organization's management account. The principal must also have the relevant IAM permissions.

    By default (or if you set the FeatureSet parameter to ALL), the new organization is created with all features enabled and service control policies automatically enabled in the root. If you instead choose to create the organization supporting only the consolidated billing features by setting the FeatureSet parameter to CONSOLIDATED_BILLING\", no policy types are enabled by default, and you can't use organization policies

    " + "documentation":"

    Creates an Amazon Web Services organization. The account whose user is calling the CreateOrganization operation automatically becomes the management account of the new organization.

    This operation must be called using credentials from the account that is to become the new organization's management account. The principal must also have the relevant IAM permissions.

    By default (or if you set the FeatureSet parameter to ALL), the new organization is created with all features enabled and service control policies automatically enabled in the root. If you instead choose to create the organization supporting only the consolidated billing features by setting the FeatureSet parameter to CONSOLIDATED_BILLING, no policy types are enabled by default and you can't use organization policies.
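
    As a concrete illustration of the FeatureSet choice described above, the following sketch creates an organization with all features enabled; it assumes the generated Organizations client and enum names from this SDK's codegen.

        import software.amazon.awssdk.services.organizations.OrganizationsClient;
        import software.amazon.awssdk.services.organizations.model.CreateOrganizationRequest;
        import software.amazon.awssdk.services.organizations.model.Organization;
        import software.amazon.awssdk.services.organizations.model.OrganizationFeatureSet;

        public class CreateOrganizationSketch {
            public static void main(String[] args) {
                OrganizationsClient organizations = OrganizationsClient.create();

                // The calling account becomes the management account of the new organization.
                // Passing CONSOLIDATED_BILLING instead would disable organization policies.
                Organization organization = organizations.createOrganization(
                        CreateOrganizationRequest.builder()
                                .featureSet(OrganizationFeatureSet.ALL)
                                .build())
                        .organization();

                System.out.println("Created organization " + organization.id());
            }
        }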

    " }, "CreateOrganizationalUnit":{ "name":"CreateOrganizationalUnit", @@ -183,7 +183,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Creates an organizational unit (OU) within a root or parent OU. An OU is a container for accounts that enables you to organize your accounts to apply policies according to your business requirements. The number of levels deep that you can nest OUs is dependent upon the policy types enabled for that root. For service control policies, the limit is five.

    For more information about OUs, see Managing Organizational Units in the Organizations User Guide.

    If the request includes tags, then the requester must have the organizations:TagResource permission.

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Creates an organizational unit (OU) within a root or parent OU. An OU is a container for accounts that enables you to organize your accounts to apply policies according to your business requirements. The number of levels deep that you can nest OUs is dependent upon the policy types enabled for that root. For service control policies, the limit is five.

    For more information about OUs, see Managing organizational units (OUs) in the Organizations User Guide.

    If the request includes tags, then the requester must have the organizations:TagResource permission.

    This operation can be called only from the organization's management account.
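
    A minimal sketch of creating an OU, assuming the generated Organizations client from this SDK; the parent ID is a placeholder and can be a root ID or another OU's ID.

        import software.amazon.awssdk.services.organizations.OrganizationsClient;
        import software.amazon.awssdk.services.organizations.model.CreateOrganizationalUnitRequest;
        import software.amazon.awssdk.services.organizations.model.OrganizationalUnit;

        public class CreateOuSketch {
            public static void main(String[] args) {
                OrganizationsClient organizations = OrganizationsClient.create();

                // Nest the new OU under the given parent; nesting depth is limited as noted above.
                OrganizationalUnit ou = organizations.createOrganizationalUnit(
                        CreateOrganizationalUnitRequest.builder()
                                .parentId("r-examplerootid1")   // placeholder root or OU ID
                                .name("Workloads")
                                .build())
                        .organizationalUnit();

                System.out.println("Created OU " + ou.id());
            }
        }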

    " }, "CreatePolicy":{ "name":"CreatePolicy", @@ -206,7 +206,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

    Creates a policy of a specified type that you can attach to a root, an organizational unit (OU), or an individual Amazon Web Services account.

    For more information about policies and their use, see Managing Organization Policies.

    If the request includes tags, then the requester must have the organizations:TagResource permission.

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Creates a policy of a specified type that you can attach to a root, an organizational unit (OU), or an individual Amazon Web Services account.

    For more information about policies and their use, see Managing Organizations policies.

    If the request includes tags, then the requester must have the organizations:TagResource permission.

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.
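
    For illustration, the sketch below creates a service control policy; it assumes the generated Organizations client from this SDK, and the policy document is a deliberately minimal placeholder.

        import software.amazon.awssdk.services.organizations.OrganizationsClient;
        import software.amazon.awssdk.services.organizations.model.CreatePolicyRequest;
        import software.amazon.awssdk.services.organizations.model.Policy;
        import software.amazon.awssdk.services.organizations.model.PolicyType;

        public class CreateScpSketch {
            public static void main(String[] args) {
                OrganizationsClient organizations = OrganizationsClient.create();

                // A minimal allow-all SCP document; real policies are normally more restrictive.
                String content = "{\"Version\":\"2012-10-17\","
                        + "\"Statement\":[{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}]}";

                Policy policy = organizations.createPolicy(
                        CreatePolicyRequest.builder()
                                .name("example-scp")
                                .description("Example service control policy")
                                .type(PolicyType.SERVICE_CONTROL_POLICY)
                                .content(content)
                                .build())
                        .policy();

                System.out.println("Created policy " + policy.policySummary().id());
            }
        }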

    " }, "DeclineHandshake":{ "name":"DeclineHandshake", @@ -282,7 +282,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

    Deletes the specified policy from your organization. Before you perform this operation, you must first detach the policy from all organizational units (OUs), roots, and accounts.

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Deletes the specified policy from your organization. Before you perform this operation, you must first detach the policy from all organizational units (OUs), roots, and accounts.

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    " }, "DeleteResourcePolicy":{ "name":"DeleteResourcePolicy", @@ -379,7 +379,7 @@ {"shape":"InvalidInputException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

    Returns the contents of the effective policy for specified policy type and account. The effective policy is the aggregation of any policies of the specified type that the account inherits, plus any policy of that type that is directly attached to the account.

    This operation applies only to policy types other than service control policies (SCPs).

    For more information about policy inheritance, see How Policy Inheritance Works in the Organizations User Guide.

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    " + "documentation":"

    Returns the contents of the effective policy for the specified policy type and account. The effective policy is the aggregation of any policies of the specified type that the account inherits, plus any policy of that type that is directly attached to the account.

    This operation applies only to policy types other than service control policies (SCPs).

    For more information about policy inheritance, see Understanding management policy inheritance in the Organizations User Guide.

    This operation can be called from any account in the organization.
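
    As a sketch of the aggregation described above, the following call retrieves the effective tag policy for one account. It assumes the generated Organizations client from this SDK; the account ID is a placeholder, and the EffectivePolicyType enum name follows the usual codegen pattern.

        import software.amazon.awssdk.services.organizations.OrganizationsClient;
        import software.amazon.awssdk.services.organizations.model.DescribeEffectivePolicyRequest;
        import software.amazon.awssdk.services.organizations.model.EffectivePolicy;
        import software.amazon.awssdk.services.organizations.model.EffectivePolicyType;

        public class EffectivePolicySketch {
            public static void main(String[] args) {
                OrganizationsClient organizations = OrganizationsClient.create();

                // The result merges inherited tag policies with any tag policy attached
                // directly to the target account.
                EffectivePolicy effective = organizations.describeEffectivePolicy(
                        DescribeEffectivePolicyRequest.builder()
                                .policyType(EffectivePolicyType.TAG_POLICY)
                                .targetId("111122223333")   // placeholder account ID
                                .build())
                        .effectivePolicy();

                System.out.println(effective.policyContent());
            }
        }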

    " }, "DescribeHandshake":{ "name":"DescribeHandshake", @@ -468,7 +468,7 @@ {"shape":"ResourcePolicyNotFoundException"}, {"shape":"ConstraintViolationException"} ], - "documentation":"

    Retrieves information about a resource policy.

    You can only call this operation from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    " + "documentation":"

    Retrieves information about a resource policy.

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    " }, "DetachPolicy":{ "name":"DetachPolicy", @@ -491,7 +491,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

    Detaches a policy from a target root, organizational unit (OU), or account.

    If the policy being detached is a service control policy (SCP), the changes to permissions for Identity and Access Management (IAM) users and roles in affected accounts are immediate.

    Every root, OU, and account must have at least one SCP attached. If you want to replace the default FullAWSAccess policy with an SCP that limits the permissions that can be delegated, you must attach the replacement SCP before you can remove the default SCP. This is the authorization strategy of an \"allow list\". If you instead attach a second SCP and leave the FullAWSAccess SCP still attached, and specify \"Effect\": \"Deny\" in the second SCP to override the \"Effect\": \"Allow\" in the FullAWSAccess policy (or any other attached SCP), you're using the authorization strategy of a \"deny list\".

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Detaches a policy from a target root, organizational unit (OU), or account.

    If the policy being detached is a service control policy (SCP), the changes to permissions for Identity and Access Management (IAM) users and roles in affected accounts are immediate.

    Every root, OU, and account must have at least one SCP attached. If you want to replace the default FullAWSAccess policy with an SCP that limits the permissions that can be delegated, you must attach the replacement SCP before you can remove the default SCP. This is the authorization strategy of an \"allow list\". If you instead attach a second SCP and leave the FullAWSAccess SCP still attached, and specify \"Effect\": \"Deny\" in the second SCP to override the \"Effect\": \"Allow\" in the FullAWSAccess policy (or any other attached SCP), you're using the authorization strategy of a \"deny list\".

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.
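
    A minimal sketch of detaching an SCP from an OU, assuming the generated Organizations client from this SDK; both IDs are placeholders. Attach any replacement SCP before detaching the old one so the target always keeps at least one SCP, as described above.

        import software.amazon.awssdk.services.organizations.OrganizationsClient;
        import software.amazon.awssdk.services.organizations.model.DetachPolicyRequest;

        public class DetachPolicySketch {
            public static void main(String[] args) {
                OrganizationsClient organizations = OrganizationsClient.create();

                // Detach the policy from the OU; for SCPs the permission change takes effect
                // immediately in the affected accounts.
                organizations.detachPolicy(DetachPolicyRequest.builder()
                        .policyId("p-examplepolicyid1")                 // placeholder policy ID
                        .targetId("ou-examplerootid1-exampleouid1")     // placeholder OU ID
                        .build());
            }
        }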

    " }, "DisableAWSServiceAccess":{ "name":"DisableAWSServiceAccess", @@ -510,7 +510,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

    Disables the integration of an Amazon Web Services service (the service that is specified by ServicePrincipal) with Organizations. When you disable integration, the specified service no longer can create a service-linked role in new accounts in your organization. This means the service can't perform operations on your behalf on any new accounts in your organization. The service can still perform operations in older accounts until the service completes its clean-up from Organizations.

    We strongly recommend that you don't use this command to disable integration between Organizations and the specified Amazon Web Services service. Instead, use the console or commands that are provided by the specified service. This lets the trusted service perform any required initialization when enabling trusted access, such as creating any required resources and any required clean up of resources when disabling trusted access.

    For information about how to disable trusted service access to your organization using the trusted service, see the Learn more link under the Supports Trusted Access column at Amazon Web Services services that you can use with Organizations. on this page.

    If you disable access by using this command, it causes the following actions to occur:

    • The service can no longer create a service-linked role in the accounts in your organization. This means that the service can't perform operations on your behalf on any new accounts in your organization. The service can still perform operations in older accounts until the service completes its clean-up from Organizations.

    • The service can no longer perform tasks in the member accounts in the organization, unless those operations are explicitly permitted by the IAM policies that are attached to your roles. This includes any data aggregation from the member accounts to the management account, or to a delegated administrator account, where relevant.

    • Some services detect this and clean up any remaining data or resources related to the integration, while other services stop accessing the organization but leave any historical data and configuration in place to support a possible re-enabling of the integration.

    Using the other service's console or commands to disable the integration ensures that the other service is aware that it can clean up any resources that are required only for the integration. How the service cleans up its resources in the organization's accounts depends on that service. For more information, see the documentation for the other Amazon Web Services service.

    After you perform the DisableAWSServiceAccess operation, the specified service can no longer perform operations in your organization's accounts

    For more information about integrating other services with Organizations, including the list of services that work with Organizations, see Integrating Organizations with Other Amazon Web Services Services in the Organizations User Guide.

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Disables the integration of an Amazon Web Services service (the service that is specified by ServicePrincipal) with Organizations. When you disable integration, the specified service no longer can create a service-linked role in new accounts in your organization. This means the service can't perform operations on your behalf on any new accounts in your organization. The service can still perform operations in older accounts until the service completes its clean-up from Organizations.

    We strongly recommend that you don't use this command to disable integration between Organizations and the specified Amazon Web Services service. Instead, use the console or commands that are provided by the specified service. This lets the trusted service perform any required initialization when enabling trusted access, such as creating any required resources and any required clean up of resources when disabling trusted access.

    For information about how to disable trusted service access to your organization using the trusted service, see the Learn more link under the Supports Trusted Access column at Amazon Web Services services that you can use with Organizations on this page.

    If you disable access by using this command, it causes the following actions to occur:

    • The service can no longer create a service-linked role in the accounts in your organization. This means that the service can't perform operations on your behalf on any new accounts in your organization. The service can still perform operations in older accounts until the service completes its clean-up from Organizations.

    • The service can no longer perform tasks in the member accounts in the organization, unless those operations are explicitly permitted by the IAM policies that are attached to your roles. This includes any data aggregation from the member accounts to the management account, or to a delegated administrator account, where relevant.

    • Some services detect this and clean up any remaining data or resources related to the integration, while other services stop accessing the organization but leave any historical data and configuration in place to support a possible re-enabling of the integration.

    Using the other service's console or commands to disable the integration ensures that the other service is aware that it can clean up any resources that are required only for the integration. How the service cleans up its resources in the organization's accounts depends on that service. For more information, see the documentation for the other Amazon Web Services service.

    After you perform the DisableAWSServiceAccess operation, the specified service can no longer perform operations in your organization's accounts.

    For more information about integrating other services with Organizations, including the list of services that work with Organizations, see Using Organizations with other Amazon Web Services services in the Organizations User Guide.

    This operation can be called only from the organization's management account.

    " }, "DisablePolicyType":{ "name":"DisablePolicyType", @@ -533,7 +533,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

    Disables an organizational policy type in a root. A policy of a certain type can be attached to entities in a root only if that type is enabled in the root. After you perform this operation, you no longer can attach policies of the specified type to that root or to any organizational unit (OU) or account in that root. You can undo this by using the EnablePolicyType operation.

    This is an asynchronous request that Amazon Web Services performs in the background. If you disable a policy type for a root, it still appears enabled for the organization if all features are enabled for the organization. Amazon Web Services recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

    This operation can be called only from the organization's management account.

    To view the status of available policy types in the organization, use DescribeOrganization.

    " + "documentation":"

    Disables an organizational policy type in a root. A policy of a certain type can be attached to entities in a root only if that type is enabled in the root. After you perform this operation, you no longer can attach policies of the specified type to that root or to any organizational unit (OU) or account in that root. You can undo this by using the EnablePolicyType operation.

    This is an asynchronous request that Amazon Web Services performs in the background. If you disable a policy type for a root, it still appears enabled for the organization if all features are enabled for the organization. Amazon Web Services recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    To view the status of available policy types in the organization, use DescribeOrganization.

    " }, "EnableAWSServiceAccess":{ "name":"EnableAWSServiceAccess", @@ -552,7 +552,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

    Enables the integration of an Amazon Web Services service (the service that is specified by ServicePrincipal) with Organizations. When you enable integration, you allow the specified service to create a service-linked role in all the accounts in your organization. This allows the service to perform operations on your behalf in your organization and its accounts.

    We recommend that you enable integration between Organizations and the specified Amazon Web Services service by using the console or commands that are provided by the specified service. Doing so ensures that the service is aware that it can create the resources that are required for the integration. How the service creates those resources in the organization's accounts depends on that service. For more information, see the documentation for the other Amazon Web Services service.

    For more information about enabling services to integrate with Organizations, see Integrating Organizations with Other Amazon Web Services Services in the Organizations User Guide.

    You can only call this operation from the organization's management account and only if the organization has enabled all features.

    " + "documentation":"

    Enables the integration of an Amazon Web Services service (the service that is specified by ServicePrincipal) with Organizations. When you enable integration, you allow the specified service to create a service-linked role in all the accounts in your organization. This allows the service to perform operations on your behalf in your organization and its accounts.

    We recommend that you enable integration between Organizations and the specified Amazon Web Services service by using the console or commands that are provided by the specified service. Doing so ensures that the service is aware that it can create the resources that are required for the integration. How the service creates those resources in the organization's accounts depends on that service. For more information, see the documentation for the other Amazon Web Services service.

    For more information about enabling services to integrate with Organizations, see Using Organizations with other Amazon Web Services services in the Organizations User Guide.

    You can only call this operation from the organization's management account and only if the organization has enabled all features.

    " }, "EnableAllFeatures":{ "name":"EnableAllFeatures", @@ -571,7 +571,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Enables all features in an organization. This enables the use of organization policies that can restrict the services and actions that can be called in each account. Until you enable all features, you have access only to consolidated billing, and you can't use any of the advanced account administration features that Organizations supports. For more information, see Enabling All Features in Your Organization in the Organizations User Guide.

    This operation is required only for organizations that were created explicitly with only the consolidated billing features enabled. Calling this operation sends a handshake to every invited account in the organization. The feature set change can be finalized and the additional features enabled only after all administrators in the invited accounts approve the change by accepting the handshake.

    After you enable all features, you can separately enable or disable individual policy types in a root using EnablePolicyType and DisablePolicyType. To see the status of policy types in a root, use ListRoots.

    After all invited member accounts accept the handshake, you finalize the feature set change by accepting the handshake that contains \"Action\": \"ENABLE_ALL_FEATURES\". This completes the change.

    After you enable all features in your organization, the management account in the organization can apply policies on all member accounts. These policies can restrict what users and even administrators in those accounts can do. The management account can apply policies that prevent accounts from leaving the organization. Ensure that your account administrators are aware of this.

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Enables all features in an organization. This enables the use of organization policies that can restrict the services and actions that can be called in each account. Until you enable all features, you have access only to consolidated billing, and you can't use any of the advanced account administration features that Organizations supports. For more information, see Enabling all features in your organization in the Organizations User Guide.

    This operation is required only for organizations that were created explicitly with only the consolidated billing features enabled. Calling this operation sends a handshake to every invited account in the organization. The feature set change can be finalized and the additional features enabled only after all administrators in the invited accounts approve the change by accepting the handshake.

    After you enable all features, you can separately enable or disable individual policy types in a root using EnablePolicyType and DisablePolicyType. To see the status of policy types in a root, use ListRoots.

    After all invited member accounts accept the handshake, you finalize the feature set change by accepting the handshake that contains \"Action\": \"ENABLE_ALL_FEATURES\". This completes the change.

    After you enable all features in your organization, the management account in the organization can apply policies on all member accounts. These policies can restrict what users and even administrators in those accounts can do. The management account can apply policies that prevent accounts from leaving the organization. Ensure that your account administrators are aware of this.

    This operation can be called only from the organization's management account.
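
    For illustration, the sketch below starts the all-features handshake described above; it assumes the generated Organizations client from this SDK. The returned handshake still has to be accepted by every invited member account before the change is final.

        import software.amazon.awssdk.services.organizations.OrganizationsClient;
        import software.amazon.awssdk.services.organizations.model.EnableAllFeaturesRequest;
        import software.amazon.awssdk.services.organizations.model.Handshake;

        public class EnableAllFeaturesSketch {
            public static void main(String[] args) {
                OrganizationsClient organizations = OrganizationsClient.create();

                // Sends an ENABLE_ALL_FEATURES handshake to every invited member account.
                Handshake handshake = organizations.enableAllFeatures(
                        EnableAllFeaturesRequest.builder().build())
                        .handshake();

                System.out.println("Handshake " + handshake.id() + " is " + handshake.stateAsString());
            }
        }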

    " }, "EnablePolicyType":{ "name":"EnablePolicyType", @@ -595,7 +595,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

    Enables a policy type in a root. After you enable a policy type in a root, you can attach policies of that type to the root, any organizational unit (OU), or account in that root. You can undo this by using the DisablePolicyType operation.

    This is an asynchronous request that Amazon Web Services performs in the background. Amazon Web Services recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

    This operation can be called only from the organization's management account.

    You can enable a policy type in a root only if that policy type is available in the organization. To view the status of available policy types in the organization, use DescribeOrganization.

    " + "documentation":"

    Enables a policy type in a root. After you enable a policy type in a root, you can attach policies of that type to the root, any organizational unit (OU), or account in that root. You can undo this by using the DisablePolicyType operation.

    This is an asynchronous request that Amazon Web Services performs in the background. Amazon Web Services recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    You can enable a policy type in a root only if that policy type is available in the organization. To view the status of available policy types in the organization, use DescribeOrganization.
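
    The recommended sequence above (check the root with ListRoots, then enable the type) looks roughly like the following sketch, assuming the generated Organizations client from this SDK.

        import software.amazon.awssdk.services.organizations.OrganizationsClient;
        import software.amazon.awssdk.services.organizations.model.EnablePolicyTypeRequest;
        import software.amazon.awssdk.services.organizations.model.ListRootsRequest;
        import software.amazon.awssdk.services.organizations.model.PolicyType;
        import software.amazon.awssdk.services.organizations.model.Root;

        public class EnablePolicyTypeSketch {
            public static void main(String[] args) {
                OrganizationsClient organizations = OrganizationsClient.create();

                // Look up the root first, as recommended, then enable tag policies in it.
                Root root = organizations.listRoots(ListRootsRequest.builder().build())
                        .roots()
                        .get(0);

                organizations.enablePolicyType(EnablePolicyTypeRequest.builder()
                        .rootId(root.id())
                        .policyType(PolicyType.TAG_POLICY)
                        .build());
            }
        }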

    " }, "InviteAccountToOrganization":{ "name":"InviteAccountToOrganization", @@ -618,7 +618,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Sends an invitation to another account to join your organization as a member account. Organizations sends email on your behalf to the email address that is associated with the other account's owner. The invitation is implemented as a Handshake whose details are in the response.

    • You can invite Amazon Web Services accounts only from the same seller as the management account. For example, if your organization's management account was created by Amazon Internet Services Pvt. Ltd (AISPL), an Amazon Web Services seller in India, you can invite only other AISPL accounts to your organization. You can't combine accounts from AISPL and Amazon Web Services or from any other Amazon Web Services seller. For more information, see Consolidated Billing in India.

    • If you receive an exception that indicates that you exceeded your account limits for the organization or that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists after an hour, contact Amazon Web Services Support.

    If the request includes tags, then the requester must have the organizations:TagResource permission.

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Sends an invitation to another account to join your organization as a member account. Organizations sends email on your behalf to the email address that is associated with the other account's owner. The invitation is implemented as a Handshake whose details are in the response.

    • You can invite Amazon Web Services accounts only from the same seller as the management account. For example, if your organization's management account was created by Amazon Internet Services Pvt. Ltd (AISPL), an Amazon Web Services seller in India, you can invite only other AISPL accounts to your organization. You can't combine accounts from AISPL and Amazon Web Services or from any other Amazon Web Services seller. For more information, see Consolidated billing in India.

    • If you receive an exception that indicates that you exceeded your account limits for the organization or that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists after an hour, contact Amazon Web Services Support.

    If the request includes tags, then the requester must have the organizations:TagResource permission.

    This operation can be called only from the organization's management account.
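
    A minimal sketch of sending an invitation by account ID, assuming the generated Organizations client from this SDK; the account ID and note are placeholders. The target can also be specified by email address.

        import software.amazon.awssdk.services.organizations.OrganizationsClient;
        import software.amazon.awssdk.services.organizations.model.Handshake;
        import software.amazon.awssdk.services.organizations.model.HandshakeParty;
        import software.amazon.awssdk.services.organizations.model.HandshakePartyType;
        import software.amazon.awssdk.services.organizations.model.InviteAccountToOrganizationRequest;

        public class InviteAccountSketch {
            public static void main(String[] args) {
                OrganizationsClient organizations = OrganizationsClient.create();

                // The invitation is delivered as a handshake to the owner of the target account.
                Handshake handshake = organizations.inviteAccountToOrganization(
                        InviteAccountToOrganizationRequest.builder()
                                .target(HandshakeParty.builder()
                                        .type(HandshakePartyType.ACCOUNT)
                                        .id("111122223333")             // placeholder account ID
                                        .build())
                                .notes("Please join our organization") // placeholder
                                .build())
                        .handshake();

                System.out.println("Invitation handshake: " + handshake.id());
            }
        }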

    " }, "LeaveOrganization":{ "name":"LeaveOrganization", @@ -637,7 +637,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Removes a member account from its parent organization. This version of the operation is performed by the account that wants to leave. To remove a member account as a user in the management account, use RemoveAccountFromOrganization instead.

    This operation can be called only from a member account in the organization.

    • The management account in an organization with all features enabled can set service control policies (SCPs) that can restrict what administrators of member accounts can do. This includes preventing them from successfully calling LeaveOrganization and leaving the organization.

    • You can leave an organization as a member account only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For each account that you want to make standalone, you must perform the following steps. If any of the steps are already completed for this account, that step doesn't appear.

      • Choose a support plan

      • Provide and verify the required contact information

      • Provide a current payment method

      Amazon Web Services uses the payment method to charge for any billable (not free tier) Amazon Web Services activity that occurs while the account isn't attached to an organization. Follow the steps at To leave an organization when all required account information has not yet been provided in the Organizations User Guide.

    • The account that you want to leave must not be a delegated administrator account for any Amazon Web Services service enabled for your organization. If the account is a delegated administrator, you must first change the delegated administrator account to another account that is remaining in the organization.

    • You can leave an organization only after you enable IAM user access to billing in your account. For more information, see Activating Access to the Billing and Cost Management Console in the Amazon Web Services Billing and Cost Management User Guide.

    • After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. Amazon Web Services accounts outside of an organization do not support tags.

    • A newly created account has a waiting period before it can be removed from its organization. If you get an error that indicates that a wait period is required, then try again in a few days.

    " + "documentation":"

    Removes a member account from its parent organization. This version of the operation is performed by the account that wants to leave. To remove a member account as a user in the management account, use RemoveAccountFromOrganization instead.

    This operation can be called only from a member account in the organization.

    • The management account in an organization with all features enabled can set service control policies (SCPs) that can restrict what administrators of member accounts can do. This includes preventing them from successfully calling LeaveOrganization and leaving the organization.

    • You can leave an organization as a member account only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For each account that you want to make standalone, you must perform the following steps. If any of the steps are already completed for this account, that step doesn't appear.

      • Choose a support plan

      • Provide and verify the required contact information

      • Provide a current payment method

      Amazon Web Services uses the payment method to charge for any billable (not free tier) Amazon Web Services activity that occurs while the account isn't attached to an organization. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

    • The account that you want to leave must not be a delegated administrator account for any Amazon Web Services service enabled for your organization. If the account is a delegated administrator, you must first change the delegated administrator account to another account that is remaining in the organization.

    • You can leave an organization only after you enable IAM user access to billing in your account. For more information, see About IAM access to the Billing and Cost Management console in the Amazon Web Services Billing and Cost Management User Guide.

    • After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. Amazon Web Services accounts outside of an organization do not support tags.

    • A newly created account has a waiting period before it can be removed from its organization. If you get an error that indicates that a wait period is required, then try again in a few days.

    • If you are using an organization principal to call LeaveOrganization across multiple accounts, you can do this for at most 5 accounts per second in a single organization.
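
    As a sketch of the call described above, assuming the generated Organizations client from this SDK: the operation takes no parameters and must be run with credentials of the member account that is leaving, not the management account.

        import software.amazon.awssdk.services.organizations.OrganizationsClient;
        import software.amazon.awssdk.services.organizations.model.LeaveOrganizationRequest;

        public class LeaveOrganizationSketch {
            public static void main(String[] args) {
                // Uses the default credentials chain; the credentials must belong to the
                // member account that is leaving.
                OrganizationsClient organizations = OrganizationsClient.create();

                // No parameters: the calling account is the one removed from its organization.
                organizations.leaveOrganization(LeaveOrganizationRequest.builder().build());
            }
        }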

    " }, "ListAWSServiceAccessForOrganization":{ "name":"ListAWSServiceAccessForOrganization", @@ -656,7 +656,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

    Returns a list of the Amazon Web Services services that you enabled to integrate with your organization. After a service on this list creates the resources that it requires for the integration, it can perform operations on your organization and its accounts.

    For more information about integrating other services with Organizations, including the list of services that currently work with Organizations, see Integrating Organizations with Other Amazon Web Services Services in the Organizations User Guide.

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    " + "documentation":"

    Returns a list of the Amazon Web Services services that you enabled to integrate with your organization. After a service on this list creates the resources that it requires for the integration, it can perform operations on your organization and its accounts.

    For more information about integrating other services with Organizations, including the list of services that currently work with Organizations, see Using Organizations with other Amazon Web Services services in the Organizations User Guide.

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    " }, "ListAccounts":{ "name":"ListAccounts", @@ -1011,7 +1011,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Removes the specified account from the organization.

    The removed account becomes a standalone account that isn't a member of any organization. It's no longer subject to any policies and is responsible for its own bill payments. The organization's management account is no longer charged for any expenses accrued by the member account after it's removed from the organization.

    This operation can be called only from the organization's management account. Member accounts can remove themselves with LeaveOrganization instead.

    • You can remove an account from your organization only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For an account that you want to make standalone, you must choose a support plan, provide and verify the required contact information, and provide a current payment method. Amazon Web Services uses the payment method to charge for any billable (not free tier) Amazon Web Services activity that occurs while the account isn't attached to an organization. To remove an account that doesn't yet have this information, you must sign in as the member account and follow the steps at To leave an organization when all required account information has not yet been provided in the Organizations User Guide.

    • The account that you want to leave must not be a delegated administrator account for any Amazon Web Services service enabled for your organization. If the account is a delegated administrator, you must first change the delegated administrator account to another account that is remaining in the organization.

    • After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. Amazon Web Services accounts outside of an organization do not support tags.

    " + "documentation":"

    Removes the specified account from the organization.

    The removed account becomes a standalone account that isn't a member of any organization. It's no longer subject to any policies and is responsible for its own bill payments. The organization's management account is no longer charged for any expenses accrued by the member account after it's removed from the organization.

    This operation can be called only from the organization's management account. Member accounts can remove themselves with LeaveOrganization instead.

    • You can remove an account from your organization only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

    • The account that you want to leave must not be a delegated administrator account for any Amazon Web Services service enabled for your organization. If the account is a delegated administrator, you must first change the delegated administrator account to another account that is remaining in the organization.

    • After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. Amazon Web Services accounts outside of an organization do not support tags.

    " }, "TagResource":{ "name":"TagResource", @@ -1030,7 +1030,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Adds one or more tags to the specified resource.

    Currently, you can attach tags to the following resources in Organizations.

    • Amazon Web Services account

    • Organization root

    • Organizational unit (OU)

    • Policy (any type)

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Adds one or more tags to the specified resource.

    Currently, you can attach tags to the following resources in Organizations.

    • Amazon Web Services account

    • Organization root

    • Organizational unit (OU)

    • Policy (any type)

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.
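
    A minimal sketch of tagging, and later untagging, a member account; it assumes the generated Organizations client from this SDK, and the resource ID, key, and value are placeholders. The same pattern applies to roots, OUs, and policies.

        import software.amazon.awssdk.services.organizations.OrganizationsClient;
        import software.amazon.awssdk.services.organizations.model.Tag;
        import software.amazon.awssdk.services.organizations.model.TagResourceRequest;
        import software.amazon.awssdk.services.organizations.model.UntagResourceRequest;

        public class TagAccountSketch {
            public static void main(String[] args) {
                OrganizationsClient organizations = OrganizationsClient.create();

                // Attach a tag to a member account.
                organizations.tagResource(TagResourceRequest.builder()
                        .resourceId("111122223333")     // placeholder account ID
                        .tags(Tag.builder().key("CostCenter").value("1234").build())
                        .build());

                // Remove the tag again by key.
                organizations.untagResource(UntagResourceRequest.builder()
                        .resourceId("111122223333")
                        .tagKeys("CostCenter")
                        .build());
            }
        }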

    " }, "UntagResource":{ "name":"UntagResource", @@ -1049,7 +1049,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Removes any tags with the specified keys from the specified resource.

    You can attach tags to the following resources in Organizations.

    • Amazon Web Services account

    • Organization root

    • Organizational unit (OU)

    • Policy (any type)

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Removes any tags with the specified keys from the specified resource.

    You can attach tags to the following resources in Organizations.

    • Amazon Web Services account

    • Organization root

    • Organizational unit (OU)

    • Policy (any type)

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    " }, "UpdateOrganizationalUnit":{ "name":"UpdateOrganizationalUnit", @@ -1093,7 +1093,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

    Updates an existing policy with a new name, description, or content. If you don't supply any parameter, that value remains unchanged. You can't change a policy's type.

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Updates an existing policy with a new name, description, or content. If you don't supply any parameter, that value remains unchanged. You can't change a policy's type.

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.
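
    For illustration, the sketch below renames a policy and updates its description only; it assumes the generated Organizations client from this SDK, and the policy ID is a placeholder. Fields that are not supplied, such as the content, stay unchanged, as described above.

        import software.amazon.awssdk.services.organizations.OrganizationsClient;
        import software.amazon.awssdk.services.organizations.model.UpdatePolicyRequest;

        public class UpdatePolicySketch {
            public static void main(String[] args) {
                OrganizationsClient organizations = OrganizationsClient.create();

                // Only the supplied fields change; the policy's content and type are left as is.
                organizations.updatePolicy(UpdatePolicyRequest.builder()
                        .policyId("p-examplepolicyid1")         // placeholder policy ID
                        .name("example-scp-v2")
                        .description("Renamed example policy")
                        .build());
            }
        }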

    " } }, "shapes":{ @@ -1129,7 +1129,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

    You don't have permissions to perform the requested operation. The user or role that is making the request must have at least one IAM permissions policy attached that grants the required permissions. For more information, see Access Management in the IAM User Guide.

    ", + "documentation":"

    You don't have permissions to perform the requested operation. The user or role that is making the request must have at least one IAM permissions policy attached that grants the required permissions. For more information, see Access Management in the IAM User Guide.

    ", "exception":true }, "AccessDeniedForDependencyException":{ @@ -1239,7 +1239,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

    You can't invite an existing account to your organization until you verify that you own the email address associated with the management account. For more information, see Email Address Verification in the Organizations User Guide.

    ", + "documentation":"

    You can't invite an existing account to your organization until you verify that you own the email address associated with the management account. For more information, see Email address verification in the Organizations User Guide.

    ", "exception":true }, "AccountStatus":{ @@ -1378,7 +1378,7 @@ "Message":{"shape":"ExceptionMessage"}, "Reason":{"shape":"ConstraintViolationExceptionReason"} }, - "documentation":"

    Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit:

    Some of the reasons in the following list might not be applicable to this specific API or operation.

    • ACCOUNT_CANNOT_LEAVE_ORGANIZATION: You attempted to remove the management account from the organization. You can't remove the management account. Instead, after you remove all member accounts, delete the organization itself.

    • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at Removing a member account from your organization in the Organizations User Guide.

    • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

    • ACCOUNT_CREATION_NOT_COMPLETE: Your account setup isn't complete or your account isn't fully active. You must complete the account setup before you create an organization.

    • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact Amazon Web Services Support to request an increase in your limit.

      Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact Amazon Web Services Support to request an increase in the number of accounts.

      Deleted and closed accounts still count toward your limit.

      If you get this exception when running a command immediately after creating the organization, wait one hour and try again. After an hour, if the command continues to fail with this error, contact Amazon Web Services Support.

    • CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to register the management account of the organization as a delegated administrator for an Amazon Web Services service integrated with Organizations. You can designate only a member account as a delegated administrator.

    • CANNOT_CLOSE_MANAGEMENT_ACCOUNT: You attempted to close the management account. To close the management account for the organization, you must first either remove or close all member accounts in the organization. Follow the standard account closure process using root credentials.

    • CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove an account that is registered as a delegated administrator for a service integrated with your organization. To complete this operation, you must first deregister this account as a delegated administrator.

    • CLOSE_ACCOUNT_QUOTA_EXCEEDED: You have exceeded close account quota for the past 30 days.

    • CLOSE_ACCOUNT_REQUESTS_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can close at a time.

    • CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an organization in the specified region, you must enable all features mode.

    • DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register an Amazon Web Services account as a delegated administrator for an Amazon Web Services service that already has a delegated administrator. To complete this operation, you must first deregister any existing delegated administrators for this service.

    • EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only valid for a limited period of time. You must resubmit the request and generate a new verification code.

    • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

    • INVALID_PAYMENT_INSTRUMENT: You cannot remove an account because no supported payment method is associated with the account. Amazon Web Services does not support cards issued by financial institutions in Russia or Belarus. For more information, see Managing your Amazon Web Services payments.

    • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's management account to the marketplace that corresponds to the management account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

    • MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the Amazon Web Services Regions in China. To create an organization, the master must have a valid business license. For more information, contact customer support.

    • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the management account. Then try the operation again.

    • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the management account must have an associated account in the Amazon Web Services GovCloud (US-West) Region. For more information, see Organizations in the Amazon Web Services GovCloud User Guide.

    • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this management account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the Organizations User Guide.

    • MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted to register more delegated administrators than allowed for the service principal.

    • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

    • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

    • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the Organizations User Guide.

    • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

    • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

    • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

    • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

    • POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that is larger than the maximum size.

    • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

    • SERVICE_ACCESS_NOT_ENABLED: You attempted to register a delegated administrator before you enabled service access. Call the EnableAWSServiceAccess API first.

    • TAG_POLICY_VIOLATION: You attempted to create or update a resource with tags that are not compliant with the tag policy requirements for this account.

    • WAIT_PERIOD_ACTIVE: After you create an Amazon Web Services account, there is a waiting period before you can remove it from the organization. If you get an error that indicates that a wait period is required, try again in a few days.

    ", + "documentation":"

    Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit:

    Some of the reasons in the following list might not be applicable to this specific API or operation.

    • ACCOUNT_CANNOT_LEAVE_ORGANIZATION: You attempted to remove the management account from the organization. You can't remove the management account. Instead, after you remove all member accounts, delete the organization itself.

    • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at Removing a member account from your organization in the Organizations User Guide.

    • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

    • ACCOUNT_CREATION_NOT_COMPLETE: Your account setup isn't complete or your account isn't fully active. You must complete the account setup before you create an organization.

    • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact Amazon Web Services Support to request an increase in your limit.

      Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact Amazon Web Services Support to request an increase in the number of accounts.

      Deleted and closed accounts still count toward your limit.

      If you get this exception when running a command immediately after creating the organization, wait one hour and try again. After an hour, if the command continues to fail with this error, contact Amazon Web Services Support.

    • CANNOT_REGISTER_SUSPENDED_ACCOUNT_AS_DELEGATED_ADMINISTRATOR: You cannot register a suspended account as a delegated administrator.

    • CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to register the management account of the organization as a delegated administrator for an Amazon Web Services service integrated with Organizations. You can designate only a member account as a delegated administrator.

    • CANNOT_CLOSE_MANAGEMENT_ACCOUNT: You attempted to close the management account. To close the management account for the organization, you must first either remove or close all member accounts in the organization. Follow the standard account closure process using root credentials.

    • CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove an account that is registered as a delegated administrator for a service integrated with your organization. To complete this operation, you must first deregister this account as a delegated administrator.

    • CLOSE_ACCOUNT_QUOTA_EXCEEDED: You have exceeded close account quota for the past 30 days.

    • CLOSE_ACCOUNT_REQUESTS_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can close at a time.

    • CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an organization in the specified region, you must enable all features mode.

    • DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register an Amazon Web Services account as a delegated administrator for an Amazon Web Services service that already has a delegated administrator. To complete this operation, you must first deregister any existing delegated administrators for this service.

    • EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only valid for a limited period of time. You must resubmit the request and generate a new verification code.

    • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

    • INVALID_PAYMENT_INSTRUMENT: You cannot remove an account because no supported payment method is associated with the account. Amazon Web Services does not support cards issued by financial institutions in Russia or Belarus. For more information, see Managing your Amazon Web Services payments.

    • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's management account to the marketplace that corresponds to the management account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

    • MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the Amazon Web Services Regions in China. To create an organization, the master must have a valid business license. For more information, contact customer support.

    • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the management account. Then try the operation again.

    • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the management account must have an associated account in the Amazon Web Services GovCloud (US-West) Region. For more information, see Organizations in the Amazon Web Services GovCloud User Guide.

    • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this management account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

    • MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted to register more delegated administrators than allowed for the service principal.

    • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

    • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

    • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

    • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

    • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

    • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

    • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

    • POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that is larger than the maximum size.

    • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

    • SERVICE_ACCESS_NOT_ENABLED: You attempted to register a delegated administrator before you enabled service access. Call the EnableAWSServiceAccess API first.

    • TAG_POLICY_VIOLATION: You attempted to create or update a resource with tags that are not compliant with the tag policy requirements for this account.

    • WAIT_PERIOD_ACTIVE: After you create an Amazon Web Services account, there is a waiting period before you can remove it from the organization. If you get an error that indicates that a wait period is required, try again in a few days.

    ", "exception":true }, "ConstraintViolationExceptionReason":{ @@ -1464,11 +1464,11 @@ }, "RoleName":{ "shape":"RoleName", - "documentation":"

    The name of an IAM role that Organizations automatically preconfigures in the new member account. This role trusts the management account, allowing users in the management account to assume the role, as permitted by the management account administrator. The role has administrator permissions in the new member account.

    If you don't specify this parameter, the role name defaults to OrganizationAccountAccessRole.

    For more information about how to use this role to access the member account, see the following links:

    The regex pattern that is used to validate this parameter. The pattern can include uppercase letters, lowercase letters, digits with no spaces, and any of the following characters: =,.@-

    " + "documentation":"

    The name of an IAM role that Organizations automatically preconfigures in the new member account. This role trusts the management account, allowing users in the management account to assume the role, as permitted by the management account administrator. The role has administrator permissions in the new member account.

    If you don't specify this parameter, the role name defaults to OrganizationAccountAccessRole.

    For more information about how to use this role to access the member account, see the following links:

    The regex pattern that is used to validate this parameter. The pattern can include uppercase letters, lowercase letters, digits with no spaces, and any of the following characters: =,.@-

    " }, "IamUserAccessToBilling":{ "shape":"IAMUserAccessToBilling", - "documentation":"

    If set to ALLOW, the new account enables IAM users to access account billing information if they have the required permissions. If set to DENY, only the root user of the new account can access account billing information. For more information, see Activating Access to the Billing and Cost Management Console in the Amazon Web Services Billing and Cost Management User Guide.

    If you don't specify this parameter, the value defaults to ALLOW, and IAM users and roles with the required permissions can access billing information for the new account.

    " + "documentation":"

    If set to ALLOW, the new account enables IAM users to access account billing information if they have the required permissions. If set to DENY, only the root user of the new account can access account billing information. For more information, see About IAM access to the Billing and Cost Management console in the Amazon Web Services Billing and Cost Management User Guide.

    If you don't specify this parameter, the value defaults to ALLOW, and IAM users and roles with the required permissions can access billing information for the new account.

    " }, "Tags":{ "shape":"Tags", @@ -1486,7 +1486,7 @@ "members":{ "CreateAccountStatus":{ "shape":"CreateAccountStatus", - "documentation":"

    A structure that contains details about the request to create an account. This response structure might not be fully populated when you first receive it because account creation is an asynchronous process. You can pass the returned CreateAccountStatus ID as a parameter to DescribeCreateAccountStatus to get status about the progress of the request at later times. You can also check the CloudTrail log for the CreateAccountResult event. For more information, see Monitoring the Activity in Your Organization in the Organizations User Guide.

    " + "documentation":"

    A structure that contains details about the request to create an account. This response structure might not be fully populated when you first receive it because account creation is an asynchronous process. You can pass the returned CreateAccountStatus ID as a parameter to DescribeCreateAccountStatus to get status about the progress of the request at later times. You can also check the CloudTrail log for the CreateAccountResult event. For more information, see Logging and monitoring in Organizations in the Organizations User Guide.
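    A minimal sketch of that flow with the generated Java v2 client follows; the request members (including the optional RoleName and IamUserAccessToBilling parameters described above) come from this service model, while the email, account name, and polling interval are placeholders.

    import software.amazon.awssdk.services.organizations.OrganizationsClient;
    import software.amazon.awssdk.services.organizations.model.CreateAccountState;
    import software.amazon.awssdk.services.organizations.model.CreateAccountStatus;
    import software.amazon.awssdk.services.organizations.model.IAMUserAccessToBilling;

    public class CreateAccountExample {
        public static void main(String[] args) throws InterruptedException {
            try (OrganizationsClient organizations = OrganizationsClient.create()) {
                // Start the asynchronous account creation; email and account name are placeholders.
                CreateAccountStatus status = organizations.createAccount(r -> r
                        .email("admin@example.com")
                        .accountName("workload-account")
                        .roleName("OrganizationAccountAccessRole")
                        .iamUserAccessToBilling(IAMUserAccessToBilling.ALLOW))
                        .createAccountStatus();

                // Poll DescribeCreateAccountStatus with the returned Id until the request completes.
                while (status.state() == CreateAccountState.IN_PROGRESS) {
                    Thread.sleep(5_000);
                    String requestId = status.id();
                    status = organizations.describeCreateAccountStatus(r -> r
                            .createAccountRequestId(requestId))
                            .createAccountStatus();
                }
                System.out.println("Final state: " + status.state());
            }
        }
    }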

    " } } }, @@ -1569,11 +1569,11 @@ }, "RoleName":{ "shape":"RoleName", - "documentation":"

    (Optional)

    The name of an IAM role that Organizations automatically preconfigures in the new member accounts in both the Amazon Web Services GovCloud (US) Region and in the commercial Region. This role trusts the management account, allowing users in the management account to assume the role, as permitted by the management account administrator. The role has administrator permissions in the new member account.

    If you don't specify this parameter, the role name defaults to OrganizationAccountAccessRole.

    For more information about how to use this role to access the member account, see Accessing and Administering the Member Accounts in Your Organization in the Organizations User Guide and steps 2 and 3 in Tutorial: Delegate Access Across Amazon Web Services accounts Using IAM Roles in the IAM User Guide.

    The regex pattern that is used to validate this parameter. The pattern can include uppercase letters, lowercase letters, digits with no spaces, and any of the following characters: =,.@-

    " + "documentation":"

    (Optional)

    The name of an IAM role that Organizations automatically preconfigures in the new member accounts in both the Amazon Web Services GovCloud (US) Region and in the commercial Region. This role trusts the management account, allowing users in the management account to assume the role, as permitted by the management account administrator. The role has administrator permissions in the new member account.

    If you don't specify this parameter, the role name defaults to OrganizationAccountAccessRole.

    For more information about how to use this role to access the member account, see the following links:

    The regex pattern that is used to validate this parameter. The pattern can include uppercase letters, lowercase letters, digits with no spaces, and any of the following characters: =,.@-

    " }, "IamUserAccessToBilling":{ "shape":"IAMUserAccessToBilling", - "documentation":"

    If set to ALLOW, the new linked account in the commercial Region enables IAM users to access account billing information if they have the required permissions. If set to DENY, only the root user of the new account can access account billing information. For more information, see Activating Access to the Billing and Cost Management Console in the Amazon Web Services Billing and Cost Management User Guide.

    If you don't specify this parameter, the value defaults to ALLOW, and IAM users and roles with the required permissions can access billing information for the new account.

    " + "documentation":"

    If set to ALLOW, the new linked account in the commercial Region enables IAM users to access account billing information if they have the required permissions. If set to DENY, only the root user of the new account can access account billing information. For more information, see About IAM access to the Billing and Cost Management console in the Amazon Web Services Billing and Cost Management User Guide.

    If you don't specify this parameter, the value defaults to ALLOW, and IAM users and roles with the required permissions can access billing information for the new account.

    " }, "Tags":{ "shape":"Tags", @@ -1592,7 +1592,7 @@ "members":{ "FeatureSet":{ "shape":"OrganizationFeatureSet", - "documentation":"

    Specifies the feature set supported by the new organization. Each feature set supports different levels of functionality.

    • CONSOLIDATED_BILLING: All member accounts have their bills consolidated to and paid by the management account. For more information, see Consolidated billing in the Organizations User Guide.

      The consolidated billing feature subset isn't available for organizations in the Amazon Web Services GovCloud (US) Region.

    • ALL: In addition to all the features supported by the consolidated billing feature set, the management account can also apply any policy type to any member account in the organization. For more information, see All features in the Organizations User Guide.

    " + "documentation":"

    Specifies the feature set supported by the new organization. Each feature set supports different levels of functionality.

    • CONSOLIDATED_BILLING: All member accounts have their bills consolidated to and paid by the management account. For more information, see Consolidated billing in the Organizations User Guide.

      The consolidated billing feature subset isn't available for organizations in the Amazon Web Services GovCloud (US) Region.

    • ALL: In addition to all the features supported by the consolidated billing feature set, the management account can also apply any policy type to any member account in the organization. For more information, see All features in the Organizations User Guide.

    " } } }, @@ -2211,7 +2211,7 @@ "Message":{"shape":"ExceptionMessage"}, "Reason":{"shape":"HandshakeConstraintViolationExceptionReason"} }, - "documentation":"

    The requested operation would violate the constraint identified in the reason code.

    Some of the reasons in the following list might not be applicable to this specific API or operation:

    • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. Note that deleted and closed accounts still count toward your limit.

      If you get this exception immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact Amazon Web Services Support.

    • ALREADY_IN_AN_ORGANIZATION: The handshake request is invalid because the invited account is already a member of an organization.

    • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

    • INVITE_DISABLED_DURING_ENABLE_ALL_FEATURES: You can't issue new invitations to join an organization while it's in the process of enabling all features. You can resume inviting accounts after you finalize the process when all accounts have agreed to the change.

    • ORGANIZATION_ALREADY_HAS_ALL_FEATURES: The handshake request is invalid because the organization has already enabled all features.

    • ORGANIZATION_IS_ALREADY_PENDING_ALL_FEATURES_MIGRATION: The handshake request is invalid because the organization has already started the process to enable all features.

    • ORGANIZATION_FROM_DIFFERENT_SELLER_OF_RECORD: The request failed because the account is from a different marketplace than the accounts in the organization. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be from the same marketplace.

    • ORGANIZATION_MEMBERSHIP_CHANGE_RATE_LIMIT_EXCEEDED: You attempted to change the membership of an account too quickly after its previous change.

    • PAYMENT_INSTRUMENT_REQUIRED: You can't complete the operation with an account that doesn't have a payment instrument, such as a credit card, associated with it.

    ", + "documentation":"

    The requested operation would violate the constraint identified in the reason code.

    Some of the reasons in the following list might not be applicable to this specific API or operation:

    • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. Note that deleted and closed accounts still count toward your limit.

      If you get this exception immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact Amazon Web Services Support.

    • ALREADY_IN_AN_ORGANIZATION: The handshake request is invalid because the invited account is already a member of an organization.

    • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

    • INVITE_DISABLED_DURING_ENABLE_ALL_FEATURES: You can't issue new invitations to join an organization while it's in the process of enabling all features. You can resume inviting accounts after you finalize the process when all accounts have agreed to the change.

    • ORGANIZATION_ALREADY_HAS_ALL_FEATURES: The handshake request is invalid because the organization has already enabled all features.

    • ORGANIZATION_IS_ALREADY_PENDING_ALL_FEATURES_MIGRATION: The handshake request is invalid because the organization has already started the process to enable all features.

    • ORGANIZATION_FROM_DIFFERENT_SELLER_OF_RECORD: The request failed because the account is from a different marketplace than the accounts in the organization. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be from the same marketplace.

    • ORGANIZATION_MEMBERSHIP_CHANGE_RATE_LIMIT_EXCEEDED: You attempted to change the membership of an account too quickly after its previous change.

    • PAYMENT_INSTRUMENT_REQUIRED: You can't complete the operation with an account that doesn't have a payment instrument, such as a credit card, associated with it.

    ", "exception":true }, "HandshakeConstraintViolationExceptionReason":{ @@ -2929,7 +2929,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

    The provided policy document doesn't meet the requirements of the specified policy type. For example, the syntax might be incorrect. For details about service control policy syntax, see Service Control Policy Syntax in the Organizations User Guide.

    ", + "documentation":"

    The provided policy document doesn't meet the requirements of the specified policy type. For example, the syntax might be incorrect. For details about service control policy syntax, see SCP syntax in the Organizations User Guide.

    ", "exception":true }, "MasterCannotLeaveOrganizationException":{ @@ -2986,7 +2986,7 @@ }, "FeatureSet":{ "shape":"OrganizationFeatureSet", - "documentation":"

    Specifies the functionality that currently is available to the organization. If set to \"ALL\", then all features are enabled and policies can be applied to accounts in the organization. If set to \"CONSOLIDATED_BILLING\", then only consolidated billing functionality is available. For more information, see Enabling All Features in Your Organization in the Organizations User Guide.

    " + "documentation":"

    Specifies the functionality that currently is available to the organization. If set to \"ALL\", then all features are enabled and policies can be applied to accounts in the organization. If set to \"CONSOLIDATED_BILLING\", then only consolidated billing functionality is available. For more information, see Enabling all features in your organization in the Organizations User Guide.

    " }, "MasterAccountArn":{ "shape":"AccountArn", @@ -3027,7 +3027,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

    The organization isn't empty. To delete an organization, you must first remove all accounts except the management account, delete all OUs, and delete all policies.

    ", + "documentation":"

    The organization isn't empty. To delete an organization, you must first remove all accounts except the management account.

    ", "exception":true }, "OrganizationalUnit":{ @@ -3280,7 +3280,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

    You can't use the specified policy type with the feature set currently enabled for this organization. For example, you can enable SCPs only after you enable all features in the organization. For more information, see Managing Organizations Policiesin the Organizations User Guide.

    ", + "documentation":"

    You can't use the specified policy type with the feature set currently enabled for this organization. For example, you can enable SCPs only after you enable all features in the organization. For more information, see Managing Organizations policies in the Organizations User Guide.

    ", "exception":true }, "PolicyTypeNotEnabledException":{ @@ -3288,7 +3288,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

    The specified policy type isn't currently enabled in this root. You can't attach policies of the specified type to entities in a root until you enable that type in the root. For more information, see Enabling All Features in Your Organization in the Organizations User Guide.

    ", + "documentation":"

    The specified policy type isn't currently enabled in this root. You can't attach policies of the specified type to entities in a root until you enable that type in the root. For more information, see Enabling all features in your organization in the Organizations User Guide.

    ", "exception":true }, "PolicyTypeStatus":{ @@ -3323,7 +3323,7 @@ "members":{ "Content":{ "shape":"ResourcePolicyContent", - "documentation":"

    If provided, the new content for the resource policy. The text must be correctly formatted JSON that complies with the syntax for the resource policy's type. For more information, see Service Control Policy Syntax in the Organizations User Guide.

    " + "documentation":"

    If provided, the new content for the resource policy. The text must be correctly formatted JSON that complies with the syntax for the resource policy's type. For more information, see SCP syntax in the Organizations User Guide.

    " }, "Tags":{ "shape":"Tags", @@ -3581,7 +3581,7 @@ "Type":{"shape":"ExceptionType"}, "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

    You have sent too many requests in too short a period of time. The quota helps protect against denial-of-service attacks. Try again later.

    For information about quotas that affect Organizations, see Quotas for Organizationsin the Organizations User Guide.

    ", + "documentation":"

    You have sent too many requests in too short a period of time. The quota helps protect against denial-of-service attacks. Try again later.

    For information about quotas that affect Organizations, see Quotas for Organizations in the Organizations User Guide.

    ", "exception":true }, "UnsupportedAPIEndpointException":{ @@ -3650,7 +3650,7 @@ }, "Content":{ "shape":"PolicyContent", - "documentation":"

    If provided, the new content for the policy. The text must be correctly formatted JSON that complies with the syntax for the policy's type. For more information, see Service Control Policy Syntax in the Organizations User Guide.

    " + "documentation":"

    If provided, the new content for the policy. The text must be correctly formatted JSON that complies with the syntax for the policy's type. For more information, see SCP syntax in the Organizations User Guide.
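    A minimal sketch of such an update with the generated Java v2 client follows; the policy ID and the single Deny statement are illustrative placeholders.

    import software.amazon.awssdk.services.organizations.OrganizationsClient;

    public class UpdatePolicyExample {
        public static void main(String[] args) {
            // A small service control policy body; the Deny statement shown is illustrative only.
            String scpContent = "{"
                    + "\"Version\":\"2012-10-17\","
                    + "\"Statement\":[{\"Effect\":\"Deny\",\"Action\":\"ec2:*\",\"Resource\":\"*\"}]"
                    + "}";
            try (OrganizationsClient organizations = OrganizationsClient.create()) {
                // p-examplepolicyid111 is a placeholder policy ID.
                organizations.updatePolicy(r -> r
                        .policyId("p-examplepolicyid111")
                        .content(scpContent));
            }
        }
    }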

    " } } }, @@ -3664,5 +3664,5 @@ } } }, - "documentation":"

    Organizations is a web service that enables you to consolidate your multiple Amazon Web Services accounts into an organization and centrally manage your accounts and their resources.

    This guide provides descriptions of the Organizations operations. For more information about using this service, see the Organizations User Guide.

    Support and feedback for Organizations

    We welcome your feedback. Send your comments to feedback-awsorganizations@amazon.com or post your feedback and questions in the Organizations support forum. For more information about the Amazon Web Services support forums, see Forums Help.

    Endpoint to call when using the CLI or the Amazon Web Services SDK

    For the current release of Organizations, specify the us-east-1 region for all Amazon Web Services API and CLI calls made from the commercial Amazon Web Services Regions outside of China. If calling from one of the Amazon Web Services Regions in China, then specify cn-northwest-1. You can do this in the CLI by using these parameters and commands:

    • Use the following parameter with each command to specify both the endpoint and its region:

      --endpoint-url https://organizations.us-east-1.amazonaws.com (from commercial Amazon Web Services Regions outside of China)

      or

      --endpoint-url https://organizations.cn-northwest-1.amazonaws.com.cn (from Amazon Web Services Regions in China)

    • Use the default endpoint, but configure your default region with this command:

      aws configure set default.region us-east-1 (from commercial Amazon Web Services Regions outside of China)

      or

      aws configure set default.region cn-northwest-1 (from Amazon Web Services Regions in China)

    • Use the following parameter with each command to specify the endpoint:

      --region us-east-1 (from commercial Amazon Web Services Regions outside of China)

      or

      --region cn-northwest-1 (from Amazon Web Services Regions in China)

    Recording API Requests

    Organizations supports CloudTrail, a service that records Amazon Web Services API calls for your Amazon Web Services account and delivers log files to an Amazon S3 bucket. By using information collected by CloudTrail, you can determine which requests the Organizations service received, who made the request and when, and so on. For more about Organizations and its support for CloudTrail, see Logging Organizations Events with CloudTrail in the Organizations User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the CloudTrail User Guide.

    " + "documentation":"

    Organizations is a web service that enables you to consolidate your multiple Amazon Web Services accounts into an organization and centrally manage your accounts and their resources.

    This guide provides descriptions of the Organizations operations. For more information about using this service, see the Organizations User Guide.

    Support and feedback for Organizations

    We welcome your feedback. Send your comments to feedback-awsorganizations@amazon.com or post your feedback and questions in the Organizations support forum. For more information about the Amazon Web Services support forums, see Forums Help.

    Endpoint to call when using the CLI or the Amazon Web Services SDK

    For the current release of Organizations, specify the us-east-1 region for all Amazon Web Services API and CLI calls made from the commercial Amazon Web Services Regions outside of China. If calling from one of the Amazon Web Services Regions in China, then specify cn-northwest-1. You can do this in the CLI by using these parameters and commands:

    • Use the following parameter with each command to specify both the endpoint and its region:

      --endpoint-url https://organizations.us-east-1.amazonaws.com (from commercial Amazon Web Services Regions outside of China)

      or

      --endpoint-url https://organizations.cn-northwest-1.amazonaws.com.cn (from Amazon Web Services Regions in China)

    • Use the default endpoint, but configure your default region with this command:

      aws configure set default.region us-east-1 (from commercial Amazon Web Services Regions outside of China)

      or

      aws configure set default.region cn-northwest-1 (from Amazon Web Services Regions in China)

    • Use the following parameter with each command to specify the endpoint:

      --region us-east-1 (from commercial Amazon Web Services Regions outside of China)

      or

      --region cn-northwest-1 (from Amazon Web Services Regions in China)
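    With this SDK, the same choice is made on the client builder. The following sketch mirrors the CLI examples above; the region constants and builder methods are standard v2 client configuration, and the endpoint URL is the one listed for the Regions in China.

    import java.net.URI;

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.organizations.OrganizationsClient;

    public class OrganizationsEndpointConfig {
        public static void main(String[] args) {
            // From commercial Regions outside of China, target us-east-1.
            OrganizationsClient commercial = OrganizationsClient.builder()
                    .region(Region.US_EAST_1)
                    .build();

            // From Regions in China, target cn-northwest-1, optionally pinning the endpoint explicitly.
            OrganizationsClient china = OrganizationsClient.builder()
                    .region(Region.CN_NORTHWEST_1)
                    .endpointOverride(URI.create("https://organizations.cn-northwest-1.amazonaws.com.cn"))
                    .build();

            commercial.close();
            china.close();
        }
    }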

    Recording API Requests

    Organizations supports CloudTrail, a service that records Amazon Web Services API calls for your Amazon Web Services account and delivers log files to an Amazon S3 bucket. By using information collected by CloudTrail, you can determine which requests the Organizations service received, who made the request and when, and so on. For more about Organizations and its support for CloudTrail, see Logging Organizations API calls with CloudTrail in the Organizations User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the CloudTrail User Guide.

    " } diff --git a/services/osis/pom.xml b/services/osis/pom.xml index 3af8f6c3c966..01851db435f3 100644 --- a/services/osis/pom.xml +++ b/services/osis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT osis AWS Java SDK :: Services :: OSIS diff --git a/services/outposts/pom.xml b/services/outposts/pom.xml index f563efb19f70..2d596e04392b 100644 --- a/services/outposts/pom.xml +++ b/services/outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT outposts AWS Java SDK :: Services :: Outposts diff --git a/services/panorama/pom.xml b/services/panorama/pom.xml index 2304aca3425f..bf5b28c3268d 100644 --- a/services/panorama/pom.xml +++ b/services/panorama/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT panorama AWS Java SDK :: Services :: Panorama diff --git a/services/paymentcryptography/pom.xml b/services/paymentcryptography/pom.xml index 878f5ee6ebac..a890ce40f4a3 100644 --- a/services/paymentcryptography/pom.xml +++ b/services/paymentcryptography/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT paymentcryptography AWS Java SDK :: Services :: Payment Cryptography diff --git a/services/paymentcryptographydata/pom.xml b/services/paymentcryptographydata/pom.xml index 823ab5684fd1..a97d2630a849 100644 --- a/services/paymentcryptographydata/pom.xml +++ b/services/paymentcryptographydata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT paymentcryptographydata AWS Java SDK :: Services :: Payment Cryptography Data diff --git a/services/paymentcryptographydata/src/main/resources/codegen-resources/endpoint-tests.json b/services/paymentcryptographydata/src/main/resources/codegen-resources/endpoint-tests.json index b78414e06822..1c130cc08ba7 100644 --- a/services/paymentcryptographydata/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/paymentcryptographydata/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,54 +1,54 @@ { "testCases": [ { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://dataplane.payment-cryptography-fips.us-gov-east-1.api.aws" + "url": "https://dataplane.payment-cryptography-fips.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-gov-east-1", "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dataplane.payment-cryptography-fips.us-gov-east-1.amazonaws.com" + "url": "https://dataplane.payment-cryptography-fips.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-gov-east-1", "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://dataplane.payment-cryptography.us-gov-east-1.api.aws" + "url": "https://dataplane.payment-cryptography.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-gov-east-1", "UseDualStack": true } }, { - "documentation": 
"For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dataplane.payment-cryptography.us-gov-east-1.amazonaws.com" + "url": "https://dataplane.payment-cryptography.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-gov-east-1", "UseDualStack": false } }, @@ -60,8 +60,8 @@ } }, "params": { - "UseFIPS": true, "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -73,8 +73,8 @@ } }, "params": { - "UseFIPS": true, "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -86,8 +86,8 @@ } }, "params": { - "UseFIPS": false, "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -99,108 +99,108 @@ } }, "params": { - "UseFIPS": false, "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://dataplane.payment-cryptography-fips.us-gov-east-1.api.aws" + } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-iso-east-1", "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dataplane.payment-cryptography-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://dataplane.payment-cryptography-fips.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-iso-east-1", "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://dataplane.payment-cryptography.us-gov-east-1.api.aws" + } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-iso-east-1", "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dataplane.payment-cryptography.us-iso-east-1.c2s.ic.gov" + "url": "https://dataplane.payment-cryptography.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-iso-east-1", "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://dataplane.payment-cryptography-fips.us-east-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-east-1", "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": 
{ "endpoint": { - "url": "https://dataplane.payment-cryptography-fips.us-east-1.amazonaws.com" + "url": "https://dataplane.payment-cryptography-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-east-1", "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://dataplane.payment-cryptography.us-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dataplane.payment-cryptography.us-east-1.amazonaws.com" + "url": "https://dataplane.payment-cryptography.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false } }, @@ -210,8 +210,8 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -223,8 +223,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -234,8 +234,8 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -247,21 +247,34 @@ } }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -272,8 +285,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -284,11 +297,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/paymentcryptographydata/src/main/resources/codegen-resources/service-2.json b/services/paymentcryptographydata/src/main/resources/codegen-resources/service-2.json index 9c1f1d630320..d18b7641456e 100644 --- a/services/paymentcryptographydata/src/main/resources/codegen-resources/service-2.json +++ 
b/services/paymentcryptographydata/src/main/resources/codegen-resources/service-2.json @@ -670,8 +670,7 @@ "type":"structure", "required":[ "CipherText", - "KeyArn", - "KeyCheckValue" + "KeyArn" ], "members":{ "CipherText":{ @@ -783,7 +782,7 @@ "documentation":"

    The length of a MAC under generation.

    " }, "MessageData":{ - "shape":"HexLengthBetween2And4096", + "shape":"HexEvenLengthBetween2And4096", "documentation":"

    The data for which a MAC is under generation.

    " } } @@ -884,6 +883,13 @@ } } }, + "HexEvenLengthBetween16And32":{ + "type":"string", + "max":32, + "min":16, + "pattern":"^(?:[0-9a-fA-F][0-9a-fA-F])+$", + "sensitive":true + }, "HexEvenLengthBetween16And4064":{ "type":"string", "max":4064, @@ -898,6 +904,20 @@ "pattern":"^(?:[0-9a-fA-F][0-9a-fA-F])+$", "sensitive":true }, + "HexEvenLengthBetween2And4096":{ + "type":"string", + "max":4096, + "min":2, + "pattern":"^(?:[0-9a-fA-F][0-9a-fA-F])+$", + "sensitive":true + }, + "HexEvenLengthBetween4And128":{ + "type":"string", + "max":128, + "min":4, + "pattern":"^(?:[0-9a-fA-F][0-9a-fA-F])+$", + "sensitive":true + }, "HexLength16Or32":{ "type":"string", "max":32, @@ -941,12 +961,6 @@ "min":2, "pattern":"^[0-9a-fA-F]+$" }, - "HexLengthBetween2And4096":{ - "type":"string", - "max":4096, - "min":2, - "pattern":"^[0-9a-fA-F]+$" - }, "HexLengthBetween2And8":{ "type":"string", "max":8, @@ -1679,7 +1693,7 @@ ], "members":{ "EncryptedPinBlock":{ - "shape":"HexLengthBetween16And32", + "shape":"HexEvenLengthBetween16And32", "documentation":"

    The encrypted PIN block data that Amazon Web Services Payment Cryptography translates.

    " }, "IncomingDukptAttributes":{ @@ -1949,7 +1963,7 @@ "documentation":"

    The keyARN of the encryption key that Amazon Web Services Payment Cryptography uses to verify MAC data.

    " }, "Mac":{ - "shape":"HexLengthBetween4And128", + "shape":"HexEvenLengthBetween4And128", "documentation":"

    The MAC being verified.

    " }, "MacLength":{ @@ -1957,7 +1971,7 @@ "documentation":"

    The length of the MAC.

    " }, "MessageData":{ - "shape":"HexLengthBetween2And4096", + "shape":"HexEvenLengthBetween2And4096", "documentation":"

    The data for which the MAC is under verification.

    " }, "VerificationAttributes":{ diff --git a/services/pcaconnectorad/pom.xml b/services/pcaconnectorad/pom.xml new file mode 100644 index 000000000000..0b5c7c8daec0 --- /dev/null +++ b/services/pcaconnectorad/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.20.144-SNAPSHOT + + pcaconnectorad + AWS Java SDK :: Services :: Pca Connector Ad + The AWS Java SDK for Pca Connector Ad module holds the client classes that are used for + communicating with Pca Connector Ad. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.pcaconnectorad + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/pcaconnectorad/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/pcaconnectorad/src/main/resources/codegen-resources/endpoint-rule-set.json new file mode 100644 index 000000000000..df58dfb6b6b0 --- /dev/null +++ b/services/pcaconnectorad/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://pca-connector-ad-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://pca-connector-ad-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + 
"rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://pca-connector-ad.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://pca-connector-ad.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] + } + ] +} \ No newline at end of file diff --git a/services/pcaconnectorad/src/main/resources/codegen-resources/endpoint-tests.json b/services/pcaconnectorad/src/main/resources/codegen-resources/endpoint-tests.json new file mode 100644 index 000000000000..42408f7295bd --- /dev/null +++ b/services/pcaconnectorad/src/main/resources/codegen-resources/endpoint-tests.json @@ -0,0 +1,314 @@ +{ + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + 
"expect": { + "endpoint": { + "url": "https://pca-connector-ad-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-ad.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": 
"https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/services/pcaconnectorad/src/main/resources/codegen-resources/paginators-1.json b/services/pcaconnectorad/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..89234776f16b --- /dev/null +++ b/services/pcaconnectorad/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,34 @@ +{ + "pagination": { + "ListConnectors": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Connectors" + }, + "ListDirectoryRegistrations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "DirectoryRegistrations" + }, + "ListServicePrincipalNames": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ServicePrincipalNames" + }, + "ListTemplateGroupAccessControlEntries": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "AccessControlEntries" + }, + "ListTemplates": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Templates" + } + } +} diff --git a/services/pcaconnectorad/src/main/resources/codegen-resources/service-2.json b/services/pcaconnectorad/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..8259e515cb12 --- /dev/null +++ b/services/pcaconnectorad/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2836 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-05-10", + "endpointPrefix":"pca-connector-ad", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"PcaConnectorAd", + "serviceId":"Pca Connector Ad", + "signatureVersion":"v4", + "signingName":"pca-connector-ad", + "uid":"pca-connector-ad-2018-05-10" + }, + "operations":{ + "CreateConnector":{ + "name":"CreateConnector", + "http":{ + "method":"POST", + "requestUri":"/connectors", + "responseCode":202 + }, + "input":{"shape":"CreateConnectorRequest"}, + "output":{"shape":"CreateConnectorResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + 
{"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Creates a connector between Amazon Web Services Private CA and an Active Directory. You must specify the private CA, directory ID, and security groups.
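A minimal sketch of how this operation might be invoked once the module is generated, assuming the standard AWS SDK for Java v2 codegen conventions; the PcaConnectorAdClient, CreateConnectorRequest, and VpcInformation names are inferred from this service model rather than taken from the generated sources, and the ARN, directory ID, and security group ID are placeholders.

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.pcaconnectorad.PcaConnectorAdClient;
    import software.amazon.awssdk.services.pcaconnectorad.model.CreateConnectorRequest;
    import software.amazon.awssdk.services.pcaconnectorad.model.CreateConnectorResponse;
    import software.amazon.awssdk.services.pcaconnectorad.model.VpcInformation;

    public class CreateConnectorSketch {
        public static void main(String[] args) {
            // Assumed client and builder names follow the usual SDK v2 codegen pattern.
            try (PcaConnectorAdClient client = PcaConnectorAdClient.builder()
                    .region(Region.US_EAST_1)
                    .build()) {
                CreateConnectorRequest request = CreateConnectorRequest.builder()
                    .certificateAuthorityArn("arn:aws:acm-pca:us-east-1:111122223333:certificate-authority/11111111-2222-3333-4444-555555555555")
                    .directoryId("d-1234567890")                        // placeholder directory ID
                    .vpcInformation(VpcInformation.builder()
                            .securityGroupIds("sg-0123456789abcdef0")   // assumed list member on VpcInformation
                            .build())
                    .build();
                CreateConnectorResponse response = client.createConnector(request);
                System.out.println("Connector ARN: " + response.connectorArn());
            }
        }
    }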

    " + }, + "CreateDirectoryRegistration":{ + "name":"CreateDirectoryRegistration", + "http":{ + "method":"POST", + "requestUri":"/directoryRegistrations", + "responseCode":202 + }, + "input":{"shape":"CreateDirectoryRegistrationRequest"}, + "output":{"shape":"CreateDirectoryRegistrationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Creates a directory registration that authorizes communication between Amazon Web Services Private CA and an Active Directory.

    " + }, + "CreateServicePrincipalName":{ + "name":"CreateServicePrincipalName", + "http":{ + "method":"POST", + "requestUri":"/directoryRegistrations/{DirectoryRegistrationArn}/servicePrincipalNames/{ConnectorArn}", + "responseCode":202 + }, + "input":{"shape":"CreateServicePrincipalNameRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Creates a service principal name (SPN) for the service account in Active Directory. Kerberos authentication uses SPNs to associate a service instance with a service sign-in account.
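Because the SPN is keyed by both the directory registration and the connector, a call would pass both ARNs. A short sketch, again assuming SDK v2 codegen conventions and placeholder inputs:

    import software.amazon.awssdk.services.pcaconnectorad.PcaConnectorAdClient;
    import software.amazon.awssdk.services.pcaconnectorad.model.CreateServicePrincipalNameRequest;

    public class CreateSpnSketch {
        // Registers the SPN for the given connector under the given directory registration.
        static void createSpn(PcaConnectorAdClient client, String directoryRegistrationArn, String connectorArn) {
            client.createServicePrincipalName(CreateServicePrincipalNameRequest.builder()
                    .directoryRegistrationArn(directoryRegistrationArn)
                    .connectorArn(connectorArn)
                    .build());
        }
    }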

    ", + "idempotent":true + }, + "CreateTemplate":{ + "name":"CreateTemplate", + "http":{ + "method":"POST", + "requestUri":"/templates", + "responseCode":200 + }, + "input":{"shape":"CreateTemplateRequest"}, + "output":{"shape":"CreateTemplateResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Creates an Active Directory compatible certificate template. The connector issues certificates using these templates based on the requester’s Active Directory group membership.

    " + }, + "CreateTemplateGroupAccessControlEntry":{ + "name":"CreateTemplateGroupAccessControlEntry", + "http":{ + "method":"POST", + "requestUri":"/templates/{TemplateArn}/accessControlEntries", + "responseCode":200 + }, + "input":{"shape":"CreateTemplateGroupAccessControlEntryRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Create a group access control entry. Allow or deny Active Directory groups from enrolling and/or autoenrolling with the template based on the group security identifiers (SIDs).

    ", + "idempotent":true + }, + "DeleteConnector":{ + "name":"DeleteConnector", + "http":{ + "method":"DELETE", + "requestUri":"/connectors/{ConnectorArn}", + "responseCode":202 + }, + "input":{"shape":"DeleteConnectorRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Deletes a connector for Active Directory. You must provide the Amazon Resource Name (ARN) of the connector that you want to delete. You can find the ARN by calling the https://docs.aws.amazon.com/pca-connector-ad/latest/APIReference/API_ListConnectors action. Deleting a connector does not deregister your directory with Amazon Web Services Private CA. You can deregister your directory by calling the https://docs.aws.amazon.com/pca-connector-ad/latest/APIReference/API_DeleteDirectoryRegistration action.

    ", + "idempotent":true + }, + "DeleteDirectoryRegistration":{ + "name":"DeleteDirectoryRegistration", + "http":{ + "method":"DELETE", + "requestUri":"/directoryRegistrations/{DirectoryRegistrationArn}", + "responseCode":202 + }, + "input":{"shape":"DeleteDirectoryRegistrationRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Deletes a directory registration. Deleting a directory registration deauthorizes Amazon Web Services Private CA with the directory.

    ", + "idempotent":true + }, + "DeleteServicePrincipalName":{ + "name":"DeleteServicePrincipalName", + "http":{ + "method":"DELETE", + "requestUri":"/directoryRegistrations/{DirectoryRegistrationArn}/servicePrincipalNames/{ConnectorArn}", + "responseCode":202 + }, + "input":{"shape":"DeleteServicePrincipalNameRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Deletes the service principal name (SPN) used by a connector to authenticate with your Active Directory.

    ", + "idempotent":true + }, + "DeleteTemplate":{ + "name":"DeleteTemplate", + "http":{ + "method":"DELETE", + "requestUri":"/templates/{TemplateArn}", + "responseCode":202 + }, + "input":{"shape":"DeleteTemplateRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Deletes a template. Certificates issued using the template are still valid until they are revoked or expired.

    ", + "idempotent":true + }, + "DeleteTemplateGroupAccessControlEntry":{ + "name":"DeleteTemplateGroupAccessControlEntry", + "http":{ + "method":"DELETE", + "requestUri":"/templates/{TemplateArn}/accessControlEntries/{GroupSecurityIdentifier}", + "responseCode":200 + }, + "input":{"shape":"DeleteTemplateGroupAccessControlEntryRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Deletes a group access control entry.

    ", + "idempotent":true + }, + "GetConnector":{ + "name":"GetConnector", + "http":{ + "method":"GET", + "requestUri":"/connectors/{ConnectorArn}", + "responseCode":200 + }, + "input":{"shape":"GetConnectorRequest"}, + "output":{"shape":"GetConnectorResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists information about your connector. You specify the connector on input by its ARN (Amazon Resource Name).
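A small sketch of checking a connector's state through this operation, assuming SDK v2 codegen conventions for the generated client and model classes:

    import software.amazon.awssdk.services.pcaconnectorad.PcaConnectorAdClient;
    import software.amazon.awssdk.services.pcaconnectorad.model.Connector;
    import software.amazon.awssdk.services.pcaconnectorad.model.ConnectorStatus;
    import software.amazon.awssdk.services.pcaconnectorad.model.GetConnectorRequest;

    public class GetConnectorSketch {
        // Fetches the connector and reports whether it has finished creating.
        static boolean isActive(PcaConnectorAdClient client, String connectorArn) {
            Connector connector = client.getConnector(GetConnectorRequest.builder()
                            .connectorArn(connectorArn)
                            .build())
                    .connector();
            return connector.status() == ConnectorStatus.ACTIVE;
        }
    }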

    " + }, + "GetDirectoryRegistration":{ + "name":"GetDirectoryRegistration", + "http":{ + "method":"GET", + "requestUri":"/directoryRegistrations/{DirectoryRegistrationArn}", + "responseCode":200 + }, + "input":{"shape":"GetDirectoryRegistrationRequest"}, + "output":{"shape":"GetDirectoryRegistrationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    A structure that contains information about your directory registration.

    " + }, + "GetServicePrincipalName":{ + "name":"GetServicePrincipalName", + "http":{ + "method":"GET", + "requestUri":"/directoryRegistrations/{DirectoryRegistrationArn}/servicePrincipalNames/{ConnectorArn}", + "responseCode":200 + }, + "input":{"shape":"GetServicePrincipalNameRequest"}, + "output":{"shape":"GetServicePrincipalNameResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the service principal name that the connector uses to authenticate with Active Directory.

    " + }, + "GetTemplate":{ + "name":"GetTemplate", + "http":{ + "method":"GET", + "requestUri":"/templates/{TemplateArn}", + "responseCode":200 + }, + "input":{"shape":"GetTemplateRequest"}, + "output":{"shape":"GetTemplateResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Retrieves a certificate template that the connector uses to issue certificates from a private CA.

    " + }, + "GetTemplateGroupAccessControlEntry":{ + "name":"GetTemplateGroupAccessControlEntry", + "http":{ + "method":"GET", + "requestUri":"/templates/{TemplateArn}/accessControlEntries/{GroupSecurityIdentifier}", + "responseCode":200 + }, + "input":{"shape":"GetTemplateGroupAccessControlEntryRequest"}, + "output":{"shape":"GetTemplateGroupAccessControlEntryResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Retrieves the group access control entries for a template.

    " + }, + "ListConnectors":{ + "name":"ListConnectors", + "http":{ + "method":"GET", + "requestUri":"/connectors", + "responseCode":200 + }, + "input":{"shape":"ListConnectorsRequest"}, + "output":{"shape":"ListConnectorsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the connectors that you created by using the https://docs.aws.amazon.com/pca-connector-ad/latest/APIReference/API_CreateConnector action.
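Since paginators-1.json declares NextToken/MaxResults pagination for ListConnectors, the codegen should also emit a listConnectorsPaginator convenience. A sketch under that assumption (method name inferred, not verified against the generated client):

    import software.amazon.awssdk.services.pcaconnectorad.PcaConnectorAdClient;
    import software.amazon.awssdk.services.pcaconnectorad.model.ConnectorSummary;
    import software.amazon.awssdk.services.pcaconnectorad.model.ListConnectorsRequest;

    public class ListConnectorsSketch {
        // Walks every page and prints each connector ARN and status.
        static void printConnectors(PcaConnectorAdClient client) {
            client.listConnectorsPaginator(ListConnectorsRequest.builder().build())
                  .stream()
                  .flatMap(page -> page.connectors().stream())
                  .forEach((ConnectorSummary c) ->
                          System.out.println(c.arn() + " -> " + c.status()));
        }
    }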

    " + }, + "ListDirectoryRegistrations":{ + "name":"ListDirectoryRegistrations", + "http":{ + "method":"GET", + "requestUri":"/directoryRegistrations", + "responseCode":200 + }, + "input":{"shape":"ListDirectoryRegistrationsRequest"}, + "output":{"shape":"ListDirectoryRegistrationsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the directory registrations that you created by using the https://docs.aws.amazon.com/pca-connector-ad/latest/APIReference/API_CreateDirectoryRegistration action.

    " + }, + "ListServicePrincipalNames":{ + "name":"ListServicePrincipalNames", + "http":{ + "method":"GET", + "requestUri":"/directoryRegistrations/{DirectoryRegistrationArn}/servicePrincipalNames", + "responseCode":200 + }, + "input":{"shape":"ListServicePrincipalNamesRequest"}, + "output":{"shape":"ListServicePrincipalNamesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the service principal names that the connector uses to authenticate with Active Directory.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the tags, if any, that are associated with your resource.

    " + }, + "ListTemplateGroupAccessControlEntries":{ + "name":"ListTemplateGroupAccessControlEntries", + "http":{ + "method":"GET", + "requestUri":"/templates/{TemplateArn}/accessControlEntries", + "responseCode":200 + }, + "input":{"shape":"ListTemplateGroupAccessControlEntriesRequest"}, + "output":{"shape":"ListTemplateGroupAccessControlEntriesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists group access control entries you created.

    " + }, + "ListTemplates":{ + "name":"ListTemplates", + "http":{ + "method":"GET", + "requestUri":"/templates", + "responseCode":200 + }, + "input":{"shape":"ListTemplatesRequest"}, + "output":{"shape":"ListTemplatesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the templates, if any, that are associated with a connector.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{ResourceArn}", + "responseCode":204 + }, + "input":{"shape":"TagResourceRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Adds one or more tags to your resource.
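A tagging sketch, assuming SDK v2 codegen conventions and that the Tags shape (not shown in this excerpt) is a String-to-String map, as the key-value wording above suggests:

    import java.util.Map;
    import software.amazon.awssdk.services.pcaconnectorad.PcaConnectorAdClient;
    import software.amazon.awssdk.services.pcaconnectorad.model.TagResourceRequest;

    public class TagResourceSketch {
        // Tags a connector (or any other pca-connector-ad resource) by ARN.
        static void tag(PcaConnectorAdClient client, String resourceArn) {
            client.tagResource(TagResourceRequest.builder()
                    .resourceArn(resourceArn)
                    .tags(Map.of("Environment", "Production"))   // assumes Tags is a String-to-String map
                    .build());
        }
    }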

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{ResourceArn}", + "responseCode":204 + }, + "input":{"shape":"UntagResourceRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Removes one or more tags from your resource.

    ", + "idempotent":true + }, + "UpdateTemplate":{ + "name":"UpdateTemplate", + "http":{ + "method":"PATCH", + "requestUri":"/templates/{TemplateArn}", + "responseCode":200 + }, + "input":{"shape":"UpdateTemplateRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Update template configuration to define the information included in certificates.

    " + }, + "UpdateTemplateGroupAccessControlEntry":{ + "name":"UpdateTemplateGroupAccessControlEntry", + "http":{ + "method":"PATCH", + "requestUri":"/templates/{TemplateArn}/accessControlEntries/{GroupSecurityIdentifier}", + "responseCode":200 + }, + "input":{"shape":"UpdateTemplateGroupAccessControlEntryRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Update a group access control entry you created using CreateTemplateGroupAccessControlEntry.

    " + } + }, + "shapes":{ + "AccessControlEntry":{ + "type":"structure", + "members":{ + "AccessRights":{ + "shape":"AccessRights", + "documentation":"

    Permissions to allow or deny an Active Directory group to enroll or autoenroll certificates issued against a template.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the Access Control Entry was created.

    " + }, + "GroupDisplayName":{ + "shape":"DisplayName", + "documentation":"

    Name of the Active Directory group. This name does not need to match the group name in Active Directory.

    " + }, + "GroupSecurityIdentifier":{ + "shape":"GroupSecurityIdentifier", + "documentation":"

    Security identifier (SID) of the group object from Active Directory. The SID starts with \"S-\".

    " + }, + "TemplateArn":{ + "shape":"TemplateArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the Access Control Entry was updated.

    " + } + }, + "documentation":"

    An access control entry allows or denies Active Directory groups based on their security identifiers (SIDs) from enrolling and/or autoenrolling with the template.

    " + }, + "AccessControlEntryList":{ + "type":"list", + "member":{"shape":"AccessControlEntrySummary"} + }, + "AccessControlEntrySummary":{ + "type":"structure", + "members":{ + "AccessRights":{ + "shape":"AccessRights", + "documentation":"

    Allow or deny an Active Directory group from enrolling and autoenrolling certificates issued against a template.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the Access Control Entry was created.

    " + }, + "GroupDisplayName":{ + "shape":"DisplayName", + "documentation":"

    Name of the Active Directory group. This name does not need to match the group name in Active Directory.

    " + }, + "GroupSecurityIdentifier":{ + "shape":"GroupSecurityIdentifier", + "documentation":"

    Security identifier (SID) of the group object from Active Directory. The SID starts with \"S-\".

    " + }, + "TemplateArn":{ + "shape":"TemplateArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the Access Control Entry was updated.

    " + } + }, + "documentation":"

    Summary of group access control entries that allow or deny Active Directory groups based on their security identifiers (SIDs) from enrolling and/or autoenrolling with the template.

    " + }, + "AccessDeniedException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

    You can receive this error if you attempt to create a resource share when you don't have the required permissions. This can be caused by insufficient permissions in policies attached to your Amazon Web Services Identity and Access Management (IAM) principal. It can also happen because of restrictions in place from an Amazon Web Services Organizations service control policy (SCP) that affects your Amazon Web Services account.

    ", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AccessRight":{ + "type":"string", + "enum":[ + "ALLOW", + "DENY" + ] + }, + "AccessRights":{ + "type":"structure", + "members":{ + "AutoEnroll":{ + "shape":"AccessRight", + "documentation":"

    Allow or deny an Active Directory group from autoenrolling certificates issued against a template. The Active Directory group must be allowed to enroll to allow autoenrollment.

    " + }, + "Enroll":{ + "shape":"AccessRight", + "documentation":"

    Allow or deny an Active Directory group from enrolling certificates issued against a template.

    " + } + }, + "documentation":"

    Allow or deny permissions for an Active Directory group to enroll or autoenroll certificates for a template.
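A sketch of building this structure and attaching it to a template via CreateTemplateGroupAccessControlEntry, assuming SDK v2 codegen conventions; the SID and display name are placeholders.

    import software.amazon.awssdk.services.pcaconnectorad.PcaConnectorAdClient;
    import software.amazon.awssdk.services.pcaconnectorad.model.AccessRight;
    import software.amazon.awssdk.services.pcaconnectorad.model.AccessRights;
    import software.amazon.awssdk.services.pcaconnectorad.model.CreateTemplateGroupAccessControlEntryRequest;

    public class AccessControlEntrySketch {
        // Grants an AD group permission to both enroll and autoenroll against a template.
        static void allowGroup(PcaConnectorAdClient client, String templateArn, String groupSid) {
            client.createTemplateGroupAccessControlEntry(CreateTemplateGroupAccessControlEntryRequest.builder()
                    .templateArn(templateArn)
                    .groupSecurityIdentifier(groupSid)           // e.g. an "S-1-5-21-..." SID (placeholder)
                    .groupDisplayName("Example Enrollment Group")
                    .accessRights(AccessRights.builder()
                            .enroll(AccessRight.ALLOW)
                            .autoEnroll(AccessRight.ALLOW)
                            .build())
                    .build());
        }
    }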

    " + }, + "ApplicationPolicies":{ + "type":"structure", + "required":["Policies"], + "members":{ + "Critical":{ + "shape":"Boolean", + "documentation":"

    Marks the application policy extension as critical.

    " + }, + "Policies":{ + "shape":"ApplicationPolicyList", + "documentation":"

    Application policies describe what the certificate can be used for.

    " + } + }, + "documentation":"

    Application policies describe what the certificate can be used for.

    " + }, + "ApplicationPolicy":{ + "type":"structure", + "members":{ + "PolicyObjectIdentifier":{ + "shape":"CustomObjectIdentifier", + "documentation":"

    The object identifier (OID) of an application policy.

    " + }, + "PolicyType":{ + "shape":"ApplicationPolicyType", + "documentation":"

    The type of application policy.

    " + } + }, + "documentation":"

    Application policies describe what the certificate can be used for.
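Because ApplicationPolicy is a union, each entry sets exactly one of PolicyType or PolicyObjectIdentifier. A sketch of building the extension, assuming SDK v2 codegen conventions and using a placeholder OID:

    import software.amazon.awssdk.services.pcaconnectorad.model.ApplicationPolicies;
    import software.amazon.awssdk.services.pcaconnectorad.model.ApplicationPolicy;
    import software.amazon.awssdk.services.pcaconnectorad.model.ApplicationPolicyType;

    public class ApplicationPoliciesSketch {
        // Builds an application-policies extension for client and server authentication,
        // plus one policy identified by a raw OID (placeholder value). Each ApplicationPolicy,
        // being a union, sets exactly one of policyType or policyObjectIdentifier.
        static ApplicationPolicies clientServerAuth() {
            return ApplicationPolicies.builder()
                    .critical(false)
                    .policies(
                            ApplicationPolicy.builder().policyType(ApplicationPolicyType.CLIENT_AUTHENTICATION).build(),
                            ApplicationPolicy.builder().policyType(ApplicationPolicyType.SERVER_AUTHENTICATION).build(),
                            ApplicationPolicy.builder().policyObjectIdentifier("1.3.6.1.5.5.7.3.2").build())
                    .build();
        }
    }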

    ", + "union":true + }, + "ApplicationPolicyList":{ + "type":"list", + "member":{"shape":"ApplicationPolicy"}, + "max":100, + "min":1 + }, + "ApplicationPolicyType":{ + "type":"string", + "enum":[ + "ALL_APPLICATION_POLICIES", + "ANY_PURPOSE", + "ATTESTATION_IDENTITY_KEY_CERTIFICATE", + "CERTIFICATE_REQUEST_AGENT", + "CLIENT_AUTHENTICATION", + "CODE_SIGNING", + "CTL_USAGE", + "DIGITAL_RIGHTS", + "DIRECTORY_SERVICE_EMAIL_REPLICATION", + "DISALLOWED_LIST", + "DNS_SERVER_TRUST", + "DOCUMENT_ENCRYPTION", + "DOCUMENT_SIGNING", + "DYNAMIC_CODE_GENERATOR", + "EARLY_LAUNCH_ANTIMALWARE_DRIVER", + "EMBEDDED_WINDOWS_SYSTEM_COMPONENT_VERIFICATION", + "ENCLAVE", + "ENCRYPTING_FILE_SYSTEM", + "ENDORSEMENT_KEY_CERTIFICATE", + "FILE_RECOVERY", + "HAL_EXTENSION", + "IP_SECURITY_END_SYSTEM", + "IP_SECURITY_IKE_INTERMEDIATE", + "IP_SECURITY_TUNNEL_TERMINATION", + "IP_SECURITY_USER", + "ISOLATED_USER_MODE", + "KDC_AUTHENTICATION", + "KERNEL_MODE_CODE_SIGNING", + "KEY_PACK_LICENSES", + "KEY_RECOVERY", + "KEY_RECOVERY_AGENT", + "LICENSE_SERVER_VERIFICATION", + "LIFETIME_SIGNING", + "MICROSOFT_PUBLISHER", + "MICROSOFT_TIME_STAMPING", + "MICROSOFT_TRUST_LIST_SIGNING", + "OCSP_SIGNING", + "OEM_WINDOWS_SYSTEM_COMPONENT_VERIFICATION", + "PLATFORM_CERTIFICATE", + "PREVIEW_BUILD_SIGNING", + "PRIVATE_KEY_ARCHIVAL", + "PROTECTED_PROCESS_LIGHT_VERIFICATION", + "PROTECTED_PROCESS_VERIFICATION", + "QUALIFIED_SUBORDINATION", + "REVOKED_LIST_SIGNER", + "ROOT_PROGRAM_AUTO_UPDATE_CA_REVOCATION", + "ROOT_PROGRAM_AUTO_UPDATE_END_REVOCATION", + "ROOT_PROGRAM_NO_OSCP_FAILOVER_TO_CRL", + "ROOT_LIST_SIGNER", + "SECURE_EMAIL", + "SERVER_AUTHENTICATION", + "SMART_CARD_LOGIN", + "SPC_ENCRYPTED_DIGEST_RETRY_COUNT", + "SPC_RELAXED_PE_MARKER_CHECK", + "TIME_STAMPING", + "WINDOWS_HARDWARE_DRIVER_ATTESTED_VERIFICATION", + "WINDOWS_HARDWARE_DRIVER_EXTENDED_VERIFICATION", + "WINDOWS_HARDWARE_DRIVER_VERIFICATION", + "WINDOWS_HELLO_RECOVERY_KEY_ENCRYPTION", + "WINDOWS_KITS_COMPONENT", + "WINDOWS_RT_VERIFICATION", + "WINDOWS_SOFTWARE_EXTENSION_VERIFICATION", + "WINDOWS_STORE", + "WINDOWS_SYSTEM_COMPONENT_VERIFICATION", + "WINDOWS_TCB_COMPONENT", + "WINDOWS_THIRD_PARTY_APPLICATION_COMPONENT", + "WINDOWS_UPDATE" + ] + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "CertificateAuthorityArn":{ + "type":"string", + "max":200, + "min":5, + "pattern":"^arn:[\\w-]+:acm-pca:[\\w-]+:[0-9]+:certificate-authority\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$" + }, + "CertificateValidity":{ + "type":"structure", + "required":[ + "RenewalPeriod", + "ValidityPeriod" + ], + "members":{ + "RenewalPeriod":{ + "shape":"ValidityPeriod", + "documentation":"

    Renewal period is the period of time before certificate expiration when a new certificate will be requested.

    " + }, + "ValidityPeriod":{ + "shape":"ValidityPeriod", + "documentation":"

    Information describing the end of the validity period of the certificate. This parameter sets the “Not After” date for the certificate. Certificate validity is the period of time during which a certificate is valid. Validity can be expressed as an explicit date and time when the certificate expires, or as a span of time after issuance, stated in days, months, or years. For more information, see Validity in RFC 5280. This value is unaffected when ValidityNotBefore is also specified. For example, if Validity is set to 20 days in the future, the certificate will expire 20 days from issuance time regardless of the ValidityNotBefore value.

    " + } + }, + "documentation":"

    Information describing the end of the validity period of the certificate. This parameter sets the “Not After” date for the certificate. Certificate validity is the period of time during which a certificate is valid. Validity can be expressed as an explicit date and time when the certificate expires, or as a span of time after issuance, stated in days, months, or years. For more information, see Validity in RFC 5280. This value is unaffected when ValidityNotBefore is also specified. For example, if Validity is set to 20 days in the future, the certificate will expire 20 days from issuance time regardless of the ValidityNotBefore value.

    " + }, + "ClientCompatibilityV2":{ + "type":"string", + "enum":[ + "WINDOWS_SERVER_2003", + "WINDOWS_SERVER_2008", + "WINDOWS_SERVER_2008_R2", + "WINDOWS_SERVER_2012", + "WINDOWS_SERVER_2012_R2", + "WINDOWS_SERVER_2016" + ] + }, + "ClientCompatibilityV3":{ + "type":"string", + "enum":[ + "WINDOWS_SERVER_2008", + "WINDOWS_SERVER_2008_R2", + "WINDOWS_SERVER_2012", + "WINDOWS_SERVER_2012_R2", + "WINDOWS_SERVER_2016" + ] + }, + "ClientCompatibilityV4":{ + "type":"string", + "enum":[ + "WINDOWS_SERVER_2012", + "WINDOWS_SERVER_2012_R2", + "WINDOWS_SERVER_2016" + ] + }, + "ClientToken":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[!-~]+$" + }, + "ConflictException":{ + "type":"structure", + "required":[ + "Message", + "ResourceId", + "ResourceType" + ], + "members":{ + "Message":{"shape":"String"}, + "ResourceId":{ + "shape":"String", + "documentation":"

    The identifier of the Amazon Web Services resource.

    " + }, + "ResourceType":{ + "shape":"String", + "documentation":"

    The resource type, which can be one of Connector, Template, TemplateGroupAccessControlEntry, ServicePrincipalName, or DirectoryRegistration.

    " + } + }, + "documentation":"

    This request cannot be completed because the requested resource was being concurrently modified by another request.

    ", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "Connector":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"ConnectorArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateConnector.

    " + }, + "CertificateAuthorityArn":{ + "shape":"CertificateAuthorityArn", + "documentation":"

    The Amazon Resource Name (ARN) of the certificate authority being used.

    " + }, + "CertificateEnrollmentPolicyServerEndpoint":{ + "shape":"String", + "documentation":"

    Certificate enrollment endpoint that Active Directory domain-joined objects reach out to when requesting certificates.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the connector was created.

    " + }, + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

    The identifier of the Active Directory.

    " + }, + "Status":{ + "shape":"ConnectorStatus", + "documentation":"

    Status of the connector. Status can be creating, active, deleting, or failed.

    " + }, + "StatusReason":{ + "shape":"ConnectorStatusReason", + "documentation":"

    Additional information about the connector status if the status is failed.

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the connector was updated.

    " + }, + "VpcInformation":{ + "shape":"VpcInformation", + "documentation":"

    Information of the VPC and security group(s) used with the connector.

    " + } + }, + "documentation":"

    Amazon Web Services Private CA Connector for Active Directory is a service that links your Active Directory with Amazon Web Services Private CA. The connector brokers the exchange of certificates from Amazon Web Services Private CA to domain-joined users and machines managed with Active Directory.

    " + }, + "ConnectorArn":{ + "type":"string", + "max":200, + "min":5, + "pattern":"^arn:[\\w-]+:pca-connector-ad:[\\w-]+:[0-9]+:connector\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$" + }, + "ConnectorList":{ + "type":"list", + "member":{"shape":"ConnectorSummary"} + }, + "ConnectorStatus":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "DELETING", + "FAILED" + ] + }, + "ConnectorStatusReason":{ + "type":"string", + "enum":[ + "DIRECTORY_ACCESS_DENIED", + "INTERNAL_FAILURE", + "PRIVATECA_ACCESS_DENIED", + "PRIVATECA_RESOURCE_NOT_FOUND", + "SECURITY_GROUP_NOT_IN_VPC", + "VPC_ACCESS_DENIED", + "VPC_ENDPOINT_LIMIT_EXCEEDED", + "VPC_RESOURCE_NOT_FOUND" + ] + }, + "ConnectorSummary":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"ConnectorArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateConnector.

    " + }, + "CertificateAuthorityArn":{ + "shape":"CertificateAuthorityArn", + "documentation":"

    The Amazon Resource Name (ARN) of the certificate authority being used.

    " + }, + "CertificateEnrollmentPolicyServerEndpoint":{ + "shape":"String", + "documentation":"

    Certificate enrollment endpoint for Active Directory domain-joined objects to request certificates.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the connector was created.

    " + }, + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

    The identifier of the Active Directory.

    " + }, + "Status":{ + "shape":"ConnectorStatus", + "documentation":"

    Status of the connector. Status can be creating, active, deleting, or failed.

    " + }, + "StatusReason":{ + "shape":"ConnectorStatusReason", + "documentation":"

    Additional information about the connector status if the status is failed.

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the connector was updated.

    " + }, + "VpcInformation":{ + "shape":"VpcInformation", + "documentation":"

    Information of the VPC and security group(s) used with the connector.

    " + } + }, + "documentation":"

    Summary description of the Amazon Web Services Private CA AD connectors belonging to an Amazon Web Services account.

    " + }, + "CreateConnectorRequest":{ + "type":"structure", + "required":[ + "CertificateAuthorityArn", + "DirectoryId", + "VpcInformation" + ], + "members":{ + "CertificateAuthorityArn":{ + "shape":"CertificateAuthorityArn", + "documentation":"

    The Amazon Resource Name (ARN) of the certificate authority being used.

    " + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

    Idempotency token.

    ", + "idempotencyToken":true + }, + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

    The identifier of the Active Directory.

    " + }, + "Tags":{ + "shape":"Tags", + "documentation":"

    Metadata assigned to a connector consisting of a key-value pair.

    " + }, + "VpcInformation":{ + "shape":"VpcInformation", + "documentation":"

    Security group IDs that describe the inbound and outbound rules.

    " + } + } + }, + "CreateConnectorResponse":{ + "type":"structure", + "members":{ + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"

    If successful, the Amazon Resource Name (ARN) of the connector for Active Directory.

    " + } + } + }, + "CreateDirectoryRegistrationRequest":{ + "type":"structure", + "required":["DirectoryId"], + "members":{ + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

    Idempotency token.

    ", + "idempotencyToken":true + }, + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

    The identifier of the Active Directory.

    " + }, + "Tags":{ + "shape":"Tags", + "documentation":"

    Metadata assigned to a directory registration consisting of a key-value pair.

    " + } + } + }, + "CreateDirectoryRegistrationResponse":{ + "type":"structure", + "members":{ + "DirectoryRegistrationArn":{ + "shape":"DirectoryRegistrationArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.

    " + } + } + }, + "CreateServicePrincipalNameRequest":{ + "type":"structure", + "required":[ + "ConnectorArn", + "DirectoryRegistrationArn" + ], + "members":{ + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

    Idempotency token.

    ", + "idempotencyToken":true + }, + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateConnector.

    ", + "location":"uri", + "locationName":"ConnectorArn" + }, + "DirectoryRegistrationArn":{ + "shape":"DirectoryRegistrationArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.

    ", + "location":"uri", + "locationName":"DirectoryRegistrationArn" + } + } + }, + "CreateTemplateGroupAccessControlEntryRequest":{ + "type":"structure", + "required":[ + "AccessRights", + "GroupDisplayName", + "GroupSecurityIdentifier", + "TemplateArn" + ], + "members":{ + "AccessRights":{ + "shape":"AccessRights", + "documentation":"

    Allow or deny permissions for an Active Directory group to enroll or autoenroll certificates for a template.

    " + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

    Idempotency token.

    ", + "idempotencyToken":true + }, + "GroupDisplayName":{ + "shape":"DisplayName", + "documentation":"

    Name of the Active Directory group. This name does not need to match the group name in Active Directory.

    " + }, + "GroupSecurityIdentifier":{ + "shape":"GroupSecurityIdentifier", + "documentation":"

    Security identifier (SID) of the group object from Active Directory. The SID starts with \"S-\".

    " + }, + "TemplateArn":{ + "shape":"TemplateArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.

    ", + "location":"uri", + "locationName":"TemplateArn" + } + } + }, + "CreateTemplateRequest":{ + "type":"structure", + "required":[ + "ConnectorArn", + "Definition", + "Name" + ], + "members":{ + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

    Idempotency token.

    ", + "idempotencyToken":true + }, + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateConnector.

    " + }, + "Definition":{ + "shape":"TemplateDefinition", + "documentation":"

    Template configuration to define the information included in certificates. Define certificate validity and renewal periods, certificate request handling and enrollment options, key usage extensions, application policies, and cryptography settings.

    " + }, + "Name":{ + "shape":"TemplateName", + "documentation":"

    Name of the template. The template name must be unique.

    " + }, + "Tags":{ + "shape":"Tags", + "documentation":"

    Metadata assigned to a template consisting of a key-value pair.

    " + } + } + }, + "CreateTemplateResponse":{ + "type":"structure", + "members":{ + "TemplateArn":{ + "shape":"TemplateArn", + "documentation":"

    If successful, the Amazon Resource Name (ARN) of the template.

    " + } + } + }, + "CryptoProvidersList":{ + "type":"list", + "member":{"shape":"CryptoProvidersListMemberString"}, + "max":100, + "min":1 + }, + "CryptoProvidersListMemberString":{ + "type":"string", + "max":100, + "min":1 + }, + "CustomObjectIdentifier":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^([0-2])\\.([0-9]|([0-3][0-9]))(\\.([0-9]+)){0,126}$" + }, + "DeleteConnectorRequest":{ + "type":"structure", + "required":["ConnectorArn"], + "members":{ + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateConnector.

    ", + "location":"uri", + "locationName":"ConnectorArn" + } + } + }, + "DeleteDirectoryRegistrationRequest":{ + "type":"structure", + "required":["DirectoryRegistrationArn"], + "members":{ + "DirectoryRegistrationArn":{ + "shape":"DirectoryRegistrationArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.

    ", + "location":"uri", + "locationName":"DirectoryRegistrationArn" + } + } + }, + "DeleteServicePrincipalNameRequest":{ + "type":"structure", + "required":[ + "ConnectorArn", + "DirectoryRegistrationArn" + ], + "members":{ + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateConnector.

    ", + "location":"uri", + "locationName":"ConnectorArn" + }, + "DirectoryRegistrationArn":{ + "shape":"DirectoryRegistrationArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.

    ", + "location":"uri", + "locationName":"DirectoryRegistrationArn" + } + } + }, + "DeleteTemplateGroupAccessControlEntryRequest":{ + "type":"structure", + "required":[ + "GroupSecurityIdentifier", + "TemplateArn" + ], + "members":{ + "GroupSecurityIdentifier":{ + "shape":"GroupSecurityIdentifier", + "documentation":"

    Security identifier (SID) of the group object from Active Directory. The SID starts with \"S-\".

    ", + "location":"uri", + "locationName":"GroupSecurityIdentifier" + }, + "TemplateArn":{ + "shape":"TemplateArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.

    ", + "location":"uri", + "locationName":"TemplateArn" + } + } + }, + "DeleteTemplateRequest":{ + "type":"structure", + "required":["TemplateArn"], + "members":{ + "TemplateArn":{ + "shape":"TemplateArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.

    ", + "location":"uri", + "locationName":"TemplateArn" + } + } + }, + "DirectoryId":{ + "type":"string", + "pattern":"^d-[0-9a-f]{10}$" + }, + "DirectoryRegistration":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"DirectoryRegistrationArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the directory registration was created.

    " + }, + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

    The identifier of the Active Directory.

    " + }, + "Status":{ + "shape":"DirectoryRegistrationStatus", + "documentation":"

    Status of the directory registration.

    " + }, + "StatusReason":{ + "shape":"DirectoryRegistrationStatusReason", + "documentation":"

    Additional information about the directory registration status if the status is failed.

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the directory registration was updated.

    " + } + }, + "documentation":"

    The directory registration represents the authorization of the connector service with a directory.

    " + }, + "DirectoryRegistrationArn":{ + "type":"string", + "max":200, + "min":5, + "pattern":"^arn:[\\w-]+:pca-connector-ad:[\\w-]+:[0-9]+:directory-registration\\/d-[0-9a-f]{10}$" + }, + "DirectoryRegistrationList":{ + "type":"list", + "member":{"shape":"DirectoryRegistrationSummary"} + }, + "DirectoryRegistrationStatus":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "DELETING", + "FAILED" + ] + }, + "DirectoryRegistrationStatusReason":{ + "type":"string", + "enum":[ + "DIRECTORY_ACCESS_DENIED", + "DIRECTORY_RESOURCE_NOT_FOUND", + "DIRECTORY_NOT_ACTIVE", + "DIRECTORY_NOT_REACHABLE", + "DIRECTORY_TYPE_NOT_SUPPORTED", + "INTERNAL_FAILURE" + ] + }, + "DirectoryRegistrationSummary":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"DirectoryRegistrationArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the directory registration was created.

    " + }, + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

    The identifier of the Active Directory.

    " + }, + "Status":{ + "shape":"DirectoryRegistrationStatus", + "documentation":"

    Status of the directory registration.

    " + }, + "StatusReason":{ + "shape":"DirectoryRegistrationStatusReason", + "documentation":"

    Additional information about the directory registration status if the status is failed.

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the directory registration was updated.

    " + } + }, + "documentation":"

    The directory registration represents the authorization of the connector service with the Active Directory.

    " + }, + "DisplayName":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^[\\x20-\\x7E]+$" + }, + "EnrollmentFlagsV2":{ + "type":"structure", + "members":{ + "EnableKeyReuseOnNtTokenKeysetStorageFull":{ + "shape":"Boolean", + "documentation":"

    Allow renewal using the same key.

    " + }, + "IncludeSymmetricAlgorithms":{ + "shape":"Boolean", + "documentation":"

    Include symmetric algorithms allowed by the subject.

    " + }, + "NoSecurityExtension":{ + "shape":"Boolean", + "documentation":"

    This flag instructs the CA to not include the security extension szOID_NTDS_CA_SECURITY_EXT (OID:1.3.6.1.4.1.311.25.2), as specified in [MS-WCCE] sections 2.2.2.7.7.4 and 3.2.2.6.2.1.4.5.9, in the issued certificate. This addresses a Windows Kerberos elevation-of-privilege vulnerability.

    " + }, + "RemoveInvalidCertificateFromPersonalStore":{ + "shape":"Boolean", + "documentation":"

    Delete expired or revoked certificates instead of archiving them.

    " + }, + "UserInteractionRequired":{ + "shape":"Boolean", + "documentation":"

    Require user interaction when the subject is enrolled and the private key associated with the certificate is used.

    " + } + }, + "documentation":"

    Template configurations for v2 template schema.

    " + }, + "EnrollmentFlagsV3":{ + "type":"structure", + "members":{ + "EnableKeyReuseOnNtTokenKeysetStorageFull":{ + "shape":"Boolean", + "documentation":"

    Allow renewal using the same key.

    " + }, + "IncludeSymmetricAlgorithms":{ + "shape":"Boolean", + "documentation":"

    Include symmetric algorithms allowed by the subject.

    " + }, + "NoSecurityExtension":{ + "shape":"Boolean", + "documentation":"

    This flag instructs the CA to not include the security extension szOID_NTDS_CA_SECURITY_EXT (OID:1.3.6.1.4.1.311.25.2), as specified in [MS-WCCE] sections 2.2.2.7.7.4 and 3.2.2.6.2.1.4.5.9, in the issued certificate. This addresses a Windows Kerberos elevation-of-privilege vulnerability.

    " + }, + "RemoveInvalidCertificateFromPersonalStore":{ + "shape":"Boolean", + "documentation":"

    Delete expired or revoked certificates instead of archiving them.

    " + }, + "UserInteractionRequired":{ + "shape":"Boolean", + "documentation":"

    Require user interaction when the subject is enrolled and the private key associated with the certificate is used.

    " + } + }, + "documentation":"

    Template configurations for v3 template schema.

    " + }, + "EnrollmentFlagsV4":{ + "type":"structure", + "members":{ + "EnableKeyReuseOnNtTokenKeysetStorageFull":{ + "shape":"Boolean", + "documentation":"

    Allow renewal using the same key.

    " + }, + "IncludeSymmetricAlgorithms":{ + "shape":"Boolean", + "documentation":"

    Include symmetric algorithms allowed by the subject.

    " + }, + "NoSecurityExtension":{ + "shape":"Boolean", + "documentation":"

    This flag instructs the CA to not include the security extension szOID_NTDS_CA_SECURITY_EXT (OID:1.3.6.1.4.1.311.25.2), as specified in [MS-WCCE] sections 2.2.2.7.7.4 and 3.2.2.6.2.1.4.5.9, in the issued certificate. This addresses a Windows Kerberos elevation-of-privilege vulnerability.

    " + }, + "RemoveInvalidCertificateFromPersonalStore":{ + "shape":"Boolean", + "documentation":"

    Delete expired or revoked certificates instead of archiving them.

    " + }, + "UserInteractionRequired":{ + "shape":"Boolean", + "documentation":"

    Require user interaction when the subject is enrolled and the private key associated with the certificate is used.

    " + } + }, + "documentation":"

    Template configurations for v4 template schema.

    " + }, + "ExtensionsV2":{ + "type":"structure", + "required":["KeyUsage"], + "members":{ + "ApplicationPolicies":{ + "shape":"ApplicationPolicies", + "documentation":"

    Application policies specify what the certificate is used for and its purpose.

    " + }, + "KeyUsage":{ + "shape":"KeyUsage", + "documentation":"

    The key usage extension defines the purpose (e.g., encipherment, signature, certificate signing) of the key contained in the certificate.

    " + } + }, + "documentation":"

    Certificate extensions for v2 template schema

    " + }, + "ExtensionsV3":{ + "type":"structure", + "required":["KeyUsage"], + "members":{ + "ApplicationPolicies":{ + "shape":"ApplicationPolicies", + "documentation":"

    Application policies specify what the certificate is used for and its purpose.

    " + }, + "KeyUsage":{ + "shape":"KeyUsage", + "documentation":"

    The key usage extension defines the purpose (e.g., encipherment, signature, certificate signing) of the key contained in the certificate.

    " + } + }, + "documentation":"

    Certificate extensions for v3 template schema

    " + }, + "ExtensionsV4":{ + "type":"structure", + "required":["KeyUsage"], + "members":{ + "ApplicationPolicies":{ + "shape":"ApplicationPolicies", + "documentation":"

    Application policies specify what the certificate is used for and its purpose.

    " + }, + "KeyUsage":{ + "shape":"KeyUsage", + "documentation":"

    The key usage extension defines the purpose (e.g., encipherment, signature) of the key contained in the certificate.

    " + } + }, + "documentation":"

    Certificate extensions for v4 template schema

    " + }, + "GeneralFlagsV2":{ + "type":"structure", + "members":{ + "AutoEnrollment":{ + "shape":"Boolean", + "documentation":"

    Allows certificate issuance using autoenrollment. Set to TRUE to allow autoenrollment.

    " + }, + "MachineType":{ + "shape":"Boolean", + "documentation":"

    Defines if the template is for machines or users. Set to TRUE if the template is for machines. Set to FALSE if the template is for users.

    " + } + }, + "documentation":"

    General flags for v2 template schema that define if the template is for a machine or a user and if the template can be issued using autoenrollment.

    " + }, + "GeneralFlagsV3":{ + "type":"structure", + "members":{ + "AutoEnrollment":{ + "shape":"Boolean", + "documentation":"

    Allows certificate issuance using autoenrollment. Set to TRUE to allow autoenrollment.

    " + }, + "MachineType":{ + "shape":"Boolean", + "documentation":"

    Defines if the template is for machines or users. Set to TRUE if the template is for machines. Set to FALSE if the template is for users.

    " + } + }, + "documentation":"

    General flags for v3 template schema that define if the template is for a machine or a user and if the template can be issued using autoenrollment.

    " + }, + "GeneralFlagsV4":{ + "type":"structure", + "members":{ + "AutoEnrollment":{ + "shape":"Boolean", + "documentation":"

    Allows certificate issuance using autoenrollment. Set to TRUE to allow autoenrollment.

    " + }, + "MachineType":{ + "shape":"Boolean", + "documentation":"

    Defines if the template is for machines or users. Set to TRUE if the template is for machines. Set to FALSE if the template is for users.

    " + } + }, + "documentation":"

    General flags for v4 template schema that define if the template is for a machine or a user and if the template can be issued using autoenrollment.

    " + }, + "GetConnectorRequest":{ + "type":"structure", + "required":["ConnectorArn"], + "members":{ + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateConnector.

    ", + "location":"uri", + "locationName":"ConnectorArn" + } + } + }, + "GetConnectorResponse":{ + "type":"structure", + "members":{ + "Connector":{ + "shape":"Connector", + "documentation":"

    A structure that contains information about your connector.

    " + } + } + }, + "GetDirectoryRegistrationRequest":{ + "type":"structure", + "required":["DirectoryRegistrationArn"], + "members":{ + "DirectoryRegistrationArn":{ + "shape":"DirectoryRegistrationArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.

    ", + "location":"uri", + "locationName":"DirectoryRegistrationArn" + } + } + }, + "GetDirectoryRegistrationResponse":{ + "type":"structure", + "members":{ + "DirectoryRegistration":{ + "shape":"DirectoryRegistration", + "documentation":"

    The directory registration represents the authorization of the connector service with a directory.

    " + } + } + }, + "GetServicePrincipalNameRequest":{ + "type":"structure", + "required":[ + "ConnectorArn", + "DirectoryRegistrationArn" + ], + "members":{ + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateConnector.

    ", + "location":"uri", + "locationName":"ConnectorArn" + }, + "DirectoryRegistrationArn":{ + "shape":"DirectoryRegistrationArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.

    ", + "location":"uri", + "locationName":"DirectoryRegistrationArn" + } + } + }, + "GetServicePrincipalNameResponse":{ + "type":"structure", + "members":{ + "ServicePrincipalName":{ + "shape":"ServicePrincipalName", + "documentation":"

    The service principal name that the connector uses to authenticate with Active Directory.

    " + } + } + }, + "GetTemplateGroupAccessControlEntryRequest":{ + "type":"structure", + "required":[ + "GroupSecurityIdentifier", + "TemplateArn" + ], + "members":{ + "GroupSecurityIdentifier":{ + "shape":"GroupSecurityIdentifier", + "documentation":"

    Security identifier (SID) of the group object from Active Directory. The SID starts with \"S-\".

    ", + "location":"uri", + "locationName":"GroupSecurityIdentifier" + }, + "TemplateArn":{ + "shape":"TemplateArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.

    ", + "location":"uri", + "locationName":"TemplateArn" + } + } + }, + "GetTemplateGroupAccessControlEntryResponse":{ + "type":"structure", + "members":{ + "AccessControlEntry":{ + "shape":"AccessControlEntry", + "documentation":"

    An access control entry allows or denies an Active Directory group permission to enroll and/or autoenroll with a template.

    " + } + } + }, + "GetTemplateRequest":{ + "type":"structure", + "required":["TemplateArn"], + "members":{ + "TemplateArn":{ + "shape":"TemplateArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.

    ", + "location":"uri", + "locationName":"TemplateArn" + } + } + }, + "GetTemplateResponse":{ + "type":"structure", + "members":{ + "Template":{ + "shape":"Template", + "documentation":"

    A certificate template that the connector uses to issue certificates from a private CA.

    " + } + } + }, + "GroupSecurityIdentifier":{ + "type":"string", + "max":256, + "min":7, + "pattern":"^S-[0-9]-([0-9]+-){1,14}[0-9]+$" + }, + "HashAlgorithm":{ + "type":"string", + "enum":[ + "SHA256", + "SHA384", + "SHA512" + ] + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

    The request processing has failed because of an unknown error, exception, or failure with an internal server.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "KeySpec":{ + "type":"string", + "enum":[ + "KEY_EXCHANGE", + "SIGNATURE" + ] + }, + "KeyUsage":{ + "type":"structure", + "required":["UsageFlags"], + "members":{ + "Critical":{ + "shape":"Boolean", + "documentation":"

    Sets the key usage extension to critical.

    " + }, + "UsageFlags":{ + "shape":"KeyUsageFlags", + "documentation":"

    The key usage flags represent the purpose (e.g., encipherment, signature) of the key contained in the certificate.

    " + } + }, + "documentation":"

    The key usage extension defines the purpose (e.g., encipherment, signature) of the key contained in the certificate.

    " + }, + "KeyUsageFlags":{ + "type":"structure", + "members":{ + "DataEncipherment":{ + "shape":"Boolean", + "documentation":"

    DataEncipherment is asserted when the subject public key is used for directly enciphering raw user data without the use of an intermediate symmetric cipher.

    " + }, + "DigitalSignature":{ + "shape":"Boolean", + "documentation":"

    The digitalSignature is asserted when the subject public key is used for verifying digital signatures.

    " + }, + "KeyAgreement":{ + "shape":"Boolean", + "documentation":"

    KeyAgreement is asserted when the subject public key is used for key agreement.

    " + }, + "KeyEncipherment":{ + "shape":"Boolean", + "documentation":"

    KeyEncipherment is asserted when the subject public key is used for enciphering private or secret keys, i.e., for key transport.

    " + }, + "NonRepudiation":{ + "shape":"Boolean", + "documentation":"

    NonRepudiation is asserted when the subject public key is used to verify digital signatures.

    " + } + }, + "documentation":"

    The key usage flags represent the purpose (e.g., encipherment, signature) of the key contained in the certificate.

    " + }, + "KeyUsageProperty":{ + "type":"structure", + "members":{ + "PropertyFlags":{ + "shape":"KeyUsagePropertyFlags", + "documentation":"

    You can specify key usage for encryption, key agreement, and signature. You can use property flags or property type but not both.

    " + }, + "PropertyType":{ + "shape":"KeyUsagePropertyType", + "documentation":"

    You can specify all key usages using property type ALL. You can use property type or property flags but not both.

    " + } + }, + "documentation":"

    The key usage property defines the purpose of the private key contained in the certificate. You can specify specific purposes using property flags or all by using property type ALL.

    ", + "union":true + }, + "KeyUsagePropertyFlags":{ + "type":"structure", + "members":{ + "Decrypt":{ + "shape":"Boolean", + "documentation":"

    Allows the key to be used for encryption and decryption.

    " + }, + "KeyAgreement":{ + "shape":"Boolean", + "documentation":"

    Allows key exchange without encryption.

    " + }, + "Sign":{ + "shape":"Boolean", + "documentation":"

    Allows the key to be used for digital signatures.

    " + } + }, + "documentation":"

    Specifies key usage.

    " + }, + "KeyUsagePropertyType":{ + "type":"string", + "enum":["ALL"] + }, + "ListConnectorsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    Use this parameter when paginating results to specify the maximum number of items to return in the response on each page. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items.

    ", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken parameter from the response you just received.

    ", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListConnectorsResponse":{ + "type":"structure", + "members":{ + "Connectors":{ + "shape":"ConnectorList", + "documentation":"

    Summary information about each connector you have created.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken parameter from the response you just received.

    " + } + } + }, + "ListDirectoryRegistrationsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    Use this parameter when paginating results to specify the maximum number of items to return in the response on each page. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items.

    ", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken parameter from the response you just received.

    ", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListDirectoryRegistrationsResponse":{ + "type":"structure", + "members":{ + "DirectoryRegistrations":{ + "shape":"DirectoryRegistrationList", + "documentation":"

    Summary information about each directory registration you have created.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken parameter from the response you just received.

    " + } + } + }, + "ListServicePrincipalNamesRequest":{ + "type":"structure", + "required":["DirectoryRegistrationArn"], + "members":{ + "DirectoryRegistrationArn":{ + "shape":"DirectoryRegistrationArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.

    ", + "location":"uri", + "locationName":"DirectoryRegistrationArn" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    Use this parameter when paginating results to specify the maximum number of items to return in the response on each page. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items.

    ", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken parameter from the response you just received.

    ", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListServicePrincipalNamesResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken parameter from the response you just received.

    " + }, + "ServicePrincipalNames":{ + "shape":"ServicePrincipalNameList", + "documentation":"

    The service principal name, if any, that the connector uses to authenticate with Active Directory.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you created the resource.

    ", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"Tags", + "documentation":"

    The tags, if any, that are associated with your resource.

    " + } + } + }, + "ListTemplateGroupAccessControlEntriesRequest":{ + "type":"structure", + "required":["TemplateArn"], + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    Use this parameter when paginating results to specify the maximum number of items to return in the response on each page. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items.

    ", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken parameter from the response you just received.

    ", + "location":"querystring", + "locationName":"NextToken" + }, + "TemplateArn":{ + "shape":"TemplateArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.

    ", + "location":"uri", + "locationName":"TemplateArn" + } + } + }, + "ListTemplateGroupAccessControlEntriesResponse":{ + "type":"structure", + "members":{ + "AccessControlEntries":{ + "shape":"AccessControlEntryList", + "documentation":"

    An access control entry grants or denies permission to an Active Directory group to enroll certificates for a template.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken parameter from the response you just received.

    " + } + } + }, + "ListTemplatesRequest":{ + "type":"structure", + "required":["ConnectorArn"], + "members":{ + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateConnector.

    ", + "location":"querystring", + "locationName":"ConnectorArn" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    Use this parameter when paginating results to specify the maximum number of items to return in the response on each page. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items.

    ", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken parameter from the response you just received.

    ", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListTemplatesResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    Use this parameter when paginating results in a subsequent request after you receive a response with truncated results. Set it to the value of the NextToken parameter from the response you just received.

    " + }, + "Templates":{ + "shape":"TemplateList", + "documentation":"

    Custom configuration templates used when issuing a certificate.

    " + } + } + }, + "MaxResults":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "NextToken":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"^(?:[A-Za-z0-9_-]{4})*(?:[A-Za-z0-9_-]{2}==|[A-Za-z0-9_-]{3}=)?$" + }, + "PrivateKeyAlgorithm":{ + "type":"string", + "enum":[ + "RSA", + "ECDH_P256", + "ECDH_P384", + "ECDH_P521" + ] + }, + "PrivateKeyAttributesV2":{ + "type":"structure", + "required":[ + "KeySpec", + "MinimalKeyLength" + ], + "members":{ + "CryptoProviders":{ + "shape":"CryptoProvidersList", + "documentation":"

    Defines the cryptographic providers used to generate the private key.

    " + }, + "KeySpec":{ + "shape":"KeySpec", + "documentation":"

    Defines the purpose of the private key. Set it to the \"KEY_EXCHANGE\" or \"SIGNATURE\" value.

    " + }, + "MinimalKeyLength":{ + "shape":"PrivateKeyAttributesV2MinimalKeyLengthInteger", + "documentation":"

    Set the minimum key length of the private key.

    " + } + }, + "documentation":"

    Defines the attributes of the private key.

    " + }, + "PrivateKeyAttributesV2MinimalKeyLengthInteger":{ + "type":"integer", + "box":true, + "min":1 + }, + "PrivateKeyAttributesV3":{ + "type":"structure", + "required":[ + "Algorithm", + "KeySpec", + "KeyUsageProperty", + "MinimalKeyLength" + ], + "members":{ + "Algorithm":{ + "shape":"PrivateKeyAlgorithm", + "documentation":"

    Defines the algorithm used to generate the private key.

    " + }, + "CryptoProviders":{ + "shape":"CryptoProvidersList", + "documentation":"

    Defines the cryptographic providers used to generate the private key.

    " + }, + "KeySpec":{ + "shape":"KeySpec", + "documentation":"

    Defines the purpose of the private key. Set it to the \"KEY_EXCHANGE\" or \"SIGNATURE\" value.

    " + }, + "KeyUsageProperty":{ + "shape":"KeyUsageProperty", + "documentation":"

    The key usage property defines the purpose of the private key contained in the certificate. You can specify specific purposes using property flags or all by using property type ALL.

    " + }, + "MinimalKeyLength":{ + "shape":"PrivateKeyAttributesV3MinimalKeyLengthInteger", + "documentation":"

    Set the minimum key length of the private key.

    " + } + }, + "documentation":"

    Defines the attributes of the private key.

    " + }, + "PrivateKeyAttributesV3MinimalKeyLengthInteger":{ + "type":"integer", + "box":true, + "min":1 + }, + "PrivateKeyAttributesV4":{ + "type":"structure", + "required":[ + "KeySpec", + "MinimalKeyLength" + ], + "members":{ + "Algorithm":{ + "shape":"PrivateKeyAlgorithm", + "documentation":"

    Defines the algorithm used to generate the private key.

    " + }, + "CryptoProviders":{ + "shape":"CryptoProvidersList", + "documentation":"

    Defines the cryptographic providers used to generate the private key.

    " + }, + "KeySpec":{ + "shape":"KeySpec", + "documentation":"

    Defines the purpose of the private key. Set it to the \"KEY_EXCHANGE\" or \"SIGNATURE\" value.

    " + }, + "KeyUsageProperty":{ + "shape":"KeyUsageProperty", + "documentation":"

    The key usage property defines the purpose of the private key contained in the certificate. You can specify specific purposes using property flags or all by using property type ALL.

    " + }, + "MinimalKeyLength":{ + "shape":"PrivateKeyAttributesV4MinimalKeyLengthInteger", + "documentation":"

    Set the minimum key length of the private key.

    " + } + }, + "documentation":"

    Defines the attributes of the private key.

    " + }, + "PrivateKeyAttributesV4MinimalKeyLengthInteger":{ + "type":"integer", + "box":true, + "min":1 + }, + "PrivateKeyFlagsV2":{ + "type":"structure", + "required":["ClientVersion"], + "members":{ + "ClientVersion":{ + "shape":"ClientCompatibilityV2", + "documentation":"

    Defines the minimum client compatibility.

    " + }, + "ExportableKey":{ + "shape":"Boolean", + "documentation":"

    Allows the private key to be exported.

    " + }, + "StrongKeyProtectionRequired":{ + "shape":"Boolean", + "documentation":"

    Require user input when using the private key for enrollment.

    " + } + }, + "documentation":"

    Private key flags for v2 templates specify the client compatibility, if the private key can be exported, and if user input is required when using a private key.

    " + }, + "PrivateKeyFlagsV3":{ + "type":"structure", + "required":["ClientVersion"], + "members":{ + "ClientVersion":{ + "shape":"ClientCompatibilityV3", + "documentation":"

    Defines the minimum client compatibility.

    " + }, + "ExportableKey":{ + "shape":"Boolean", + "documentation":"

    Allows the private key to be exported.

    " + }, + "RequireAlternateSignatureAlgorithm":{ + "shape":"Boolean", + "documentation":"

    Requires the PKCS #1 v2.1 signature format for certificates. You should verify that your CA, objects, and applications can accept this signature format.

    " + }, + "StrongKeyProtectionRequired":{ + "shape":"Boolean", + "documentation":"

    Require user input when using the private key for enrollment.

    " + } + }, + "documentation":"

    Private key flags for v3 templates specify the client compatibility, if the private key can be exported, if user input is required when using a private key, and if an alternate signature algorithm should be used.

    " + }, + "PrivateKeyFlagsV4":{ + "type":"structure", + "required":["ClientVersion"], + "members":{ + "ClientVersion":{ + "shape":"ClientCompatibilityV4", + "documentation":"

    Defines the minimum client compatibility.

    " + }, + "ExportableKey":{ + "shape":"Boolean", + "documentation":"

    Allows the private key to be exported.

    " + }, + "RequireAlternateSignatureAlgorithm":{ + "shape":"Boolean", + "documentation":"

    Requires the PKCS #1 v2.1 signature format for certificates. You should verify that your CA, objects, and applications can accept this signature format.

    " + }, + "RequireSameKeyRenewal":{ + "shape":"Boolean", + "documentation":"

    Renew certificate using the same private key.

    " + }, + "StrongKeyProtectionRequired":{ + "shape":"Boolean", + "documentation":"

    Require user input when using the private key for enrollment.

    " + }, + "UseLegacyProvider":{ + "shape":"Boolean", + "documentation":"

    Specifies the cryptographic service provider category used to generate private keys. Set to TRUE to use Legacy Cryptographic Service Providers and FALSE to use Key Storage Providers.

    " + } + }, + "documentation":"

    Private key flags for v4 templates specify the client compatibility, if the private key can be exported, if user input is required when using a private key, if an alternate signature algorithm should be used, and if certificates are renewed using the same private key.

    " + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "Message", + "ResourceId", + "ResourceType" + ], + "members":{ + "Message":{"shape":"String"}, + "ResourceId":{ + "shape":"String", + "documentation":"

    The identifier of the Amazon Web Services resource.

    " + }, + "ResourceType":{ + "shape":"String", + "documentation":"

    The resource type, which can be one of Connector, Template, TemplateGroupAccessControlEntry, ServicePrincipalName, or DirectoryRegistration.

    " + } + }, + "documentation":"

    The operation tried to access a nonexistent resource. The resource might not be specified correctly, or its status might not be ACTIVE.

    ", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SecurityGroupId":{ + "type":"string", + "max":20, + "min":11, + "pattern":"^(?:sg-[0-9a-f]{8}|sg-[0-9a-f]{17})$" + }, + "SecurityGroupIdList":{ + "type":"list", + "member":{"shape":"SecurityGroupId"}, + "max":4, + "min":1 + }, + "ServicePrincipalName":{ + "type":"structure", + "members":{ + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateConnector.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the service principal name was created.

    " + }, + "DirectoryRegistrationArn":{ + "shape":"DirectoryRegistrationArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.

    " + }, + "Status":{ + "shape":"ServicePrincipalNameStatus", + "documentation":"

    The status of a service principal name.

    " + }, + "StatusReason":{ + "shape":"ServicePrincipalNameStatusReason", + "documentation":"

    Additional information for the status of a service principal name if the status is failed.

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the service principal name was updated.

    " + } + }, + "documentation":"

    The service principal name that the connector uses to authenticate with Active Directory.

    " + }, + "ServicePrincipalNameList":{ + "type":"list", + "member":{"shape":"ServicePrincipalNameSummary"} + }, + "ServicePrincipalNameStatus":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "DELETING", + "FAILED" + ] + }, + "ServicePrincipalNameStatusReason":{ + "type":"string", + "enum":[ + "DIRECTORY_ACCESS_DENIED", + "DIRECTORY_NOT_REACHABLE", + "DIRECTORY_RESOURCE_NOT_FOUND", + "SPN_EXISTS_ON_DIFFERENT_AD_OBJECT", + "INTERNAL_FAILURE" + ] + }, + "ServicePrincipalNameSummary":{ + "type":"structure", + "members":{ + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateConnector.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the service principal name was created.

    " + }, + "DirectoryRegistrationArn":{ + "shape":"DirectoryRegistrationArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateDirectoryRegistration.

    " + }, + "Status":{ + "shape":"ServicePrincipalNameStatus", + "documentation":"

    The status of a service principal name.

    " + }, + "StatusReason":{ + "shape":"ServicePrincipalNameStatusReason", + "documentation":"

    Additional information for the status of a service principal name if the status is failed.

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the service principal name was updated.

    " + } + }, + "documentation":"

    The service principal name that the connector uses to authenticate with Active Directory.

    " + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "Message", + "QuotaCode", + "ResourceId", + "ResourceType", + "ServiceCode" + ], + "members":{ + "Message":{"shape":"String"}, + "QuotaCode":{ + "shape":"String", + "documentation":"

    The code associated with the service quota.

    " + }, + "ResourceId":{ + "shape":"String", + "documentation":"

    The identifier of the Amazon Web Services resource.

    " + }, + "ResourceType":{ + "shape":"String", + "documentation":"

    The resource type, which can be one of Connector, Template, TemplateGroupAccessControlEntry, ServicePrincipalName, or DirectoryRegistration.

    " + }, + "ServiceCode":{ + "shape":"String", + "documentation":"

    Identifies the originating service.

    " + } + }, + "documentation":"

    The request would cause a service quota to be exceeded.

    ", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "SubjectNameFlagsV2":{ + "type":"structure", + "members":{ + "RequireCommonName":{ + "shape":"Boolean", + "documentation":"

    Include the common name in the subject name.

    " + }, + "RequireDirectoryPath":{ + "shape":"Boolean", + "documentation":"

    Include the directory path in the subject name.

    " + }, + "RequireDnsAsCn":{ + "shape":"Boolean", + "documentation":"

    Include the DNS as common name in the subject name.

    " + }, + "RequireEmail":{ + "shape":"Boolean", + "documentation":"

    Include the subject's email in the subject name.

    " + }, + "SanRequireDirectoryGuid":{ + "shape":"Boolean", + "documentation":"

    Include the globally unique identifier (GUID) in the subject alternate name.

    " + }, + "SanRequireDns":{ + "shape":"Boolean", + "documentation":"

    Include the DNS in the subject alternate name.

    " + }, + "SanRequireDomainDns":{ + "shape":"Boolean", + "documentation":"

    Include the domain DNS in the subject alternate name.

    " + }, + "SanRequireEmail":{ + "shape":"Boolean", + "documentation":"

    Include the subject's email in the subject alternate name.

    " + }, + "SanRequireSpn":{ + "shape":"Boolean", + "documentation":"

    Include the service principal name (SPN) in the subject alternate name.

    " + }, + "SanRequireUpn":{ + "shape":"Boolean", + "documentation":"

    Include the user principal name (UPN) in the subject alternate name.

    " + } + }, + "documentation":"

    Information to include in the subject name and subject alternate name of the certificate. The subject name can be the common name, directory path, DNS as common name, or left blank. You can optionally include email in the subject name for user templates. If you leave the subject name blank, then you must set a subject alternate name. The subject alternate name (SAN) can include the globally unique identifier (GUID), DNS, domain DNS, email, service principal name (SPN), and user principal name (UPN). You can leave the SAN blank. If you leave the SAN blank, then you must set a subject name.

    " + }, + "SubjectNameFlagsV3":{ + "type":"structure", + "members":{ + "RequireCommonName":{ + "shape":"Boolean", + "documentation":"

    Include the common name in the subject name.

    " + }, + "RequireDirectoryPath":{ + "shape":"Boolean", + "documentation":"

    Include the directory path in the subject name.

    " + }, + "RequireDnsAsCn":{ + "shape":"Boolean", + "documentation":"

    Include the DNS as common name in the subject name.

    " + }, + "RequireEmail":{ + "shape":"Boolean", + "documentation":"

    Include the subject's email in the subject name.

    " + }, + "SanRequireDirectoryGuid":{ + "shape":"Boolean", + "documentation":"

    Include the globally unique identifier (GUID) in the subject alternate name.

    " + }, + "SanRequireDns":{ + "shape":"Boolean", + "documentation":"

    Include the DNS in the subject alternate name.

    " + }, + "SanRequireDomainDns":{ + "shape":"Boolean", + "documentation":"

    Include the domain DNS in the subject alternate name.

    " + }, + "SanRequireEmail":{ + "shape":"Boolean", + "documentation":"

    Include the subject's email in the subject alternate name.

    " + }, + "SanRequireSpn":{ + "shape":"Boolean", + "documentation":"

    Include the service principal name (SPN) in the subject alternate name.

    " + }, + "SanRequireUpn":{ + "shape":"Boolean", + "documentation":"

    Include the user principal name (UPN) in the subject alternate name.

    " + } + }, + "documentation":"

    Information to include in the subject name and subject alternate name of the certificate. The subject name can be the common name, directory path, DNS as common name, or left blank. You can optionally include email in the subject name for user templates. If you leave the subject name blank, then you must set a subject alternate name. The subject alternate name (SAN) can include the globally unique identifier (GUID), DNS, domain DNS, email, service principal name (SPN), and user principal name (UPN). You can leave the SAN blank. If you leave the SAN blank, then you must set a subject name.

    " + }, + "SubjectNameFlagsV4":{ + "type":"structure", + "members":{ + "RequireCommonName":{ + "shape":"Boolean", + "documentation":"

    Include the common name in the subject name.

    " + }, + "RequireDirectoryPath":{ + "shape":"Boolean", + "documentation":"

    Include the directory path in the subject name.

    " + }, + "RequireDnsAsCn":{ + "shape":"Boolean", + "documentation":"

    Include the DNS as common name in the subject name.

    " + }, + "RequireEmail":{ + "shape":"Boolean", + "documentation":"

    Include the subject's email in the subject name.

    " + }, + "SanRequireDirectoryGuid":{ + "shape":"Boolean", + "documentation":"

    Include the globally unique identifier (GUID) in the subject alternate name.

    " + }, + "SanRequireDns":{ + "shape":"Boolean", + "documentation":"

    Include the DNS in the subject alternate name.

    " + }, + "SanRequireDomainDns":{ + "shape":"Boolean", + "documentation":"

    Include the domain DNS in the subject alternate name.

    " + }, + "SanRequireEmail":{ + "shape":"Boolean", + "documentation":"

    Include the subject's email in the subject alternate name.

    " + }, + "SanRequireSpn":{ + "shape":"Boolean", + "documentation":"

    Include the service principal name (SPN) in the subject alternate name.

    " + }, + "SanRequireUpn":{ + "shape":"Boolean", + "documentation":"

    Include the user principal name (UPN) in the subject alternate name.

    " + } + }, + "documentation":"

    Information to include in the subject name and subject alternate name of the certificate. The subject name can be the common name, directory path, DNS as common name, or left blank. You can optionally include email in the subject name for user templates. If you leave the subject name blank, then you must set a subject alternate name. The subject alternate name (SAN) can include the globally unique identifier (GUID), DNS, domain DNS, email, service principal name (SPN), and user principal name (UPN). You can leave the SAN blank. If you leave the SAN blank, then you must set a subject name.

    " + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"String"} + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you created the resource.

    ", + "location":"uri", + "locationName":"ResourceArn" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

    Metadata assigned to the resource consisting of key-value pairs.

    " + } + } + }, + "Tags":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Template":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"TemplateArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.

    " + }, + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateConnector.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the template was created.

    " + }, + "Definition":{ + "shape":"TemplateDefinition", + "documentation":"

    Template configuration to define the information included in certificates. Define certificate validity and renewal periods, certificate request handling and enrollment options, key usage extensions, application policies, and cryptography settings.

    " + }, + "Name":{ + "shape":"TemplateName", + "documentation":"

    Name of the template. Template names must be unique.

    " + }, + "ObjectIdentifier":{ + "shape":"CustomObjectIdentifier", + "documentation":"

    Object identifier of a template.

    " + }, + "PolicySchema":{ + "shape":"Integer", + "documentation":"

    The template schema version. Template schema versions can be v2, v3, or v4. The template configuration options change based on the template schema version.

    " + }, + "Revision":{ + "shape":"TemplateRevision", + "documentation":"

    The version of the template. Template updates will increment the minor revision. Re-enrolling all certificate holders will increment the major revision.

    " + }, + "Status":{ + "shape":"TemplateStatus", + "documentation":"

    Status of the template. Status can be creating, active, deleting, or failed.

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the template was updated.

    " + } + }, + "documentation":"

    An Active Directory compatible certificate template. Connectors issue certificates against these templates based on the requestor's Active Directory group membership.

    " + }, + "TemplateArn":{ + "type":"string", + "max":200, + "min":5, + "pattern":"^arn:[\\w-]+:pca-connector-ad:[\\w-]+:[0-9]+:connector\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}\\/template\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$" + }, + "TemplateDefinition":{ + "type":"structure", + "members":{ + "TemplateV2":{ + "shape":"TemplateV2", + "documentation":"

    Template configuration to define the information included in certificates. Define certificate validity and renewal periods, certificate request handling and enrollment options, key usage extensions, application policies, and cryptography settings.

    " + }, + "TemplateV3":{ + "shape":"TemplateV3", + "documentation":"

    Template configuration to define the information included in certificates. Define certificate validity and renewal periods, certificate request handling and enrollment options, key usage extensions, application policies, and cryptography settings.

    " + }, + "TemplateV4":{ + "shape":"TemplateV4", + "documentation":"

    Template configuration to define the information included in certificates. Define certificate validity and renewal periods, certificate request handling and enrollment options, key usage extensions, application policies, and cryptography settings.

    " + } + }, + "documentation":"

    Template configuration to define the information included in certificates. Define certificate validity and renewal periods, certificate request handling and enrollment options, key usage extensions, application policies, and cryptography settings.

    ", + "union":true + }, + "TemplateList":{ + "type":"list", + "member":{"shape":"TemplateSummary"} + }, + "TemplateName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^(?!^\\s+$)((?![\\x5c'\\x2b,;<=>#\\x22])([\\x20-\\x7E]))+$" + }, + "TemplateNameList":{ + "type":"list", + "member":{"shape":"TemplateName"}, + "max":100, + "min":1 + }, + "TemplateRevision":{ + "type":"structure", + "required":[ + "MajorRevision", + "MinorRevision" + ], + "members":{ + "MajorRevision":{ + "shape":"Integer", + "documentation":"

    The revision version of the template. Re-enrolling all certificate holders will increment the major revision.

    " + }, + "MinorRevision":{ + "shape":"Integer", + "documentation":"

    The revision version of the template. Template updates will increment the minor revision.

    " + } + }, + "documentation":"

    The revision version of the template. Template updates will increment the minor revision. Re-enrolling all certificate holders will increment the major revision.

    " + }, + "TemplateStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "DELETING" + ] + }, + "TemplateSummary":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"TemplateArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.

    " + }, + "ConnectorArn":{ + "shape":"ConnectorArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateConnector.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the template was created.

    " + }, + "Definition":{ + "shape":"TemplateDefinition", + "documentation":"

    Template configuration to define the information included in certificates. Define certificate validity and renewal periods, certificate request handling and enrollment options, key usage extensions, application policies, and cryptography settings.

    " + }, + "Name":{ + "shape":"TemplateName", + "documentation":"

    Name of the template. The template name must be unique.

    " + }, + "ObjectIdentifier":{ + "shape":"CustomObjectIdentifier", + "documentation":"

    Object identifier of a template.

    " + }, + "PolicySchema":{ + "shape":"Integer", + "documentation":"

    The template schema version. Template schema versions can be v2, v3, or v4. The template configuration options change based on the template schema version.

    " + }, + "Revision":{ + "shape":"TemplateRevision", + "documentation":"

    The revision version of the template. Template updates will increment the minor revision. Re-enrolling all certificate holders will increment the major revision.

    " + }, + "Status":{ + "shape":"TemplateStatus", + "documentation":"

    Status of the template. Status can be creating, active, deleting, or failed.

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the template was updated.

    " + } + }, + "documentation":"

    An Active Directory compatible certificate template. Connectors issue certificates against these templates based on the requestor's Active Directory group membership.

    " + }, + "TemplateV2":{ + "type":"structure", + "required":[ + "CertificateValidity", + "EnrollmentFlags", + "Extensions", + "GeneralFlags", + "PrivateKeyAttributes", + "PrivateKeyFlags", + "SubjectNameFlags" + ], + "members":{ + "CertificateValidity":{ + "shape":"CertificateValidity", + "documentation":"

    Certificate validity describes the validity and renewal periods of a certificate.

    " + }, + "EnrollmentFlags":{ + "shape":"EnrollmentFlagsV2", + "documentation":"

    Enrollment flags describe the enrollment settings for certificates such as using the existing private key and deleting expired or revoked certificates.

    " + }, + "Extensions":{ + "shape":"ExtensionsV2", + "documentation":"

    Extensions describe the key usage extensions and application policies for a template.

    " + }, + "GeneralFlags":{ + "shape":"GeneralFlagsV2", + "documentation":"

    General flags describe whether the template is used for computers or users and if the template can be used with autoenrollment.

    " + }, + "PrivateKeyAttributes":{ + "shape":"PrivateKeyAttributesV2", + "documentation":"

    Private key attributes allow you to specify the minimal key length, key spec, and cryptographic providers for the private key of a certificate for v2 templates. V2 templates allow you to use Legacy Cryptographic Service Providers.

    " + }, + "PrivateKeyFlags":{ + "shape":"PrivateKeyFlagsV2", + "documentation":"

    Private key flags for v2 templates specify the client compatibility, if the private key can be exported, and if user input is required when using a private key.

    " + }, + "SubjectNameFlags":{ + "shape":"SubjectNameFlagsV2", + "documentation":"

    Subject name flags describe the subject name and subject alternate name that is included in a certificate.

    " + }, + "SupersededTemplates":{ + "shape":"TemplateNameList", + "documentation":"

    List of templates in Active Directory that are superseded by this template.

    " + } + }, + "documentation":"

    v2 template schema that uses Legacy Cryptographic Providers.

    " + }, + "TemplateV3":{ + "type":"structure", + "required":[ + "CertificateValidity", + "EnrollmentFlags", + "Extensions", + "GeneralFlags", + "HashAlgorithm", + "PrivateKeyAttributes", + "PrivateKeyFlags", + "SubjectNameFlags" + ], + "members":{ + "CertificateValidity":{ + "shape":"CertificateValidity", + "documentation":"

    Certificate validity describes the validity and renewal periods of a certificate.

    " + }, + "EnrollmentFlags":{ + "shape":"EnrollmentFlagsV3", + "documentation":"

    Enrollment flags describe the enrollment settings for certificates such as using the existing private key and deleting expired or revoked certificates.

    " + }, + "Extensions":{ + "shape":"ExtensionsV3", + "documentation":"

    Extensions describe the key usage extensions and application policies for a template.

    " + }, + "GeneralFlags":{ + "shape":"GeneralFlagsV3", + "documentation":"

    General flags describe whether the template is used for computers or users and if the template can be used with autoenrollment.

    " + }, + "HashAlgorithm":{ + "shape":"HashAlgorithm", + "documentation":"

    Specifies the hash algorithm used to hash the private key.

    " + }, + "PrivateKeyAttributes":{ + "shape":"PrivateKeyAttributesV3", + "documentation":"

    Private key attributes allow you to specify the algorithm, minimal key length, key spec, key usage, and cryptographic providers for the private key of a certificate for v3 templates. V3 templates allow you to use Key Storage Providers.

    " + }, + "PrivateKeyFlags":{ + "shape":"PrivateKeyFlagsV3", + "documentation":"

    Private key flags for v3 templates specify the client compatibility, if the private key can be exported, if user input is required when using a private key, and if an alternate signature algorithm should be used.

    " + }, + "SubjectNameFlags":{ + "shape":"SubjectNameFlagsV3", + "documentation":"

    Subject name flags describe the subject name and subject alternate name that is included in a certificate.

    " + }, + "SupersededTemplates":{ + "shape":"TemplateNameList", + "documentation":"

    List of templates in Active Directory that are superseded by this template.

    " + } + }, + "documentation":"

    v3 template schema that uses Key Storage Providers.

    " + }, + "TemplateV4":{ + "type":"structure", + "required":[ + "CertificateValidity", + "EnrollmentFlags", + "Extensions", + "GeneralFlags", + "PrivateKeyAttributes", + "PrivateKeyFlags", + "SubjectNameFlags" + ], + "members":{ + "CertificateValidity":{ + "shape":"CertificateValidity", + "documentation":"

    Certificate validity describes the validity and renewal periods of a certificate.

    " + }, + "EnrollmentFlags":{ + "shape":"EnrollmentFlagsV4", + "documentation":"

    Enrollment flags describe the enrollment settings for certificates such as using the existing private key and deleting expired or revoked certificates.

    " + }, + "Extensions":{ + "shape":"ExtensionsV4", + "documentation":"

    Extensions describe the key usage extensions and application policies for a template.

    " + }, + "GeneralFlags":{ + "shape":"GeneralFlagsV4", + "documentation":"

    General flags describe whether the template is used for computers or users and if the template can be used with autoenrollment.

    " + }, + "HashAlgorithm":{ + "shape":"HashAlgorithm", + "documentation":"

    Specifies the hash algorithm used to hash the private key. Hash algorithm can only be specified when using Key Storage Providers.

    " + }, + "PrivateKeyAttributes":{ + "shape":"PrivateKeyAttributesV4", + "documentation":"

    Private key attributes allow you to specify the minimal key length, key spec, key usage, and cryptographic providers for the private key of a certificate for v4 templates. V4 templates allow you to use either Key Storage Providers or Legacy Cryptographic Service Providers. You specify the cryptography provider category in private key flags.

    " + }, + "PrivateKeyFlags":{ + "shape":"PrivateKeyFlagsV4", + "documentation":"

    Private key flags for v4 templates specify the client compatibility, if the private key can be exported, if user input is required when using a private key, if an alternate signature algorithm should be used, and if certificates are renewed using the same private key.

    " + }, + "SubjectNameFlags":{ + "shape":"SubjectNameFlagsV4", + "documentation":"

    Subject name flags describe the subject name and subject alternate name that is included in a certificate.

    " + }, + "SupersededTemplates":{ + "shape":"TemplateNameList", + "documentation":"

    List of templates in Active Directory that are superseded by this template.

    " + } + }, + "documentation":"

    v4 template schema that can use either Legacy Cryptographic Providers or Key Storage Providers.

    " + }, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"String"}, + "QuotaCode":{ + "shape":"String", + "documentation":"

    The code associated with the quota.

    " + }, + "ServiceCode":{ + "shape":"String", + "documentation":"

    Identifies the originating service.

    " + } + }, + "documentation":"

    The limit on the number of requests per second was exceeded.

    ", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "Timestamp":{"type":"timestamp"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you created the resource.

    ", + "location":"uri", + "locationName":"ResourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

    Specifies a list of tag keys that you want to remove from the specified resources.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UpdateTemplateGroupAccessControlEntryRequest":{ + "type":"structure", + "required":[ + "GroupSecurityIdentifier", + "TemplateArn" + ], + "members":{ + "AccessRights":{ + "shape":"AccessRights", + "documentation":"

    Allow or deny permissions for an Active Directory group to enroll or autoenroll certificates for a template.

    " + }, + "GroupDisplayName":{ + "shape":"DisplayName", + "documentation":"

    Name of the Active Directory group. This name does not need to match the group name in Active Directory.

    " + }, + "GroupSecurityIdentifier":{ + "shape":"GroupSecurityIdentifier", + "documentation":"

    Security identifier (SID) of the group object from Active Directory. The SID starts with \"S-\".

    ", + "location":"uri", + "locationName":"GroupSecurityIdentifier" + }, + "TemplateArn":{ + "shape":"TemplateArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.

    ", + "location":"uri", + "locationName":"TemplateArn" + } + } + }, + "UpdateTemplateRequest":{ + "type":"structure", + "required":["TemplateArn"], + "members":{ + "Definition":{ + "shape":"TemplateDefinition", + "documentation":"

    Template configuration to define the information included in certificates. Define certificate validity and renewal periods, certificate request handling and enrollment options, key usage extensions, application policies, and cryptography settings.

    " + }, + "ReenrollAllCertificateHolders":{ + "shape":"Boolean", + "documentation":"

    This setting allows the major version of a template to be increased automatically. All members of Active Directory groups that are allowed to enroll with a template will receive a new certificate issued using that template.

    " + }, + "TemplateArn":{ + "shape":"TemplateArn", + "documentation":"

    The Amazon Resource Name (ARN) that was returned when you called CreateTemplate.

    ", + "location":"uri", + "locationName":"TemplateArn" + } + } + }, + "ValidationException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"String"}, + "Reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

    The reason for the validation error. This won't be returned for every validation exception.

    " + } + }, + "documentation":"

    An input validation error occurred. For example, a template name might contain invalid characters, or a pagination token might be invalid.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "FIELD_VALIDATION_FAILED", + "INVALID_PERMISSION", + "INVALID_STATE", + "MISMATCHED_CONNECTOR", + "MISMATCHED_VPC", + "NO_CLIENT_TOKEN", + "UNKNOWN_OPERATION", + "OTHER" + ] + }, + "ValidityPeriod":{ + "type":"structure", + "required":[ + "Period", + "PeriodType" + ], + "members":{ + "Period":{ + "shape":"ValidityPeriodPeriodLong", + "documentation":"

    The numeric value for the validity period.

    " + }, + "PeriodType":{ + "shape":"ValidityPeriodType", + "documentation":"

    The unit of time. You can select hours, days, weeks, months, and years.

    " + } + }, + "documentation":"

    Information describing the end of the validity period of the certificate. This parameter sets the “Not After” date for the certificate. Certificate validity is the period of time during which a certificate is valid. Validity can be expressed as an explicit date and time when the certificate expires, or as a span of time after issuance, stated in hours, days, months, or years. For more information, see Validity in RFC 5280. This value is unaffected when ValidityNotBefore is also specified. For example, if Validity is set to 20 days in the future, the certificate will expire 20 days from issuance time regardless of the ValidityNotBefore value.

    " + }, + "ValidityPeriodPeriodLong":{ + "type":"long", + "box":true, + "max":8766000, + "min":1 + }, + "ValidityPeriodType":{ + "type":"string", + "enum":[ + "HOURS", + "DAYS", + "WEEKS", + "MONTHS", + "YEARS" + ] + }, + "VpcInformation":{ + "type":"structure", + "required":["SecurityGroupIds"], + "members":{ + "SecurityGroupIds":{ + "shape":"SecurityGroupIdList", + "documentation":"

    The security groups used with the connector. You can use a maximum of 4 security groups with a connector.

    " + } + }, + "documentation":"

    Information about your VPC and security groups used with the connector.

    " + } + }, + "documentation":"

    Amazon Web Services Private CA Connector for Active Directory creates a connector between Amazon Web Services Private CA and Active Directory (AD) that enables you to provision security certificates for AD signed by a private CA that you own. For more information, see Amazon Web Services Private CA Connector for Active Directory.

    " +} diff --git a/services/personalize/pom.xml b/services/personalize/pom.xml index 230c5c400818..35a5dadc15c0 100644 --- a/services/personalize/pom.xml +++ b/services/personalize/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT personalize AWS Java SDK :: Services :: Personalize diff --git a/services/personalizeevents/pom.xml b/services/personalizeevents/pom.xml index a3ce68c90e81..777cecd65bfa 100644 --- a/services/personalizeevents/pom.xml +++ b/services/personalizeevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT personalizeevents AWS Java SDK :: Services :: Personalize Events diff --git a/services/personalizeruntime/pom.xml b/services/personalizeruntime/pom.xml index 3377e1fc0689..6b3706ca3172 100644 --- a/services/personalizeruntime/pom.xml +++ b/services/personalizeruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT personalizeruntime AWS Java SDK :: Services :: Personalize Runtime diff --git a/services/pi/pom.xml b/services/pi/pom.xml index b2d591c3f86b..843b7da112f8 100644 --- a/services/pi/pom.xml +++ b/services/pi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT pi AWS Java SDK :: Services :: PI diff --git a/services/pi/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/pi/src/main/resources/codegen-resources/endpoint-rule-set.json index ca59653fa9b9..2a013d4fe262 100644 --- a/services/pi/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/pi/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, 
- "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://pi-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://pi-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://pi-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://pi-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://pi.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://pi.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://pi.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does 
not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://pi.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/pi/src/main/resources/codegen-resources/endpoint-tests.json b/services/pi/src/main/resources/codegen-resources/endpoint-tests.json index a720fcbb69af..73734bfa14e1 100644 --- a/services/pi/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/pi/src/main/resources/codegen-resources/endpoint-tests.json @@ -429,6 +429,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { @@ -442,6 +453,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { @@ -455,6 +477,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { @@ -468,6 +501,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { @@ -531,6 +575,12 @@ "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/pi/src/main/resources/codegen-resources/paginators-1.json b/services/pi/src/main/resources/codegen-resources/paginators-1.json index 8392da5c25bf..be1a4c984e95 100644 --- a/services/pi/src/main/resources/codegen-resources/paginators-1.json +++ b/services/pi/src/main/resources/codegen-resources/paginators-1.json @@ -19,6 +19,11 @@ "input_token": "NextToken", "output_token": "NextToken", "limit_key": "MaxResults" + }, + "ListPerformanceAnalysisReports": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" } } } diff --git a/services/pi/src/main/resources/codegen-resources/service-2.json b/services/pi/src/main/resources/codegen-resources/service-2.json index 4a2840b5f600..d80c7e263126 100644 --- 
a/services/pi/src/main/resources/codegen-resources/service-2.json +++ b/services/pi/src/main/resources/codegen-resources/service-2.json @@ -14,6 +14,36 @@ "uid":"pi-2018-02-27" }, "operations":{ + "CreatePerformanceAnalysisReport":{ + "name":"CreatePerformanceAnalysisReport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePerformanceAnalysisReportRequest"}, + "output":{"shape":"CreatePerformanceAnalysisReportResponse"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"InternalServiceError"}, + {"shape":"NotAuthorizedException"} + ], + "documentation":"

    Creates a new performance analysis report for a specific time period for the DB instance.

    " + }, + "DeletePerformanceAnalysisReport":{ + "name":"DeletePerformanceAnalysisReport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePerformanceAnalysisReportRequest"}, + "output":{"shape":"DeletePerformanceAnalysisReportResponse"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"InternalServiceError"}, + {"shape":"NotAuthorizedException"} + ], + "documentation":"

    Deletes a performance analysis report.

    " + }, "DescribeDimensionKeys":{ "name":"DescribeDimensionKeys", "http":{ @@ -44,6 +74,21 @@ ], "documentation":"

    Get the attributes of the specified dimension group for a DB instance or data source. For example, if you specify a SQL ID, GetDimensionKeyDetails retrieves the full text of the dimension db.sql.statement associated with this ID. This operation is useful because GetResourceMetrics and DescribeDimensionKeys don't support retrieval of large SQL statement text.

    " }, + "GetPerformanceAnalysisReport":{ + "name":"GetPerformanceAnalysisReport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPerformanceAnalysisReportRequest"}, + "output":{"shape":"GetPerformanceAnalysisReportResponse"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"InternalServiceError"}, + {"shape":"NotAuthorizedException"} + ], + "documentation":"

    Retrieves the report including the report ID, status, time details, and the insights with recommendations. The report status can be RUNNING, SUCCEEDED, or FAILED. The insights include the description and recommendation fields.

    " + }, "GetResourceMetadata":{ "name":"GetResourceMetadata", "http":{ @@ -103,9 +148,73 @@ {"shape":"NotAuthorizedException"} ], "documentation":"

    Retrieve metrics of the specified types that can be queried for a specified DB instance.

    " + }, + "ListPerformanceAnalysisReports":{ + "name":"ListPerformanceAnalysisReports", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPerformanceAnalysisReportsRequest"}, + "output":{"shape":"ListPerformanceAnalysisReportsResponse"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"InternalServiceError"}, + {"shape":"NotAuthorizedException"} + ], + "documentation":"

    Lists all the analysis reports created for the DB instance. The reports are sorted based on the start time of each report.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"InternalServiceError"}, + {"shape":"NotAuthorizedException"} + ], + "documentation":"

    Retrieves all the metadata tags associated with the Amazon RDS Performance Insights resource.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"InternalServiceError"}, + {"shape":"NotAuthorizedException"} + ], + "documentation":"

    Adds metadata tags to the Amazon RDS Performance Insights resource.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"InternalServiceError"}, + {"shape":"NotAuthorizedException"} + ], + "documentation":"

    Deletes the metadata tags from the Amazon RDS Performance Insights resource.

    " } }, "shapes":{ + "AcceptLanguage":{ + "type":"string", + "enum":["EN_US"] + }, "AdditionalMetricsList":{ "type":"list", "member":{"shape":"RequestString"}, @@ -117,6 +226,161 @@ "key":{"shape":"RequestString"}, "value":{"shape":"Double"} }, + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1, + "pattern":"^arn:.*:pi:.*$" + }, + "AnalysisReport":{ + "type":"structure", + "required":["AnalysisReportId"], + "members":{ + "AnalysisReportId":{ + "shape":"AnalysisReportId", + "documentation":"

    The name of the analysis report.

    " + }, + "Identifier":{ + "shape":"IdentifierString", + "documentation":"

    The unique identifier of the analysis report.

    " + }, + "ServiceType":{ + "shape":"ServiceType", + "documentation":"

    The Amazon Web Services service for which Performance Insights returns metrics. Valid values are as follows:

    • RDS

    • DOCDB

    " + }, + "CreateTime":{ + "shape":"ISOTimestamp", + "documentation":"

    The time you created the analysis report.

    " + }, + "StartTime":{ + "shape":"ISOTimestamp", + "documentation":"

    The analysis start time in the report.

    " + }, + "EndTime":{ + "shape":"ISOTimestamp", + "documentation":"

    The analysis end time in the report.

    " + }, + "Status":{ + "shape":"AnalysisStatus", + "documentation":"

    The status of the created analysis report.

    " + }, + "Insights":{ + "shape":"InsightList", + "documentation":"

    The list of identified insights in the analysis report.

    " + } + }, + "documentation":"

    Retrieves the summary of the performance analysis report created for a time period.

    " + }, + "AnalysisReportId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"report-[0-9a-f]{17}" + }, + "AnalysisReportSummary":{ + "type":"structure", + "members":{ + "AnalysisReportId":{ + "shape":"String", + "documentation":"

    The name of the analysis report.

    " + }, + "CreateTime":{ + "shape":"ISOTimestamp", + "documentation":"

    The time you created the analysis report.

    " + }, + "StartTime":{ + "shape":"ISOTimestamp", + "documentation":"

    The start time of the analysis in the report.

    " + }, + "EndTime":{ + "shape":"ISOTimestamp", + "documentation":"

    The end time of the analysis in the report.

    " + }, + "Status":{ + "shape":"AnalysisStatus", + "documentation":"

    The status of the analysis report.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    List of all the tags added to the analysis report.

    " + } + }, + "documentation":"

    Retrieves the details of the performance analysis report.

    " + }, + "AnalysisReportSummaryList":{ + "type":"list", + "member":{"shape":"AnalysisReportSummary"} + }, + "AnalysisStatus":{ + "type":"string", + "enum":[ + "RUNNING", + "SUCCEEDED", + "FAILED" + ] + }, + "Boolean":{"type":"boolean"}, + "ContextType":{ + "type":"string", + "enum":[ + "CAUSAL", + "CONTEXTUAL" + ] + }, + "CreatePerformanceAnalysisReportRequest":{ + "type":"structure", + "required":[ + "ServiceType", + "Identifier", + "StartTime", + "EndTime" + ], + "members":{ + "ServiceType":{ + "shape":"ServiceType", + "documentation":"

    The Amazon Web Services service for which Performance Insights will return metrics. Valid value is RDS.

    " + }, + "Identifier":{ + "shape":"IdentifierString", + "documentation":"

    An immutable, Amazon Web Services Region-unique identifier for a data source. Performance Insights gathers metrics from this data source.

    To use an Amazon RDS instance as a data source, you specify its DbiResourceId value. For example, specify db-ADECBTYHKTSAUMUZQYPDS2GW4A.

    " + }, + "StartTime":{ + "shape":"ISOTimestamp", + "documentation":"

    The start time defined for the analysis report.

    " + }, + "EndTime":{ + "shape":"ISOTimestamp", + "documentation":"

    The end time defined for the analysis report.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The metadata assigned to the analysis report consisting of a key-value pair.

    " + } + } + }, + "CreatePerformanceAnalysisReportResponse":{ + "type":"structure", + "members":{ + "AnalysisReportId":{ + "shape":"AnalysisReportId", + "documentation":"

    A unique identifier for the created analysis report.

    " + } + } + }, + "Data":{ + "type":"structure", + "members":{ + "PerformanceInsightsMetric":{ + "shape":"PerformanceInsightsMetric", + "documentation":"

    This field determines the Performance Insights metric to render for the insight. The name field refers to a Performance Insights metric.

    " + } + }, + "documentation":"

    List of data objects that provide details about source metrics. This field can be used to determine the PI metric to render for the insight. This data type also includes static values for the metrics for the insight that were calculated and included in text and annotations on the DB load chart.

    " + }, + "DataList":{ + "type":"list", + "member":{"shape":"Data"} + }, "DataPoint":{ "type":"structure", "required":[ @@ -139,6 +403,33 @@ "type":"list", "member":{"shape":"DataPoint"} }, + "DeletePerformanceAnalysisReportRequest":{ + "type":"structure", + "required":[ + "ServiceType", + "Identifier", + "AnalysisReportId" + ], + "members":{ + "ServiceType":{ + "shape":"ServiceType", + "documentation":"

    The Amazon Web Services service for which Performance Insights will return metrics. Valid value is RDS.

    " + }, + "Identifier":{ + "shape":"IdentifierString", + "documentation":"

    An immutable identifier for a data source that is unique for an Amazon Web Services Region. Performance Insights gathers metrics from this data source. In the console, the identifier is shown as ResourceID. When you call DescribeDBInstances, the identifier is returned as DbiResourceId.

    To use a DB instance as a data source, specify its DbiResourceId value. For example, specify db-ABCDEFGHIJKLMNOPQRSTU1VW2X.

    " + }, + "AnalysisReportId":{ + "shape":"AnalysisReportId", + "documentation":"

    The unique identifier of the analysis report for deletion.

    " + } + } + }, + "DeletePerformanceAnalysisReportResponse":{ + "type":"structure", + "members":{ + } + }, "DescribeDimensionKeysRequest":{ "type":"structure", "required":[ @@ -155,7 +446,7 @@ "documentation":"

    The Amazon Web Services service for which Performance Insights will return metrics. Valid values are as follows:

    • RDS

    • DOCDB

    " }, "Identifier":{ - "shape":"RequestString", + "shape":"IdentifierString", "documentation":"

    An immutable, Amazon Web Services Region-unique identifier for a data source. Performance Insights gathers metrics from this data source.

    To use an Amazon RDS instance as a data source, you specify its DbiResourceId value. For example, specify db-FAIHNTYBKTGAUSUZQYPDS2GW4A.

    " }, "StartTime":{ @@ -230,6 +521,17 @@ "max":2048, "min":1 }, + "DescriptiveMap":{ + "type":"map", + "key":{"shape":"DescriptiveString"}, + "value":{"shape":"DescriptiveString"} + }, + "DescriptiveString":{ + "type":"string", + "max":2000, + "min":1, + "pattern":"^.*$" + }, "DetailStatus":{ "type":"string", "enum":[ @@ -416,6 +718,45 @@ } } }, + "GetPerformanceAnalysisReportRequest":{ + "type":"structure", + "required":[ + "ServiceType", + "Identifier", + "AnalysisReportId" + ], + "members":{ + "ServiceType":{ + "shape":"ServiceType", + "documentation":"

    The Amazon Web Services service for which Performance Insights will return metrics. Valid value is RDS.

    " + }, + "Identifier":{ + "shape":"IdentifierString", + "documentation":"

    An immutable identifier for a data source that is unique for an Amazon Web Services Region. Performance Insights gathers metrics from this data source. In the console, the identifier is shown as ResourceID. When you call DescribeDBInstances, the identifier is returned as DbiResourceId.

    To use a DB instance as a data source, specify its DbiResourceId value. For example, specify db-ABCDEFGHIJKLMNOPQRSTU1VW2X.

    " + }, + "AnalysisReportId":{ + "shape":"AnalysisReportId", + "documentation":"

    A unique identifier of the created analysis report. For example, report-12345678901234567.

    " + }, + "TextFormat":{ + "shape":"TextFormat", + "documentation":"

    Indicates the text format in the report. The options are PLAIN_TEXT or MARKDOWN. The default value is plain text.

    " + }, + "AcceptLanguage":{ + "shape":"AcceptLanguage", + "documentation":"

    The text language in the report. The default language is EN_US (English).

    " + } + } + }, + "GetPerformanceAnalysisReportResponse":{ + "type":"structure", + "members":{ + "AnalysisReport":{ + "shape":"AnalysisReport", + "documentation":"

    The summary of the performance analysis report created for a time period.

    " + } + } + }, "GetResourceMetadataRequest":{ "type":"structure", "required":[ @@ -428,7 +769,7 @@ "documentation":"

    The Amazon Web Services service for which Performance Insights returns metrics.

    " }, "Identifier":{ - "shape":"RequestString", + "shape":"IdentifierString", "documentation":"

    An immutable identifier for a data source that is unique for an Amazon Web Services Region. Performance Insights gathers metrics from this data source. To use a DB instance as a data source, specify its DbiResourceId value. For example, specify db-ABCDEFGHIJKLMNOPQRSTU1VW2X.

    " } } @@ -461,7 +802,7 @@ "documentation":"

    The Amazon Web Services service for which Performance Insights returns metrics. Valid values are as follows:

    • RDS

    • DOCDB

    " }, "Identifier":{ - "shape":"RequestString", + "shape":"IdentifierString", "documentation":"

    An immutable identifier for a data source that is unique for an Amazon Web Services Region. Performance Insights gathers metrics from this data source. In the console, the identifier is shown as ResourceID. When you call DescribeDBInstances, the identifier is returned as DbiResourceId.

    To use a DB instance as a data source, specify its DbiResourceId value. For example, specify db-ABCDEFGHIJKLMNOPQRSTU1VW2X.

    " }, "MetricQueries":{ @@ -524,7 +865,62 @@ "type":"string", "max":256, "min":0, - "pattern":"^db-[a-zA-Z0-9-]*$" + "pattern":"^[a-zA-Z0-9-]+$" + }, + "Insight":{ + "type":"structure", + "required":["InsightId"], + "members":{ + "InsightId":{ + "shape":"String", + "documentation":"

    The unique identifier for the insight. For example, insight-12345678901234567.

    " + }, + "InsightType":{ + "shape":"String", + "documentation":"

    The type of insight. For example, HighDBLoad, HighCPU, or DominatingSQLs.

    " + }, + "Context":{ + "shape":"ContextType", + "documentation":"

    Indicates if the insight is a causal or correlated insight.

    " + }, + "StartTime":{ + "shape":"ISOTimestamp", + "documentation":"

    The start time of the insight. For example, 2018-10-30T00:00:00Z.

    " + }, + "EndTime":{ + "shape":"ISOTimestamp", + "documentation":"

    The end time of the insight. For example, 2018-10-30T00:00:00Z.

    " + }, + "Severity":{ + "shape":"Severity", + "documentation":"

    The severity of the insight. The values are: Low, Medium, or High.

    " + }, + "SupportingInsights":{ + "shape":"InsightList", + "documentation":"

    List of supporting insights that provide additional factors for the insight.

    " + }, + "Description":{ + "shape":"MarkdownString", + "documentation":"

    Description of the insight. For example: A high severity Insight found between 02:00 and 02:30, where there was an unusually high DB load 600x above baseline. Likely performance impact.

    " + }, + "Recommendations":{ + "shape":"RecommendationList", + "documentation":"

    List of recommendations for the insight. For example, Investigate the following SQLs that contributed to 100% of the total DBLoad during that time period: sql-id.

    " + }, + "InsightData":{ + "shape":"DataList", + "documentation":"

    List of data objects containing metrics and references from the time range while generating the insight.

    " + }, + "BaselineData":{ + "shape":"DataList", + "documentation":"

    Metric names and values from the timeframe used as baseline to generate the insight.

    " + } + }, + "documentation":"

    Retrieves the list of performance issues which are identified.

    " + }, + "InsightList":{ + "type":"list", + "member":{"shape":"Insight"} }, "Integer":{"type":"integer"}, "InternalServiceError":{ @@ -562,7 +958,7 @@ "documentation":"

    The Amazon Web Services service for which Performance Insights returns metrics.

    " }, "Identifier":{ - "shape":"RequestString", + "shape":"IdentifierString", "documentation":"

    An immutable identifier for a data source that is unique within an Amazon Web Services Region. Performance Insights gathers metrics from this data source. To use an Amazon RDS DB instance as a data source, specify its DbiResourceId value. For example, specify db-ABCDEFGHIJKLMNOPQRSTU1VWZ.

    " }, "Metrics":{ @@ -605,7 +1001,7 @@ "documentation":"

    The Amazon Web Services service for which Performance Insights returns metrics.

    " }, "Identifier":{ - "shape":"RequestString", + "shape":"IdentifierString", "documentation":"

    An immutable identifier for a data source that is unique within an Amazon Web Services Region. Performance Insights gathers metrics from this data source. To use an Amazon RDS DB instance as a data source, specify its DbiResourceId value. For example, specify db-ABCDEFGHIJKLMNOPQRSTU1VWZ.

    " }, "MetricTypes":{ @@ -635,6 +1031,81 @@ } } }, + "ListPerformanceAnalysisReportsRequest":{ + "type":"structure", + "required":[ + "ServiceType", + "Identifier" + ], + "members":{ + "ServiceType":{ + "shape":"ServiceType", + "documentation":"

    The Amazon Web Services service for which Performance Insights returns metrics. Valid value is RDS.

    " + }, + "Identifier":{ + "shape":"IdentifierString", + "documentation":"

    An immutable identifier for a data source that is unique for an Amazon Web Services Region. Performance Insights gathers metrics from this data source. In the console, the identifier is shown as ResourceID. When you call DescribeDBInstances, the identifier is returned as DbiResourceId.

    To use a DB instance as a data source, specify its DbiResourceId value. For example, specify db-ABCDEFGHIJKLMNOPQRSTU1VW2X.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the token, up to the value specified by MaxResults.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of items to return in the response. If more items exist than the specified MaxResults value, a pagination token is included in the response so that the remaining results can be retrieved.

    " + }, + "ListTags":{ + "shape":"Boolean", + "documentation":"

    Specifies whether or not to include the list of tags in the response.

    " + } + } + }, + "ListPerformanceAnalysisReportsResponse":{ + "type":"structure", + "members":{ + "AnalysisReports":{ + "shape":"AnalysisReportSummaryList", + "documentation":"

    List of reports including the report identifier, start and end time, creation time, and status.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the token, up to the value specified by MaxResults.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":[ + "ServiceType", + "ResourceARN" + ], + "members":{ + "ServiceType":{ + "shape":"ServiceType", + "documentation":"

    The Amazon Web Services service for which Performance Insights returns metrics. Valid value is RDS.

    " + }, + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

    Lists all the tags for the Amazon RDS Performance Insights resource. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    " + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

    The metadata assigned to an Amazon RDS resource consisting of a key-value pair.

    " + } + } + }, + "MarkdownString":{ + "type":"string", + "max":8000, + "min":0, + "pattern":"(.|\\n)*", + "sensitive":true + }, "MaxResults":{ "type":"integer", "max":25, @@ -718,7 +1189,7 @@ "type":"string", "max":8192, "min":1, - "pattern":"[\\s\\S]*" + "pattern":"^[a-zA-Z0-9_=-]+$" }, "NotAuthorizedException":{ "type":"structure", @@ -728,6 +1199,28 @@ "documentation":"

    The user is not authorized to perform this request.

    ", "exception":true }, + "PerformanceInsightsMetric":{ + "type":"structure", + "members":{ + "Metric":{ + "shape":"DescriptiveString", + "documentation":"

    The Performance Insights metric.

    " + }, + "DisplayName":{ + "shape":"DescriptiveString", + "documentation":"

    The Performance Insights metric name.

    " + }, + "Dimensions":{ + "shape":"DescriptiveMap", + "documentation":"

    A dimension map that contains the dimensions for this partition.

    " + }, + "Value":{ + "shape":"Double", + "documentation":"

    The value of the metric. For example, 9 for db.load.avg.

    " + } + }, + "documentation":"

    This data type helps to determine the Performance Insights metric to render for the insight.

    " + }, "PeriodAlignment":{ "type":"string", "enum":[ @@ -735,6 +1228,24 @@ "START_TIME" ] }, + "Recommendation":{ + "type":"structure", + "members":{ + "RecommendationId":{ + "shape":"String", + "documentation":"

    The unique identifier for the recommendation.

    " + }, + "RecommendationDescription":{ + "shape":"MarkdownString", + "documentation":"

    The recommendation details to help resolve the performance issue. For example, Investigate the following SQLs that contributed to 100% of the total DBLoad during that time period: sql-id

    " + } + }, + "documentation":"

    The list of recommendations for the insight.

    " + }, + "RecommendationList":{ + "type":"list", + "member":{"shape":"Recommendation"} + }, "RequestString":{ "type":"string", "max":256, @@ -812,11 +1323,122 @@ "DOCDB" ] }, + "Severity":{ + "type":"string", + "enum":[ + "LOW", + "MEDIUM", + "HIGH" + ] + }, "String":{ "type":"string", "max":256, "min":0, "pattern":".*\\S.*" + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

    A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with aws: or rds:. The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").

    " + }, + "Value":{ + "shape":"TagValue", + "documentation":"

    A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with aws: or rds:. The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").

    " + } + }, + "documentation":"

    Metadata assigned to an Amazon RDS resource consisting of a key-value pair.

    " + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^.*$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ServiceType", + "ResourceARN", + "Tags" + ], + "members":{ + "ServiceType":{ + "shape":"ServiceType", + "documentation":"

    The Amazon Web Services service for which Performance Insights returns metrics. Valid value is RDS.

    " + }, + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

    The Amazon RDS Performance Insights resource that the tags are added to. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The metadata assigned to an Amazon RDS resource consisting of a key-value pair.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^.*$" + }, + "TextFormat":{ + "type":"string", + "enum":[ + "PLAIN_TEXT", + "MARKDOWN" + ] + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ServiceType", + "ResourceARN", + "TagKeys" + ], + "members":{ + "ServiceType":{ + "shape":"ServiceType", + "documentation":"

    The Amazon Web Services service for which Performance Insights returns metrics. Valid value is RDS.

    " + }, + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

    The Amazon RDS Performance Insights resource that the tags are removed from. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    " + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The metadata tag keys to remove from the Amazon RDS Performance Insights resource.

    " + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } } }, "documentation":"Amazon RDS Performance Insights

    Amazon RDS Performance Insights enables you to monitor and explore different dimensions of database load based on data captured from a running DB instance. The guide provides detailed information about Performance Insights data types, parameters and errors.

    When Performance Insights is enabled, the Amazon RDS Performance Insights API provides visibility into the performance of your DB instance. Amazon CloudWatch provides the authoritative source for Amazon Web Services service-vended monitoring metrics. Performance Insights offers a domain-specific view of DB load.

    DB load is measured as average active sessions. Performance Insights provides the data to API consumers as a two-dimensional time-series dataset. The time dimension provides DB load data for each time point in the queried time range. Each time point decomposes overall load in relation to the requested dimensions, measured at that time point. Examples include SQL, Wait event, User, and Host.

    " diff --git a/services/pinpoint/pom.xml b/services/pinpoint/pom.xml index dc85d2167d29..3a841bb354b1 100644 --- a/services/pinpoint/pom.xml +++ b/services/pinpoint/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT pinpoint AWS Java SDK :: Services :: Amazon Pinpoint diff --git a/services/pinpoint/src/main/resources/codegen-resources/customization.config b/services/pinpoint/src/main/resources/codegen-resources/customization.config index b77ce5f489bf..4d02e0dff44c 100644 --- a/services/pinpoint/src/main/resources/codegen-resources/customization.config +++ b/services/pinpoint/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : ["*"], + "excludedSimpleMethods" : ["*"], "renameShapes": { // Do not keep adding to this list. Require the service team to name enums like they're naming their shapes. "__EndpointTypesElement": "EndpointTypesElement" diff --git a/services/pinpointemail/pom.xml b/services/pinpointemail/pom.xml index 57efc97b3506..c6f5cf2bd6c9 100644 --- a/services/pinpointemail/pom.xml +++ b/services/pinpointemail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT pinpointemail AWS Java SDK :: Services :: Pinpoint Email diff --git a/services/pinpointemail/src/main/resources/codegen-resources/customization.config b/services/pinpointemail/src/main/resources/codegen-resources/customization.config index 95538dcb3624..6bef416549e5 100644 --- a/services/pinpointemail/src/main/resources/codegen-resources/customization.config +++ b/services/pinpointemail/src/main/resources/codegen-resources/customization.config @@ -3,7 +3,7 @@ "getAccount", "getDeliverabilityDashboardOptions" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "getDedicatedIps", "listConfigurationSets", "listDedicatedIpPools", diff --git a/services/pinpointsmsvoice/pom.xml b/services/pinpointsmsvoice/pom.xml index 1661eb0a5514..d0d56e7ba031 100644 --- a/services/pinpointsmsvoice/pom.xml +++ b/services/pinpointsmsvoice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT pinpointsmsvoice AWS Java SDK :: Services :: Pinpoint SMS Voice diff --git a/services/pinpointsmsvoice/src/main/resources/codegen-resources/customization.config b/services/pinpointsmsvoice/src/main/resources/codegen-resources/customization.config index 03992c18dda4..7bd34e376145 100644 --- a/services/pinpointsmsvoice/src/main/resources/codegen-resources/customization.config +++ b/services/pinpointsmsvoice/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "listConfigurationSets" ] } diff --git a/services/pinpointsmsvoicev2/pom.xml b/services/pinpointsmsvoicev2/pom.xml index 84ac02000bb5..374be129cf57 100644 --- a/services/pinpointsmsvoicev2/pom.xml +++ b/services/pinpointsmsvoicev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT pinpointsmsvoicev2 AWS Java SDK :: Services :: Pinpoint SMS Voice V2 diff --git a/services/pipes/pom.xml b/services/pipes/pom.xml index 212cf8332812..cdb1e2b895d5 100644 --- a/services/pipes/pom.xml +++ b/services/pipes/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT pipes AWS Java SDK :: Services :: Pipes diff --git a/services/polly/pom.xml b/services/polly/pom.xml index a021f7729890..f7155ae5656d 100644 --- a/services/polly/pom.xml +++ 
b/services/polly/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT polly AWS Java SDK :: Services :: Amazon Polly diff --git a/services/polly/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/polly/src/main/resources/codegen-resources/endpoint-rule-set.json index 52b2277d14f1..c25e9e1b918e 100644 --- a/services/polly/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/polly/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://polly-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://polly-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + 
"rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://polly-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://polly-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://polly.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://polly.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://polly.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://polly.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/polly/src/main/resources/codegen-resources/service-2.json b/services/polly/src/main/resources/codegen-resources/service-2.json index 1b35df9150e3..a175bc3b5b8b 100644 --- a/services/polly/src/main/resources/codegen-resources/service-2.json +++ b/services/polly/src/main/resources/codegen-resources/service-2.json @@ -1108,7 +1108,8 @@ "Niamh", "Sofie", "Lisa", - "Isabelle" + "Isabelle", + "Zayd" ] }, "VoiceList":{ diff --git a/services/pom.xml b/services/pom.xml index 3ffc799d0be5..8b939765ea4d 100644 --- a/services/pom.xml +++ b/services/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT services AWS 
Java SDK :: Services @@ -373,6 +373,8 @@ medicalimaging entityresolution managedblockchainquery + pcaconnectorad + neptunedata The AWS Java SDK services https://aws.amazon.com/sdkforjava diff --git a/services/pricing/pom.xml b/services/pricing/pom.xml index da21d3eef282..02fd9253c951 100644 --- a/services/pricing/pom.xml +++ b/services/pricing/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 pricing diff --git a/services/pricing/src/main/resources/codegen-resources/customization.config b/services/pricing/src/main/resources/codegen-resources/customization.config index 6aad4b5fe48b..55e49fef5fa8 100644 --- a/services/pricing/src/main/resources/codegen-resources/customization.config +++ b/services/pricing/src/main/resources/codegen-resources/customization.config @@ -2,7 +2,7 @@ "verifiedSimpleMethods": [ "describeServices" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "getProducts" ] } diff --git a/services/privatenetworks/pom.xml b/services/privatenetworks/pom.xml index b54ddebbd0b3..0498a5b9ced9 100644 --- a/services/privatenetworks/pom.xml +++ b/services/privatenetworks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT privatenetworks AWS Java SDK :: Services :: Private Networks diff --git a/services/proton/pom.xml b/services/proton/pom.xml index 44b0a82984d7..c8a1718a39b5 100644 --- a/services/proton/pom.xml +++ b/services/proton/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT proton AWS Java SDK :: Services :: Proton diff --git a/services/qldb/pom.xml b/services/qldb/pom.xml index 3deef7058ea1..c3332ed7aec2 100644 --- a/services/qldb/pom.xml +++ b/services/qldb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT qldb AWS Java SDK :: Services :: QLDB diff --git a/services/qldbsession/pom.xml b/services/qldbsession/pom.xml index 04225cd2f4a8..f77fd5dad57b 100644 --- a/services/qldbsession/pom.xml +++ b/services/qldbsession/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT qldbsession AWS Java SDK :: Services :: QLDB Session diff --git a/services/quicksight/pom.xml b/services/quicksight/pom.xml index b92d96a71ee1..51ac2f43f236 100644 --- a/services/quicksight/pom.xml +++ b/services/quicksight/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT quicksight AWS Java SDK :: Services :: QuickSight diff --git a/services/quicksight/src/main/resources/codegen-resources/paginators-1.json b/services/quicksight/src/main/resources/codegen-resources/paginators-1.json index e5ce3441c3ce..512e1e44b043 100644 --- a/services/quicksight/src/main/resources/codegen-resources/paginators-1.json +++ b/services/quicksight/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,17 @@ { "pagination": { + "DescribeFolderPermissions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Permissions" + }, + "DescribeFolderResolvedPermissions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Permissions" + }, "ListAnalyses": { "input_token": "NextToken", "output_token": "NextToken", @@ -42,6 +54,18 @@ "limit_key": "MaxResults", "result_key": "DataSources" }, + "ListFolderMembers": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": 
"FolderMemberList" + }, + "ListFolders": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "FolderSummaryList" + }, "ListGroupMemberships": { "input_token": "NextToken", "output_token": "NextToken", @@ -154,6 +178,12 @@ "limit_key": "MaxResults", "result_key": "DataSourceSummaries" }, + "SearchFolders": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "FolderSummaryList" + }, "SearchGroups": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/quicksight/src/main/resources/codegen-resources/service-2.json b/services/quicksight/src/main/resources/codegen-resources/service-2.json index adde7fcdf864..c30dad216b44 100644 --- a/services/quicksight/src/main/resources/codegen-resources/service-2.json +++ b/services/quicksight/src/main/resources/codegen-resources/service-2.json @@ -1230,6 +1230,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, {"shape":"UnsupportedUserEditionException"}, {"shape":"InternalFailureException"} ], @@ -1248,6 +1249,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, {"shape":"UnsupportedUserEditionException"}, {"shape":"InternalFailureException"} ], @@ -2442,7 +2444,7 @@ {"shape":"UnsupportedPricingPlanException"}, {"shape":"InternalFailureException"} ], - "documentation":"

    Starts an asynchronous job that generates a dashboard snapshot. You can request one of the following format configurations per API call.

    • 1 paginated PDF

    • 5 CSVs

    Poll job descriptions with a DescribeDashboardSnapshotJob API call. Once the job succeeds, use the DescribeDashboardSnapshotJobResult API to obtain the download URIs that the job generates.

    " + "documentation":"

    Starts an asynchronous job that generates a dashboard snapshot. You can request one of the following format configurations per API call.

    • 1 paginated PDF

    • 1 Excel workbook

    • 5 CSVs

    Poll job descriptions with a DescribeDashboardSnapshotJob API call. Once the job succeeds, use the DescribeDashboardSnapshotJobResult API to obtain the download URIs that the job generates.

    " }, "TagResource":{ "name":"TagResource", @@ -3225,6 +3227,12 @@ "min":1, "pattern":"[\\w\\-]+|(\\$LATEST)|(\\$PUBLISHED)" }, + "AllSheetsFilterScopeConfiguration":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The configuration for applying a filter to all sheets. You can apply this filter to all visuals on every sheet.

    This is a union type structure. For this structure to be valid, only one of the attributes can be defined.

    " + }, "AmazonElasticsearchParameters":{ "type":"structure", "required":["Domain"], @@ -6907,13 +6915,13 @@ }, "MemberId":{ "shape":"RestrictiveResourceId", - "documentation":"

    The ID of the asset (the dashboard, analysis, or dataset).

    ", + "documentation":"

    The ID of the asset that you want to add to the folder.

    ", "location":"uri", "locationName":"MemberId" }, "MemberType":{ "shape":"MemberType", - "documentation":"

    The type of the member, including DASHBOARD, ANALYSIS, and DATASET.

    ", + "documentation":"

    The member type of the asset that you want to add to a folder.

    ", "location":"uri", "locationName":"MemberType" } @@ -6974,6 +6982,10 @@ "Tags":{ "shape":"TagList", "documentation":"

    Tags for the folder.

    " + }, + "SharingModel":{ + "shape":"SharingModel", + "documentation":"

    An optional parameter that determines the sharing scope of the folder. The default value for this parameter is ACCOUNT.

    " } } }, @@ -10428,13 +10440,13 @@ }, "MemberId":{ "shape":"RestrictiveResourceId", - "documentation":"

    The ID of the asset (the dashboard, analysis, or dataset) that you want to delete.

    ", + "documentation":"

    The ID of the asset that you want to delete.

    ", "location":"uri", "locationName":"MemberId" }, "MemberType":{ "shape":"MemberType", - "documentation":"

    The type of the member, including DASHBOARD, ANALYSIS, and DATASET

    ", + "documentation":"

    The member type of the asset that you want to delete from a folder.

    ", "location":"uri", "locationName":"MemberType" } @@ -12124,6 +12136,25 @@ "documentation":"

    The ID of the folder.

    ", "location":"uri", "locationName":"FolderId" + }, + "Namespace":{ + "shape":"Namespace", + "documentation":"

    The namespace of the folder whose permissions you want described.

    ", + "location":"querystring", + "locationName":"namespace" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to be returned per request.

    ", + "box":true, + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"String", + "documentation":"

    A pagination token for the next set of results.

    ", + "location":"querystring", + "locationName":"next-token" } } }, @@ -12150,6 +12181,10 @@ "RequestId":{ "shape":"String", "documentation":"

    The Amazon Web Services request ID for this operation.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The pagination token for the next set of results, or null if there are no more results.

    " } } }, @@ -12192,6 +12227,25 @@ "documentation":"

    The ID of the folder.

    ", "location":"uri", "locationName":"FolderId" + }, + "Namespace":{ + "shape":"Namespace", + "documentation":"

    The namespace of the folder whose permissions you want described.

    ", + "location":"querystring", + "locationName":"namespace" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to be returned per request.

    ", + "box":true, + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"String", + "documentation":"

    A pagination token for the next set of results.

    ", + "location":"querystring", + "locationName":"next-token" } } }, @@ -12218,6 +12272,10 @@ "RequestId":{ "shape":"String", "documentation":"

    The Amazon Web Services request ID for this operation.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    A pagination token for the next set of results, or null if there are no more results.

    " } } }, @@ -14267,6 +14325,10 @@ "SelectedSheets":{ "shape":"SelectedSheetsFilterScopeConfiguration", "documentation":"

    The configuration for applying a filter to specific sheets.

    " + }, + "AllSheets":{ + "shape":"AllSheetsFilterScopeConfiguration", + "documentation":"

    The configuration for applying a filter to all sheets.

    " } }, "documentation":"

    The scope configuration for a FilterGroup.

    This is a union type structure. For this structure to be valid, only one of the attributes can be defined.

    " @@ -14427,6 +14489,10 @@ "LastUpdatedTime":{ "shape":"Timestamp", "documentation":"

    The time that the folder was last updated.

    " + }, + "SharingModel":{ + "shape":"SharingModel", + "documentation":"

    The sharing scope of the folder.

    " } }, "documentation":"

    A folder in Amazon QuickSight.

    " @@ -14521,6 +14587,10 @@ "LastUpdatedTime":{ "shape":"Timestamp", "documentation":"

    The time that the folder was last updated.

    " + }, + "SharingModel":{ + "shape":"SharingModel", + "documentation":"

    The sharing scope of the folder.

    " } }, "documentation":"

    A summary of information about an existing Amazon QuickSight folder.

    " @@ -14618,10 +14688,7 @@ }, "ForecastComputation":{ "type":"structure", - "required":[ - "ComputationId", - "Time" - ], + "required":["ComputationId"], "members":{ "ComputationId":{ "shape":"ShortRestrictiveResourceId", @@ -15938,10 +16005,7 @@ }, "GrowthRateComputation":{ "type":"structure", - "required":[ - "ComputationId", - "Time" - ], + "required":["ComputationId"], "members":{ "ComputationId":{ "shape":"ShortRestrictiveResourceId", @@ -19269,7 +19333,6 @@ "type":"structure", "required":[ "ComputationId", - "Time", "Type" ], "members":{ @@ -19354,12 +19417,7 @@ }, "MetricComparisonComputation":{ "type":"structure", - "required":[ - "ComputationId", - "Time", - "FromValue", - "TargetValue" - ], + "required":["ComputationId"], "members":{ "ComputationId":{ "shape":"ShortRestrictiveResourceId", @@ -20636,10 +20694,7 @@ }, "PeriodOverPeriodComputation":{ "type":"structure", - "required":[ - "ComputationId", - "Time" - ], + "required":["ComputationId"], "members":{ "ComputationId":{ "shape":"ShortRestrictiveResourceId", @@ -20662,10 +20717,7 @@ }, "PeriodToDateComputation":{ "type":"structure", - "required":[ - "ComputationId", - "Time" - ], + "required":["ComputationId"], "members":{ "ComputationId":{ "shape":"ShortRestrictiveResourceId", @@ -23883,6 +23935,13 @@ }, "documentation":"

    The shape conditional formatting of a filled map visual.

    " }, + "SharingModel":{ + "type":"string", + "enum":[ + "ACCOUNT", + "NAMESPACE" + ] + }, "Sheet":{ "type":"structure", "members":{ @@ -24398,11 +24457,11 @@ "members":{ "SheetSelections":{ "shape":"SnapshotFileSheetSelectionList", - "documentation":"

    A list of SnapshotFileSheetSelection objects that contain information on the dashboard sheet that is exported. These objects provide information about the snapshot artifacts that are generated during the job. This structure can hold a maximum of 5 CSV configurations or 1 configuration for PDF.

    " + "documentation":"

    A list of SnapshotFileSheetSelection objects that contain information on the dashboard sheet that is exported. These objects provide information about the snapshot artifacts that are generated during the job. This structure can hold a maximum of 5 CSV configurations, 5 Excel configurations, or 1 configuration for PDF.

    " }, "FormatType":{ "shape":"SnapshotFileFormatType", - "documentation":"

    The format of the snapshot file to be generated. You can choose between CSV or PDF.

    " + "documentation":"

    The format of the snapshot file to be generated. You can choose between CSV, Excel, or PDF.

    " } }, "documentation":"

    A structure that contains the information for the snapshot that you want to generate. This information is provided by you when you start a new snapshot job.

    " @@ -24411,7 +24470,8 @@ "type":"string", "enum":[ "CSV", - "PDF" + "PDF", + "EXCEL" ] }, "SnapshotFileGroup":{ @@ -24427,7 +24487,7 @@ "SnapshotFileGroupList":{ "type":"list", "member":{"shape":"SnapshotFileGroup"}, - "max":6, + "max":7, "min":1 }, "SnapshotFileList":{ @@ -24445,15 +24505,15 @@ "members":{ "SheetId":{ "shape":"ShortRestrictiveResourceId", - "documentation":"

    The sheet ID of the dashboard to generate the snapshot artifact from. This value is required for CSV and PDF format types.

    " + "documentation":"

    The sheet ID of the dashboard to generate the snapshot artifact from. This value is required for CSV, Excel, and PDF format types.

    " }, "SelectionScope":{ "shape":"SnapshotFileSheetSelectionScope", - "documentation":"

    The selection scope of the visuals on a sheet of a dashboard that you are generating a snapthot of. You can choose one of the following options.

    • ALL_VISUALS - Selects all visuals that are on the sheet. This value is required if the snapshot is a PDF.

    • SELECTED_VISUALS - Select the visual that you want to add to the snapshot. This value is required if the snapshot is a CSV.

    " + "documentation":"

    The selection scope of the visuals on a sheet of a dashboard that you are generating a snapshot of. You can choose one of the following options.

    • ALL_VISUALS - Selects all visuals that are on the sheet. This value is required if the snapshot is a PDF.

    • SELECTED_VISUALS - Select the visual that you want to add to the snapshot. This value is required if the snapshot is a CSV or Excel workbook.

    " }, "VisualIds":{ "shape":"SnapshotFileSheetSelectionVisualIdList", - "documentation":"

    A list of visual IDs that are located in the selected sheet. This structure supports tables and pivot tables. This structure is required if you are generating a CSV. You can add a maximum of 1 visual ID to this structure.

    " + "documentation":"

    A structure that lists the IDs of the visuals in the selected sheet. Supported visual types are table and pivot table visuals. This value is required if you are generating a CSV or Excel workbook. This value supports a maximum of 1 visual ID for CSV and 5 visual IDs across up to 5 sheet selections for Excel. If you are generating an Excel workbook, the order of the visual IDs provided in this structure determines the order of the worksheets in the Excel file.

    " } }, "documentation":"

    A structure that contains information that identifies the snapshot that needs to be generated.

    " @@ -24461,7 +24521,7 @@ "SnapshotFileSheetSelectionList":{ "type":"list", "member":{"shape":"SnapshotFileSheetSelection"}, - "max":1, + "max":5, "min":1 }, "SnapshotFileSheetSelectionScope":{ @@ -24474,7 +24534,7 @@ "SnapshotFileSheetSelectionVisualIdList":{ "type":"list", "member":{"shape":"ShortRestrictiveResourceId"}, - "max":1, + "max":5, "min":1 }, "SnapshotJobErrorInfo":{ @@ -25425,14 +25485,24 @@ "members":{ "SelectedFieldOptions":{ "shape":"TableFieldOptionList", - "documentation":"

    The selected field options for the table field options.

    " + "documentation":"

    The field options to be configured for a table.

    " }, "Order":{ "shape":"FieldOrderList", - "documentation":"

    The order of field IDs of the field options for a table visual.

    " + "documentation":"

    The order of the field IDs that are configured as field options for a table visual.

    " + }, + "PinnedFieldOptions":{ + "shape":"TablePinnedFieldOptions", + "documentation":"

    The settings for the pinned columns of a table visual.

    " } }, - "documentation":"

    The field options for a table visual.

    " + "documentation":"

    The field options of a table visual.

    " + }, + "TableFieldOrderList":{ + "type":"list", + "member":{"shape":"FieldId"}, + "documentation":"

    A list of table field IDs.

    ", + "max":201 }, "TableFieldURLConfiguration":{ "type":"structure", @@ -25520,6 +25590,16 @@ }, "documentation":"

    The paginated report options for a table visual.

    " }, + "TablePinnedFieldOptions":{ + "type":"structure", + "members":{ + "PinnedLeftFields":{ + "shape":"TableFieldOrderList", + "documentation":"

    A list of columns to be pinned to the left of a table visual.

    " + } + }, + "documentation":"

    The settings for the pinned columns of a table visual.

    " + }, "TableRowConditionalFormatting":{ "type":"structure", "members":{ @@ -26660,8 +26740,6 @@ "type":"structure", "required":[ "ComputationId", - "Time", - "Category", "Type" ], "members":{ @@ -26709,7 +26787,6 @@ "type":"structure", "required":[ "ComputationId", - "Category", "Type" ], "members":{ @@ -27311,10 +27388,7 @@ }, "TotalAggregationComputation":{ "type":"structure", - "required":[ - "ComputationId", - "Value" - ], + "required":["ComputationId"], "members":{ "ComputationId":{ "shape":"ShortRestrictiveResourceId", @@ -27690,10 +27764,7 @@ }, "UniqueValuesComputation":{ "type":"structure", - "required":[ - "ComputationId", - "Category" - ], + "required":["ComputationId"], "members":{ "ComputationId":{ "shape":"ShortRestrictiveResourceId", diff --git a/services/ram/pom.xml b/services/ram/pom.xml index 9b04ef324631..255588bed207 100644 --- a/services/ram/pom.xml +++ b/services/ram/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ram AWS Java SDK :: Services :: RAM diff --git a/services/rbin/pom.xml b/services/rbin/pom.xml index 86acca2d4cea..e46be3440b7a 100644 --- a/services/rbin/pom.xml +++ b/services/rbin/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT rbin AWS Java SDK :: Services :: Rbin diff --git a/services/rds/pom.xml b/services/rds/pom.xml index 9ea4980a58ca..89d3ef09d169 100644 --- a/services/rds/pom.xml +++ b/services/rds/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT rds AWS Java SDK :: Services :: Amazon RDS diff --git a/services/rds/src/main/resources/codegen-resources/customization.config b/services/rds/src/main/resources/codegen-resources/customization.config index 547a01d77f05..fa6c19c93539 100644 --- a/services/rds/src/main/resources/codegen-resources/customization.config +++ b/services/rds/src/main/resources/codegen-resources/customization.config @@ -56,7 +56,7 @@ ] } }, - "blacklistedSimpleMethods" : ["failoverDBCluster"], + "excludedSimpleMethods" : ["failoverDBCluster"], "deprecatedShapes" : [ "BackupPolicyNotFoundFault" ], diff --git a/services/rds/src/main/resources/codegen-resources/paginators-1.json b/services/rds/src/main/resources/codegen-resources/paginators-1.json index b6db47f81c06..41d01c2eb56e 100644 --- a/services/rds/src/main/resources/codegen-resources/paginators-1.json +++ b/services/rds/src/main/resources/codegen-resources/paginators-1.json @@ -12,6 +12,12 @@ "output_token": "Marker", "result_key": "Certificates" }, + "DescribeDBClusterAutomatedBackups": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBClusterAutomatedBackups" + }, "DescribeDBClusterBacktracks": { "input_token": "Marker", "limit_key": "MaxRecords", diff --git a/services/rds/src/main/resources/codegen-resources/service-2.json b/services/rds/src/main/resources/codegen-resources/service-2.json index c50e1eb47d7e..6de8ab8e1bb7 100644 --- a/services/rds/src/main/resources/codegen-resources/service-2.json +++ b/services/rds/src/main/resources/codegen-resources/service-2.json @@ -1611,7 +1611,7 @@ {"shape":"InvalidDBClusterStateFault"}, {"shape":"DBClusterNotFoundFault"} ], - "documentation":"

    Initiates the failover process for an Aurora global database (GlobalCluster).

    A failover for an Aurora global database promotes one of secondary read-only DB clusters to be the primary DB cluster and demotes the primary DB cluster to being a secondary (read-only) DB cluster. In other words, the role of the current primary DB cluster and the selected (target) DB cluster are switched. The selected secondary DB cluster assumes full read/write capabilities for the Aurora global database.

    For more information about failing over an Amazon Aurora global database, see Managed planned failover for Amazon Aurora global databases in the Amazon Aurora User Guide.

    This action applies to GlobalCluster (Aurora global databases) only. Use this action only on healthy Aurora global databases with running Aurora DB clusters and no Region-wide outages, to test disaster recovery scenarios or to reconfigure your Aurora global database topology.

    " + "documentation":"

    Promotes the specified secondary DB cluster to be the primary DB cluster in the global database cluster to fail over or switch over a global database. Switchover operations were previously called \"managed planned failovers.\"

    Although this operation can be used either to fail over or to switch over a global database cluster, its intended use is for global database failover. To switch over a global database cluster, we recommend that you use the SwitchoverGlobalCluster operation instead.

    How you use this operation depends on whether you are failing over or switching over your global database cluster:

    • Failing over - Specify the AllowDataLoss parameter and don't specify the Switchover parameter.

    • Switching over - Specify the Switchover parameter or omit it, but don't specify the AllowDataLoss parameter.

    About failing over and switching over

    While failing over and switching over a global database cluster both change the primary DB cluster, you use these operations for different reasons:

    • Failing over - Use this operation to respond to an unplanned event, such as a Regional disaster in the primary Region. Failing over can result in a loss of write transaction data that wasn't replicated to the chosen secondary before the failover event occurred. However, the recovery process that promotes a DB instance on the chosen secondary DB cluster to be the primary writer DB instance guarantees that the data is in a transactionally consistent state.

      For more information about failing over an Amazon Aurora global database, see Performing managed failovers for Aurora global databases in the Amazon Aurora User Guide.

    • Switching over - Use this operation on a healthy global database cluster for planned events, such as Regional rotation or to fail back to the original primary DB cluster after a failover operation. With this operation, there is no data loss.

      For more information about switching over an Amazon Aurora global database, see Performing switchovers for Aurora global databases in the Amazon Aurora User Guide.

    " }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -1986,7 +1986,7 @@ {"shape":"InvalidDBClusterStateFault"}, {"shape":"InvalidDBInstanceStateFault"} ], - "documentation":"

    Modifies a setting for an Amazon Aurora global cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.

    This operation only applies to Aurora global database clusters.

    " + "documentation":"

    Modifies a setting for an Amazon Aurora global database cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.

    This operation only applies to Aurora global database clusters.

    " }, "ModifyOptionGroup":{ "name":"ModifyOptionGroup", @@ -2284,6 +2284,7 @@ {"shape":"InvalidDBClusterSnapshotStateFault"}, {"shape":"StorageQuotaExceededFault"}, {"shape":"InvalidVPCNetworkStateFault"}, + {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"}, {"shape":"InvalidRestoreFault"}, {"shape":"DBSubnetGroupNotFoundFault"}, {"shape":"InvalidSubnet"}, @@ -2657,6 +2658,25 @@ ], "documentation":"

    Switches over a blue/green deployment.

    Before you switch over, production traffic is routed to the databases in the blue environment. After you switch over, production traffic is routed to the databases in the green environment.

    For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.

    " }, + "SwitchoverGlobalCluster":{ + "name":"SwitchoverGlobalCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SwitchoverGlobalClusterMessage"}, + "output":{ + "shape":"SwitchoverGlobalClusterResult", + "resultWrapper":"SwitchoverGlobalClusterResult" + }, + "errors":[ + {"shape":"GlobalClusterNotFoundFault"}, + {"shape":"InvalidGlobalClusterStateFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"DBClusterNotFoundFault"} + ], + "documentation":"

    Switches over the specified secondary DB cluster to be the new primary DB cluster in the global database cluster. Switchover operations were previously called \"managed planned failovers.\"

    Aurora promotes the specified secondary cluster to assume full read/write capabilities and demotes the current primary cluster to a secondary (read-only) cluster, maintaining the original replication topology. All secondary clusters are synchronized with the primary at the beginning of the process so the new primary continues operations for the Aurora global database without losing any data. Your database is unavailable for a short time while the primary and selected secondary clusters are assuming their new roles. For more information about switching over an Aurora global database, see Performing switchovers for Amazon Aurora global databases in the Amazon Aurora User Guide.

    This operation is intended for controlled environments, for operations such as \"regional rotation\" or to fall back to the original primary after a global database failover.

    " + }, "SwitchoverReadReplica":{ "name":"SwitchoverReadReplica", "http":{ @@ -3661,7 +3681,15 @@ "shape":"CustomDBEngineVersionManifest", "documentation":"

    The CEV manifest, which is a JSON document that describes the installation .zip files stored in Amazon S3. Specify the name/value pairs in a file or a quoted string. RDS Custom applies the patches in the order in which they are listed.

    The following JSON fields are valid:

    MediaImportTemplateVersion

    Version of the CEV manifest. The date is in the format YYYY-MM-DD.

    databaseInstallationFileNames

    Ordered list of installation files for the CEV.

    opatchFileNames

    Ordered list of OPatch installers used for the Oracle DB engine.

    psuRuPatchFileNames

    The PSU and RU patches for this CEV.

    OtherPatchFileNames

    The patches that are not in the list of PSU and RU patches. Amazon RDS applies these patches after applying the PSU and RU patches.

    For more information, see Creating the CEV manifest in the Amazon RDS User Guide.

    " }, - "Tags":{"shape":"TagList"} + "Tags":{"shape":"TagList"}, + "SourceCustomDbEngineVersionIdentifier":{ + "shape":"String255", + "documentation":"

    Reserved for future use.

    " + }, + "UseAwsProvidedLatestImage":{ + "shape":"BooleanOptional", + "documentation":"

    Reserved for future use.

    " + } } }, "CreateDBClusterEndpointMessage":{ @@ -3847,7 +3875,7 @@ }, "StorageType":{ "shape":"String", - "documentation":"

    The storage type to associate with the DB cluster.

    For information on storage types for Aurora DB clusters, see Storage configurations for Amazon Aurora DB clusters. For information on storage types for Multi-AZ DB clusters, see Settings for creating Multi-AZ DB clusters.

    This setting is required to create a Multi-AZ DB cluster.

    When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required.

    Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

    Valid Values:

    • Aurora DB clusters - aurora | aurora-iopt1

    • Multi-AZ DB clusters - io1

    Default:

    • Aurora DB clusters - aurora

    • Multi-AZ DB clusters - io1

    " + "documentation":"

    The storage type to associate with the DB cluster.

    For information on storage types for Aurora DB clusters, see Storage configurations for Amazon Aurora DB clusters. For information on storage types for Multi-AZ DB clusters, see Settings for creating Multi-AZ DB clusters.

    This setting is required to create a Multi-AZ DB cluster.

    When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required.

    Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

    Valid Values:

    • Aurora DB clusters - aurora | aurora-iopt1

    • Multi-AZ DB clusters - io1

    Default:

    • Aurora DB clusters - aurora

    • Multi-AZ DB clusters - io1

    When you create an Aurora DB cluster with the storage type set to aurora-iopt1, the storage type is returned in the response. The storage type isn't returned when you set it to aurora.

    " }, "Iops":{ "shape":"IntegerOptional", @@ -5117,6 +5145,10 @@ "LocalWriteForwardingStatus":{ "shape":"LocalWriteForwardingStatus", "documentation":"

    Specifies whether an Aurora DB cluster has in-cluster write forwarding enabled, not enabled, requested, or is in the process of enabling it.

    " + }, + "AwsBackupRecoveryPointArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the recovery point in Amazon Web Services Backup.

    " } }, "documentation":"

    Contains the details of an Amazon Aurora DB cluster or Multi-AZ DB cluster.

    For an Amazon Aurora DB cluster, this data type is used as a response element in the operations CreateDBCluster, DeleteDBCluster, DescribeDBClusters, FailoverDBCluster, ModifyDBCluster, PromoteReadReplicaDBCluster, RestoreDBClusterFromS3, RestoreDBClusterFromSnapshot, RestoreDBClusterToPointInTime, StartDBCluster, and StopDBCluster.

    For a Multi-AZ DB cluster, this data type is used as a response element in the operations CreateDBCluster, DeleteDBCluster, DescribeDBClusters, FailoverDBCluster, ModifyDBCluster, RebootDBCluster, RestoreDBClusterFromSnapshot, and RestoreDBClusterToPointInTime.

    For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

    For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

    ", @@ -5225,6 +5257,10 @@ "Iops":{ "shape":"IntegerOptional", "documentation":"

    The IOPS (I/O operations per second) value for the automated backup.

    This setting is only for non-Aurora Multi-AZ DB clusters.

    " + }, + "AwsBackupRecoveryPointArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the recovery point in Amazon Web Services Backup.

    " } }, "documentation":"

    An automated backup of a DB cluster. It consists of system backups, transaction logs, and the database cluster properties that existed at the time you deleted the source cluster.

    ", @@ -6502,6 +6538,10 @@ "StorageThroughput":{ "shape":"IntegerOptional", "documentation":"

    Specifies the storage throughput for the automated backup.

    " + }, + "AwsBackupRecoveryPointArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the recovery point in Amazon Web Services Backup.

    " } }, "documentation":"

    An automated backup of a DB instance. It consists of system backups, transaction logs, and the database instance properties that existed at the time you deleted the source instance.

    ", @@ -9730,11 +9770,19 @@ "members":{ "GlobalClusterIdentifier":{ "shape":"GlobalClusterIdentifier", - "documentation":"

    Identifier of the Aurora global database (GlobalCluster) that should be failed over. The identifier is the unique key assigned by the user when the Aurora global database was created. In other words, it's the name of the Aurora global database that you want to fail over.

    Constraints:

    • Must match the identifier of an existing GlobalCluster (Aurora global database).

    " + "documentation":"

    The identifier of the global database cluster (Aurora global database) this operation should apply to. The identifier is the unique key assigned by the user when the Aurora global database is created. In other words, it's the name of the Aurora global database.

    Constraints:

    • Must match the identifier of an existing global database cluster.

    " }, "TargetDbClusterIdentifier":{ "shape":"DBClusterIdentifier", - "documentation":"

    Identifier of the secondary Aurora DB cluster that you want to promote to primary for the Aurora global database (GlobalCluster.) Use the Amazon Resource Name (ARN) for the identifier so that Aurora can locate the cluster in its Amazon Web Services Region.

    " + "documentation":"

    The identifier of the secondary Aurora DB cluster that you want to promote to the primary for the global database cluster. Use the Amazon Resource Name (ARN) for the identifier so that Aurora can locate the cluster in its Amazon Web Services Region.

    " + }, + "AllowDataLoss":{ + "shape":"BooleanOptional", + "documentation":"

    Specifies whether to allow data loss for this global database cluster operation. Allowing data loss triggers a global failover operation.

    If you don't specify AllowDataLoss, the global database cluster operation defaults to a switchover.

    Constraints:

    • Can't be specified together with the Switchover parameter.

    " + }, + "Switchover":{ + "shape":"BooleanOptional", + "documentation":"

    Specifies whether to switch over this global database cluster.

    Constraints:

    • Can't be specified together with the AllowDataLoss parameter.

    " } } }, @@ -9749,7 +9797,7 @@ "members":{ "Status":{ "shape":"FailoverStatus", - "documentation":"

    The current status of the Aurora global database (GlobalCluster). Possible values are as follows:

    • pending – A request to fail over the Aurora global database (GlobalCluster) has been received by the service. The GlobalCluster's primary DB cluster and the specified secondary DB cluster are being verified before the failover process can start.

    • failing-over – This status covers the range of Aurora internal operations that take place during the failover process, such as demoting the primary Aurora DB cluster, promoting the secondary Aurora DB, and synchronizing replicas.

    • cancelling – The request to fail over the Aurora global database (GlobalCluster) was cancelled and the primary Aurora DB cluster and the selected secondary Aurora DB cluster are returning to their previous states.

    " + "documentation":"

    The current status of the global cluster. Possible values are as follows:

    • pending – The service received a request to switch over or fail over the global cluster. The global cluster's primary DB cluster and the specified secondary DB cluster are being verified before the operation starts.

    • failing-over – This status covers the range of Aurora internal operations that take place during the switchover or failover process, such as demoting the primary Aurora DB cluster, promoting the secondary Aurora DB cluster, and synchronizing replicas.

    • cancelling – The request to switch over or fail over the global cluster was cancelled and the primary Aurora DB cluster and the selected secondary Aurora DB cluster are returning to their previous states.

    " }, "FromDbClusterArn":{ "shape":"String", @@ -9758,9 +9806,13 @@ "ToDbClusterArn":{ "shape":"String", "documentation":"

    The Amazon Resource Name (ARN) of the Aurora DB cluster that is currently being promoted, and which is associated with this state.

    " + }, + "IsDataLossAllowed":{ + "shape":"Boolean", + "documentation":"

    Indicates whether the operation is a global switchover or a global failover. If data loss is allowed, then the operation is a global failover. Otherwise, it's a switchover.

    " } }, - "documentation":"

    Contains the state of scheduled or in-process failover operations on an Aurora global database (GlobalCluster). This Data type is empty unless a failover operation is scheduled or is currently underway on the Aurora global database.

    ", + "documentation":"

    Contains the state of scheduled or in-process operations on a global cluster (Aurora global database). This data type is empty unless a switchover or failover operation is scheduled or is in progress on the Aurora global database.

    ", "wrapper":true }, "FailoverStatus":{ @@ -9852,7 +9904,7 @@ }, "FailoverState":{ "shape":"FailoverState", - "documentation":"

    A data object containing all properties for the current state of an in-process or pending failover process for this Aurora global database. This object is empty unless the FailoverGlobalCluster API operation has been called on this Aurora global database (GlobalCluster).

    " + "documentation":"

    A data object containing all properties for the current state of an in-process or pending switchover or failover process for this global cluster (Aurora global database). This object is empty unless the SwitchoverGlobalCluster or FailoverGlobalCluster operation was called on this global cluster.

    " } }, "documentation":"

    A data type representing an Aurora global database.

    ", @@ -9888,22 +9940,26 @@ "members":{ "DBClusterArn":{ "shape":"String", - "documentation":"

    The Amazon Resource Name (ARN) for each Aurora cluster.

    " + "documentation":"

    The Amazon Resource Name (ARN) for each Aurora DB cluster in the global cluster.

    " }, "Readers":{ "shape":"ReadersArnList", - "documentation":"

    The Amazon Resource Name (ARN) for each read-only secondary cluster associated with the Aurora global database.

    " + "documentation":"

    The Amazon Resource Name (ARN) for each read-only secondary cluster associated with the global cluster.

    " }, "IsWriter":{ "shape":"Boolean", - "documentation":"

    Specifies whether the Aurora cluster is the primary cluster (that is, has read-write capability) for the Aurora global database with which it is associated.

    " + "documentation":"

    Specifies whether the Aurora DB cluster is the primary cluster (that is, has read-write capability) for the global cluster with which it is associated.

    " }, "GlobalWriteForwardingStatus":{ "shape":"WriteForwardingStatus", - "documentation":"

    Specifies whether a secondary cluster in an Aurora global database has write forwarding enabled, not enabled, or is in the process of enabling it.

    " + "documentation":"

    Specifies whether a secondary cluster in the global cluster has write forwarding enabled, not enabled, or is in the process of enabling it.

    " + }, + "SynchronizationStatus":{ + "shape":"GlobalClusterMemberSynchronizationStatus", + "documentation":"

    The status of synchronization of each Aurora DB cluster in the global cluster.

    " } }, - "documentation":"

    A data structure with information about any primary and secondary clusters associated with an Aurora global database.

    ", + "documentation":"

    A data structure with information about any primary and secondary clusters associated with a global cluster (Aurora global database).

    ", "wrapper":true }, "GlobalClusterMemberList":{ @@ -9913,6 +9969,13 @@ "locationName":"GlobalClusterMember" } }, + "GlobalClusterMemberSynchronizationStatus":{ + "type":"string", + "enum":[ + "connected", + "pending-resync" + ] + }, "GlobalClusterNotFoundFault":{ "type":"structure", "members":{ @@ -10784,6 +10847,10 @@ "EnableLocalWriteForwarding":{ "shape":"BooleanOptional", "documentation":"

    Specifies whether read replicas can forward write operations to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.

    Valid for: Aurora DB clusters only

    " + }, + "AwsBackupRecoveryPointArn":{ + "shape":"AwsBackupRecoveryPointArn", + "documentation":"

    The Amazon Resource Name (ARN) of the recovery point in Amazon Web Services Backup.

    " } }, "documentation":"

    " @@ -12930,7 +12997,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

    The version number of the database engine to use.

    To list all of the available engine versions for aurora-mysql (Aurora MySQL), use the following command:

    aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"

    Aurora MySQL

    Examples: 5.7.mysql_aurora.2.07.1, 8.0.mysql_aurora.3.02.0

    " + "documentation":"

    The version number of the database engine to use.

    To list all of the available engine versions for aurora-mysql (Aurora MySQL), use the following command:

    aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"

    Aurora MySQL

    Examples: 5.7.mysql_aurora.2.12.0, 8.0.mysql_aurora.3.04.0

    " }, "Port":{ "shape":"IntegerOptional", @@ -12975,7 +13042,7 @@ }, "SourceEngineVersion":{ "shape":"String", - "documentation":"

    The version of the database that the backup files were created from.

    MySQL versions 5.5, 5.6, and 5.7 are supported.

    Example: 5.6.40, 5.7.28

    " + "documentation":"

    The version of the database that the backup files were created from.

    MySQL versions 5.7 and 8.0 are supported.

    Example: 5.7.40, 8.0.28

    " }, "S3BucketName":{ "shape":"String", @@ -14565,6 +14632,29 @@ "member":{"shape":"SwitchoverDetail"} }, "SwitchoverDetailStatus":{"type":"string"}, + "SwitchoverGlobalClusterMessage":{ + "type":"structure", + "required":[ + "GlobalClusterIdentifier", + "TargetDbClusterIdentifier" + ], + "members":{ + "GlobalClusterIdentifier":{ + "shape":"GlobalClusterIdentifier", + "documentation":"

    The identifier of the global database cluster to switch over. This parameter isn't case-sensitive.

    Constraints:

    • Must match the identifier of an existing global database cluster (Aurora global database).

    " + }, + "TargetDbClusterIdentifier":{ + "shape":"DBClusterIdentifier", + "documentation":"

    The identifier of the secondary Aurora DB cluster to promote to the new primary for the global database cluster. Use the Amazon Resource Name (ARN) for the identifier so that Aurora can locate the cluster in its Amazon Web Services Region.

    " + } + } + }, + "SwitchoverGlobalClusterResult":{ + "type":"structure", + "members":{ + "GlobalCluster":{"shape":"GlobalCluster"} + } + }, "SwitchoverReadReplicaMessage":{ "type":"structure", "required":["DBInstanceIdentifier"], diff --git a/services/rdsdata/pom.xml b/services/rdsdata/pom.xml index 6259decdc6db..fed664f990e1 100644 --- a/services/rdsdata/pom.xml +++ b/services/rdsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT rdsdata AWS Java SDK :: Services :: RDS Data diff --git a/services/redshift/pom.xml b/services/redshift/pom.xml index d1a33631b3db..a398741f9c98 100644 --- a/services/redshift/pom.xml +++ b/services/redshift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT redshift AWS Java SDK :: Services :: Amazon Redshift diff --git a/services/redshift/src/main/resources/codegen-resources/customization.config b/services/redshift/src/main/resources/codegen-resources/customization.config index a15637c838c4..635247225c35 100644 --- a/services/redshift/src/main/resources/codegen-resources/customization.config +++ b/services/redshift/src/main/resources/codegen-resources/customization.config @@ -21,7 +21,7 @@ "describeStorage", "describeTags" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "describeTableRestoreStatus", "describeClusterSecurityGroups" ] diff --git a/services/redshiftdata/pom.xml b/services/redshiftdata/pom.xml index 2a37d4d50abd..00ee24004d3a 100644 --- a/services/redshiftdata/pom.xml +++ b/services/redshiftdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT redshiftdata AWS Java SDK :: Services :: Redshift Data diff --git a/services/redshiftserverless/pom.xml b/services/redshiftserverless/pom.xml index 0795179ed939..caeaab95e4f0 100644 --- a/services/redshiftserverless/pom.xml +++ b/services/redshiftserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT redshiftserverless AWS Java SDK :: Services :: Redshift Serverless diff --git a/services/rekognition/pom.xml b/services/rekognition/pom.xml index f5864f271404..7a57641d60f8 100644 --- a/services/rekognition/pom.xml +++ b/services/rekognition/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT rekognition AWS Java SDK :: Services :: Amazon Rekognition diff --git a/services/rekognition/src/main/resources/codegen-resources/customization.config b/services/rekognition/src/main/resources/codegen-resources/customization.config index 77a06980da8e..ece3c940cc7d 100644 --- a/services/rekognition/src/main/resources/codegen-resources/customization.config +++ b/services/rekognition/src/main/resources/codegen-resources/customization.config @@ -3,7 +3,7 @@ "listCollections", "listStreamProcessors" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "describeTableRestoreStatus", "describeClusterSecurityGroups" ] diff --git a/services/resiliencehub/pom.xml b/services/resiliencehub/pom.xml index 33b5841bb5d4..ad133cd7e2b3 100644 --- a/services/resiliencehub/pom.xml +++ b/services/resiliencehub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT resiliencehub AWS Java SDK :: Services :: Resiliencehub diff --git a/services/resourceexplorer2/pom.xml b/services/resourceexplorer2/pom.xml index 22587e02f75a..3bb9585bd6d4 100644 --- a/services/resourceexplorer2/pom.xml +++ b/services/resourceexplorer2/pom.xml @@ -21,7 
+21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT resourceexplorer2 AWS Java SDK :: Services :: Resource Explorer 2 diff --git a/services/resourcegroups/pom.xml b/services/resourcegroups/pom.xml index 651a5d70f92b..2e3cb6a855b2 100644 --- a/services/resourcegroups/pom.xml +++ b/services/resourcegroups/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 resourcegroups diff --git a/services/resourcegroupstaggingapi/pom.xml b/services/resourcegroupstaggingapi/pom.xml index d411662b193f..3220dc25d1c5 100644 --- a/services/resourcegroupstaggingapi/pom.xml +++ b/services/resourcegroupstaggingapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT resourcegroupstaggingapi AWS Java SDK :: Services :: AWS Resource Groups Tagging API diff --git a/services/robomaker/pom.xml b/services/robomaker/pom.xml index 838cc84847e6..14fa5e53d7ea 100644 --- a/services/robomaker/pom.xml +++ b/services/robomaker/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT robomaker AWS Java SDK :: Services :: RoboMaker diff --git a/services/rolesanywhere/pom.xml b/services/rolesanywhere/pom.xml index 2dc9be0a4a65..d1f7c2a4ba54 100644 --- a/services/rolesanywhere/pom.xml +++ b/services/rolesanywhere/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT rolesanywhere AWS Java SDK :: Services :: Roles Anywhere diff --git a/services/route53/pom.xml b/services/route53/pom.xml index 196f1971c38d..07f50b45c9f4 100644 --- a/services/route53/pom.xml +++ b/services/route53/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT route53 AWS Java SDK :: Services :: Amazon Route53 diff --git a/services/route53domains/pom.xml b/services/route53domains/pom.xml index 81cf9e46861f..d242f883f094 100644 --- a/services/route53domains/pom.xml +++ b/services/route53domains/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT route53domains AWS Java SDK :: Services :: Amazon Route53 Domains diff --git a/services/route53domains/src/main/resources/codegen-resources/customization.config b/services/route53domains/src/main/resources/codegen-resources/customization.config index df62beb2be1c..cbc750ecf2cd 100644 --- a/services/route53domains/src/main/resources/codegen-resources/customization.config +++ b/services/route53domains/src/main/resources/codegen-resources/customization.config @@ -4,7 +4,7 @@ "listDomains", "listOperations" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "viewBilling", "getContactReachabilityStatus" ] diff --git a/services/route53domains/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/route53domains/src/main/resources/codegen-resources/endpoint-rule-set.json index 3ccf51cbf5b4..3f5f87833147 100644 --- a/services/route53domains/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/route53domains/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - 
"ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://route53domains-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://route53domains-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://route53domains-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://route53domains-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": 
{}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://route53domains.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://route53domains.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://route53domains.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://route53domains.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/route53domains/src/main/resources/codegen-resources/service-2.json b/services/route53domains/src/main/resources/codegen-resources/service-2.json index 8bb0ddb21abe..db2da8ebd49d 100644 --- a/services/route53domains/src/main/resources/codegen-resources/service-2.json +++ b/services/route53domains/src/main/resources/codegen-resources/service-2.json @@ -543,7 +543,7 @@ "documentation":"

    The name of the domain that was specified when another Amazon Web Services account submitted a TransferDomainToAnotherAwsAccount request.

    " }, "Password":{ - "shape":"String", + "shape":"Password", "documentation":"

    The password that was returned by the TransferDomainToAnotherAwsAccount request.

    " } }, @@ -567,7 +567,8 @@ }, "AddressLine":{ "type":"string", - "max":255 + "max":255, + "sensitive":true }, "AssociateDelegationSignerToDomainRequest":{ "type":"structure", @@ -693,13 +694,18 @@ "Transferability":{ "shape":"DomainTransferability", "documentation":"

    A complex type that contains information about whether the specified domain can be transferred to Route 53.

    " + }, + "Message":{ + "shape":"Message", + "documentation":"

    Provides an explanation for when a domain can't be transferred.

    " } }, "documentation":"

    The CheckDomainTransferability response includes the following elements.

    " }, "City":{ "type":"string", - "max":255 + "max":255, + "sensitive":true }, "Consent":{ "type":"structure", @@ -784,11 +790,13 @@ }, "ContactName":{ "type":"string", - "max":255 + "max":255, + "sensitive":true }, "ContactNumber":{ "type":"string", - "max":30 + "max":30, + "sensitive":true }, "ContactType":{ "type":"string", @@ -1054,7 +1062,8 @@ "ZA", "ZM", "ZW" - ] + ], + "sensitive":true }, "Currency":{ "type":"string", @@ -1366,6 +1375,10 @@ "DuplicateRequest":{ "type":"structure", "members":{ + "requestId":{ + "shape":"RequestId", + "documentation":"

    ID of the request operation.

    " + }, "message":{ "shape":"ErrorMessage", "documentation":"

    The request is already in progress for the domain.

    " @@ -1381,7 +1394,8 @@ }, "Email":{ "type":"string", - "max":254 + "max":254, + "sensitive":true }, "EnableDomainAutoRenewRequest":{ "type":"structure", @@ -1830,7 +1844,7 @@ }, "SortOrder":{ "shape":"SortOrder", - "documentation":"

    The sort order ofr returned values, either ascending or descending.

    " + "documentation":"

    The sort order for returned values, either ascending or descending.

    " } }, "documentation":"

    The ListOperations request includes the following elements.

    " @@ -1908,6 +1922,7 @@ }, "documentation":"

    The ListTagsForDomain response includes the following elements.

    " }, + "Message":{"type":"string"}, "Nameserver":{ "type":"structure", "required":["Name"], @@ -2044,6 +2059,10 @@ "type":"integer", "max":100 }, + "Password":{ + "type":"string", + "sensitive":true + }, "Price":{"type":"double"}, "PriceWithCurrency":{ "type":"structure", @@ -2207,6 +2226,7 @@ } } }, + "RequestId":{"type":"string"}, "Reseller":{"type":"string"}, "ResendContactReachabilityEmailRequest":{ "type":"structure", @@ -2292,7 +2312,8 @@ }, "State":{ "type":"string", - "max":255 + "max":255, + "sensitive":true }, "StatusFlag":{ "type":"string", @@ -2451,7 +2472,7 @@ "documentation":"

    Identifier for tracking the progress of the request. To query the operation status, use GetOperationDetail.

    " }, "Password":{ - "shape":"String", + "shape":"Password", "documentation":"

    To finish transferring a domain to another Amazon Web Services account, the account that the domain is being transferred to must submit an AcceptDomainTransferFromAnotherAwsAccount request. The request must include the value of the Password element that was returned in the TransferDomainToAnotherAwsAccount response.

    " } }, @@ -2652,7 +2673,8 @@ }, "ZipCode":{ "type":"string", - "max":255 + "max":255, + "sensitive":true } }, "documentation":"

    Amazon Route 53 API actions let you register domain names and perform related operations.

    " diff --git a/services/route53recoverycluster/pom.xml b/services/route53recoverycluster/pom.xml index 0dd3739e76f9..6e4ae08e1dcc 100644 --- a/services/route53recoverycluster/pom.xml +++ b/services/route53recoverycluster/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT route53recoverycluster AWS Java SDK :: Services :: Route53 Recovery Cluster diff --git a/services/route53recoverycontrolconfig/pom.xml b/services/route53recoverycontrolconfig/pom.xml index 9e7d93e6b954..737eee244f3a 100644 --- a/services/route53recoverycontrolconfig/pom.xml +++ b/services/route53recoverycontrolconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT route53recoverycontrolconfig AWS Java SDK :: Services :: Route53 Recovery Control Config diff --git a/services/route53recoveryreadiness/pom.xml b/services/route53recoveryreadiness/pom.xml index f9ec52f370c0..5f7bd59c98f9 100644 --- a/services/route53recoveryreadiness/pom.xml +++ b/services/route53recoveryreadiness/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT route53recoveryreadiness AWS Java SDK :: Services :: Route53 Recovery Readiness diff --git a/services/route53resolver/pom.xml b/services/route53resolver/pom.xml index a628b91cb9b2..6f69eca6e1bb 100644 --- a/services/route53resolver/pom.xml +++ b/services/route53resolver/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT route53resolver AWS Java SDK :: Services :: Route53Resolver diff --git a/services/route53resolver/src/main/resources/codegen-resources/customization.config b/services/route53resolver/src/main/resources/codegen-resources/customization.config index c06646195ea5..daf08da254a2 100644 --- a/services/route53resolver/src/main/resources/codegen-resources/customization.config +++ b/services/route53resolver/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "listResolverEndpoints", "listResolverRuleAssociations", "listResolverRules" diff --git a/services/rum/pom.xml b/services/rum/pom.xml index 1df461cb60a2..7d3810c706e8 100644 --- a/services/rum/pom.xml +++ b/services/rum/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT rum AWS Java SDK :: Services :: RUM diff --git a/services/s3/pom.xml b/services/s3/pom.xml index 45d84cadce4a..541b3cefa19f 100644 --- a/services/s3/pom.xml +++ b/services/s3/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT s3 AWS Java SDK :: Services :: Amazon S3 @@ -158,6 +158,11 @@ equalsverifier test + + com.google.jimfs + jimfs + test + diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3PresignerIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3PresignerIntegrationTest.java index 1ca5526a1e4b..c95d47f8fe54 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3PresignerIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3PresignerIntegrationTest.java @@ -50,6 +50,7 @@ import software.amazon.awssdk.services.s3.presigner.model.PresignedAbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedCompleteMultipartUploadRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedCreateMultipartUploadRequest; +import 
software.amazon.awssdk.services.s3.presigner.model.PresignedDeleteObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedPutObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedUploadPartRequest; @@ -200,6 +201,38 @@ public void getObject_PresignedHttpRequestCanBeInvokedDirectlyBySdk() throws IOE } } + @Test + public void deleteObject_PresignedHttpRequestCanBeInvokedDirectlyBySdk() throws IOException { + String objectKey = generateRandomObjectKey(); + S3TestUtils.addCleanupTask(S3PresignerIntegrationTest.class, + () -> client.deleteObject(r -> r.bucket(testBucket).key(objectKey))); + client.putObject(r -> r.bucket(testBucket).key(objectKey), RequestBody.fromString("DeleteObjectPresignRequestTest")); + + PresignedDeleteObjectRequest presigned = + presigner.presignDeleteObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .deleteObjectRequest(delo -> delo.bucket(testBucket) + .key(testGetObjectKey) + .requestPayer(RequestPayer.REQUESTER))); + + assertThat(presigned.isBrowserExecutable()).isFalse(); + + SdkHttpClient httpClient = ApacheHttpClient.builder().build(); // or UrlConnectionHttpClient.builder().build() + + ContentStreamProvider requestPayload = presigned.signedPayload() + .map(SdkBytes::asContentStreamProvider) + .orElse(null); + + HttpExecuteRequest request = HttpExecuteRequest.builder() + .request(presigned.httpRequest()) + .contentStreamProvider(requestPayload) + .build(); + + HttpExecuteResponse response = httpClient.prepareRequest(request).call(); + + assertThat(response.responseBody()).isEmpty(); + assertThat(response.httpResponse().statusCode()).isEqualTo(204); + } + @Test public void putObject_PresignedHttpRequestCanBeInvokedDirectlyBySdk() throws IOException { String objectKey = generateRandomObjectKey(); diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartConfigurationResolver.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartConfigurationResolver.java new file mode 100644 index 000000000000..9fc199175bda --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartConfigurationResolver.java @@ -0,0 +1,53 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.multipart; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.services.s3.multipart.MultipartConfiguration; +import software.amazon.awssdk.utils.Validate; + +/** + * Internal utility class to resolve {@link MultipartConfiguration}. 
+ */ +@SdkInternalApi +public final class MultipartConfigurationResolver { + + private static final long DEFAULT_MIN_PART_SIZE = 8L * 1024 * 1024; + private final long minimalPartSizeInBytes; + private final long apiCallBufferSize; + private final long thresholdInBytes; + + public MultipartConfigurationResolver(MultipartConfiguration multipartConfiguration) { + Validate.notNull(multipartConfiguration, "multipartConfiguration"); + this.minimalPartSizeInBytes = Validate.getOrDefault(multipartConfiguration.minimumPartSizeInBytes(), + () -> DEFAULT_MIN_PART_SIZE); + this.apiCallBufferSize = Validate.getOrDefault(multipartConfiguration.apiCallBufferSizeInBytes(), + () -> minimalPartSizeInBytes * 4); + this.thresholdInBytes = Validate.getOrDefault(multipartConfiguration.thresholdInBytes(), () -> minimalPartSizeInBytes); + } + + public long minimalPartSizeInBytes() { + return minimalPartSizeInBytes; + } + + public long thresholdInBytes() { + return thresholdInBytes; + } + + public long apiCallBufferSize() { + return apiCallBufferSize; + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartS3AsyncClient.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartS3AsyncClient.java index 65b26ddec971..8b53099b8683 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartS3AsyncClient.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartS3AsyncClient.java @@ -46,10 +46,6 @@ public final class MultipartS3AsyncClient extends DelegatingS3AsyncClient { private static final ApiName USER_AGENT_API_NAME = ApiName.builder().name("hll").version("s3Multipart").build(); - private static final long DEFAULT_MIN_PART_SIZE = 8L * 1024 * 1024; - private static final long DEFAULT_THRESHOLD = 8L * 1024 * 1024; - private static final long DEFAULT_API_CALL_BUFFER_SIZE = DEFAULT_MIN_PART_SIZE * 4; - private final UploadObjectHelper mpuHelper; private final CopyObjectHelper copyObjectHelper; @@ -57,21 +53,13 @@ private MultipartS3AsyncClient(S3AsyncClient delegate, MultipartConfiguration mu super(delegate); MultipartConfiguration validConfiguration = Validate.getOrDefault(multipartConfiguration, MultipartConfiguration.builder()::build); - long minPartSizeInBytes = Validate.getOrDefault(validConfiguration.minimumPartSizeInBytes(), - () -> DEFAULT_MIN_PART_SIZE); - long threshold = Validate.getOrDefault(validConfiguration.thresholdInBytes(), - () -> DEFAULT_THRESHOLD); - long apiCallBufferSizeInBytes = Validate.getOrDefault(validConfiguration.apiCallBufferSizeInBytes(), - () -> computeApiCallBufferSize(validConfiguration)); - mpuHelper = new UploadObjectHelper(delegate, minPartSizeInBytes, threshold, apiCallBufferSizeInBytes); + MultipartConfigurationResolver resolver = new MultipartConfigurationResolver(validConfiguration); + long minPartSizeInBytes = resolver.minimalPartSizeInBytes(); + long threshold = resolver.thresholdInBytes(); + mpuHelper = new UploadObjectHelper(delegate, resolver); copyObjectHelper = new CopyObjectHelper(delegate, minPartSizeInBytes, threshold); } - private long computeApiCallBufferSize(MultipartConfiguration multipartConfiguration) { - return multipartConfiguration.minimumPartSizeInBytes() != null ? 
multipartConfiguration.minimumPartSizeInBytes() * 4 - : DEFAULT_API_CALL_BUFFER_SIZE; - } - @Override public CompletableFuture putObject(PutObjectRequest putObjectRequest, AsyncRequestBody requestBody) { return mpuHelper.uploadObject(putObjectRequest, requestBody); diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelper.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelper.java index 0700e8ade5f9..1ca499b57aa8 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelper.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelper.java @@ -34,30 +34,28 @@ public final class UploadObjectHelper { private final long partSizeInBytes; private final GenericMultipartHelper genericMultipartHelper; - private final long maxMemoryUsageInBytes; + private final long apiCallBufferSize; private final long multipartUploadThresholdInBytes; private final UploadWithKnownContentLengthHelper uploadWithKnownContentLength; private final UploadWithUnknownContentLengthHelper uploadWithUnknownContentLength; public UploadObjectHelper(S3AsyncClient s3AsyncClient, - long partSizeInBytes, - long multipartUploadThresholdInBytes, - long maxMemoryUsageInBytes) { + MultipartConfigurationResolver resolver) { this.s3AsyncClient = s3AsyncClient; - this.partSizeInBytes = partSizeInBytes; + this.partSizeInBytes = resolver.minimalPartSizeInBytes(); this.genericMultipartHelper = new GenericMultipartHelper<>(s3AsyncClient, SdkPojoConversionUtils::toAbortMultipartUploadRequest, SdkPojoConversionUtils::toPutObjectResponse); - this.maxMemoryUsageInBytes = maxMemoryUsageInBytes; - this.multipartUploadThresholdInBytes = multipartUploadThresholdInBytes; + this.apiCallBufferSize = resolver.apiCallBufferSize(); + this.multipartUploadThresholdInBytes = resolver.thresholdInBytes(); this.uploadWithKnownContentLength = new UploadWithKnownContentLengthHelper(s3AsyncClient, partSizeInBytes, multipartUploadThresholdInBytes, - maxMemoryUsageInBytes); + apiCallBufferSize); this.uploadWithUnknownContentLength = new UploadWithUnknownContentLengthHelper(s3AsyncClient, partSizeInBytes, multipartUploadThresholdInBytes, - maxMemoryUsageInBytes); + apiCallBufferSize); } public CompletableFuture uploadObject(PutObjectRequest putObjectRequest, diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadWithKnownContentLengthHelper.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadWithKnownContentLengthHelper.java index f7d199ac3aa6..46caefca8d61 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadWithKnownContentLengthHelper.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadWithKnownContentLengthHelper.java @@ -196,7 +196,9 @@ public void onSubscribe(Subscription s) { returnFuture.whenComplete((r, t) -> { if (t != null) { s.cancel(); - multipartUploadHelper.cancelingOtherOngoingRequests(futures, t); + if (failureActionInitiated.compareAndSet(false, true)) { + multipartUploadHelper.failRequestsElegantly(futures, t, uploadId, returnFuture, putObjectRequest); + } } }); } diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/signing/DefaultS3Presigner.java 
b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/signing/DefaultS3Presigner.java index ec79bb132ffe..ed9fed5f9103 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/signing/DefaultS3Presigner.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/signing/DefaultS3Presigner.java @@ -74,6 +74,7 @@ import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.UploadPartRequest; @@ -81,10 +82,12 @@ import software.amazon.awssdk.services.s3.presigner.model.AbortMultipartUploadPresignRequest; import software.amazon.awssdk.services.s3.presigner.model.CompleteMultipartUploadPresignRequest; import software.amazon.awssdk.services.s3.presigner.model.CreateMultipartUploadPresignRequest; +import software.amazon.awssdk.services.s3.presigner.model.DeleteObjectPresignRequest; import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedAbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedCompleteMultipartUploadRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedCreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedDeleteObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedPutObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedUploadPartRequest; @@ -93,6 +96,7 @@ import software.amazon.awssdk.services.s3.transform.AbortMultipartUploadRequestMarshaller; import software.amazon.awssdk.services.s3.transform.CompleteMultipartUploadRequestMarshaller; import software.amazon.awssdk.services.s3.transform.CreateMultipartUploadRequestMarshaller; +import software.amazon.awssdk.services.s3.transform.DeleteObjectRequestMarshaller; import software.amazon.awssdk.services.s3.transform.GetObjectRequestMarshaller; import software.amazon.awssdk.services.s3.transform.PutObjectRequestMarshaller; import software.amazon.awssdk.services.s3.transform.UploadPartRequestMarshaller; @@ -118,6 +122,7 @@ public final class DefaultS3Presigner extends DefaultSdkPresigner implements S3P private final PutObjectRequestMarshaller putObjectRequestMarshaller; private final CreateMultipartUploadRequestMarshaller createMultipartUploadRequestMarshaller; private final UploadPartRequestMarshaller uploadPartRequestMarshaller; + private final DeleteObjectRequestMarshaller deleteObjectRequestMarshaller; private final CompleteMultipartUploadRequestMarshaller completeMultipartUploadRequestMarshaller; private final AbortMultipartUploadRequestMarshaller abortMultipartUploadRequestMarshaller; private final SdkClientConfiguration clientConfiguration; @@ -172,6 +177,9 @@ private DefaultS3Presigner(Builder b) { // Copied from DefaultS3Client#uploadPart this.uploadPartRequestMarshaller = new UploadPartRequestMarshaller(protocolFactory); + // Copied from DefaultS3Client#deleteObject + this.deleteObjectRequestMarshaller = new 
DeleteObjectRequestMarshaller(protocolFactory); + // Copied from DefaultS3Client#completeMultipartUpload this.completeMultipartUploadRequestMarshaller = new CompleteMultipartUploadRequestMarshaller(protocolFactory); @@ -247,6 +255,17 @@ public PresignedPutObjectRequest presignPutObject(PutObjectPresignRequest reques .build(); } + @Override + public PresignedDeleteObjectRequest presignDeleteObject(DeleteObjectPresignRequest request) { + return presign(PresignedDeleteObjectRequest.builder(), + request, + request.deleteObjectRequest(), + DeleteObjectRequest.class, + deleteObjectRequestMarshaller::marshall, + "DeleteObject") + .build(); + } + @Override public PresignedCreateMultipartUploadRequest presignCreateMultipartUpload(CreateMultipartUploadPresignRequest request) { return presign(PresignedCreateMultipartUploadRequest.builder(), diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/multipart/MultipartConfiguration.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/multipart/MultipartConfiguration.java index 28e418974db8..be2500703e15 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/multipart/MultipartConfiguration.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/multipart/MultipartConfiguration.java @@ -93,13 +93,19 @@ public Long apiCallBufferSizeInBytes() { public interface Builder extends CopyableBuilder { /** - * Configures the minimum number of bytes of the body of the request required for requests to be converted to their - * multipart equivalent. Only taken into account when converting {@code putObject} and {@code copyObject} requests. - * Any request whose size is less than the configured value will not use multipart operation, - * even if multipart is enabled via {@link S3AsyncClientBuilder#multipartEnabled(Boolean)}. + * Configure the size threshold, in bytes, for when to use multipart upload. Uploads/copies over this size will + * automatically use a multipart upload strategy, while uploads/copies smaller than this threshold will use a single + * connection to upload/copy the whole object. + * *

    + * Multipart uploads are easier to recover from and also potentially faster than single part uploads, especially when the + * upload parts can be uploaded in parallel. Because there are additional network API calls, small objects are still + * recommended to use a single connection for the upload. See + * Uploading and copying objects using + * multipart upload. * - * Default value: 8 Mib + *

    + * By default, it is the same as {@link #minimumPartSizeInBytes(Long)}. * * @param thresholdInBytes the value of the threshold to set. * @return an instance of this builder. diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/S3Presigner.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/S3Presigner.java index c2aa3e457403..81a55e7bece1 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/S3Presigner.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/S3Presigner.java @@ -35,16 +35,19 @@ import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.UploadPartRequest; import software.amazon.awssdk.services.s3.presigner.model.AbortMultipartUploadPresignRequest; import software.amazon.awssdk.services.s3.presigner.model.CompleteMultipartUploadPresignRequest; import software.amazon.awssdk.services.s3.presigner.model.CreateMultipartUploadPresignRequest; +import software.amazon.awssdk.services.s3.presigner.model.DeleteObjectPresignRequest; import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedAbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedCompleteMultipartUploadRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedCreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedDeleteObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedPutObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedUploadPartRequest; @@ -339,6 +342,50 @@ default PresignedPutObjectRequest presignPutObject(Consumer + * Example Usage + * + *
<pre>
    +     * {@code
    +     *     S3Presigner presigner = ...;
    +     *
    +     *     // Create a DeleteObjectRequest to be pre-signed
    +     *     DeleteObjectRequest deleteObjectRequest = ...;
    +     *
    +     *     // Create a DeleteObjectPresignRequest to specify the signature duration
    +     *     DeleteObjectPresignRequest deleteObjectPresignRequest =
    +     *         DeleteObjectPresignRequest.builder()
    +     *                                   .signatureDuration(Duration.ofMinutes(10))
    +     *                                   .deleteObjectRequest(deleteObjectRequest)
    +     *                                   .build();
    +     *
    +     *     // Generate the presigned request
    +     *     PresignedDeleteObjectRequest presignedDeleteObjectRequest =
    +     *         presigner.presignDeleteObject(deleteObjectPresignRequest);
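    +     *
    +     *     // The presigned request is not executed here; it can be sent later, for example by
    +     *     // passing presignedDeleteObjectRequest.httpRequest() to an HTTP client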
    +     * }
    +     * </pre>
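    +     *
    +     * <p>
    +     * Note that the resulting presigned request is not browser executable; it is expected to be executed by an
    +     * HTTP client that sends the signed HTTP request.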
    + */ + PresignedDeleteObjectRequest presignDeleteObject(DeleteObjectPresignRequest request); + + /** + * Presign a {@link DeleteObjectRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + *

    + * This is a shorter method of invoking {@link #presignDeleteObject(DeleteObjectPresignRequest)} without needing + * to call {@code DeleteObjectPresignRequest.builder()} or {@code .build()}. + * + * @see #presignDeleteObject(PresignedDeleteObjectRequest) + */ + default PresignedDeleteObjectRequest presignDeleteObject(Consumer request) { + DeleteObjectPresignRequest.Builder builder = DeleteObjectPresignRequest.builder(); + request.accept(builder); + return presignDeleteObject(builder.build()); + } + + /** * Presign a {@link CreateMultipartUploadRequest} so that it can be executed at a later time without requiring additional * signing or authentication. diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/DeleteObjectPresignRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/DeleteObjectPresignRequest.java new file mode 100644 index 000000000000..3fce17b22f5d --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/DeleteObjectPresignRequest.java @@ -0,0 +1,138 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.presigner.model; + +import java.time.Duration; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.awscore.presigner.PresignRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A request to pre-sign a {@link DeleteObjectRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + * + * @see S3Presigner#presignDeleteObject(DeleteObjectPresignRequest + * @see #builder() + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public final class DeleteObjectPresignRequest extends PresignRequest + implements ToCopyableBuilder { + private final DeleteObjectRequest deleteObjectRequest; + + protected DeleteObjectPresignRequest(DefaultBuilder builder) { + super(builder); + this.deleteObjectRequest = Validate.notNull(builder.deleteObjectRequest, "deleteObjectRequest"); + } + + /** + * Retrieve the {@link DeleteObjectRequest} that should be presigned. + */ + public DeleteObjectRequest deleteObjectRequest() { + return deleteObjectRequest; + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + /** + * Create a builder that can be used to create a {@link DeleteObjectPresignRequest}. 
+ * + * @see S3Presigner#presignDeleteObject(DeleteObjectPresignRequest) + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + DeleteObjectPresignRequest that = (DeleteObjectPresignRequest) o; + + return deleteObjectRequest.equals(that.deleteObjectRequest); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + deleteObjectRequest.hashCode(); + return result; + } + + @SdkPublicApi + @NotThreadSafe + public interface Builder extends PresignRequest.Builder, + CopyableBuilder { + Builder deleteObjectRequest(DeleteObjectRequest deleteObjectRequest); + + default Builder deleteObjectRequest(Consumer deleteObjectRequest) { + DeleteObjectRequest.Builder builder = DeleteObjectRequest.builder(); + deleteObjectRequest.accept(builder); + return deleteObjectRequest(builder.build()); + } + + @Override + Builder signatureDuration(Duration signatureDuration); + + @Override + DeleteObjectPresignRequest build(); + } + + @SdkInternalApi + private static final class DefaultBuilder extends PresignRequest.DefaultBuilder implements Builder { + private DeleteObjectRequest deleteObjectRequest; + + private DefaultBuilder() { + } + + private DefaultBuilder(DeleteObjectPresignRequest deleteObjectPresignRequest) { + super(deleteObjectPresignRequest); + this.deleteObjectRequest = deleteObjectPresignRequest.deleteObjectRequest; + } + + @Override + public Builder deleteObjectRequest(DeleteObjectRequest deleteObjectRequest) { + this.deleteObjectRequest = deleteObjectRequest; + return this; + } + + @Override + public DeleteObjectPresignRequest build() { + return new DeleteObjectPresignRequest(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedDeleteObjectRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedDeleteObjectRequest.java new file mode 100644 index 000000000000..3ce2d2569965 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedDeleteObjectRequest.java @@ -0,0 +1,107 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.presigner.model; + +import java.time.Instant; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.awscore.presigner.PresignedRequest; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A pre-signed a {@link DeleteObjectRequest} that can be executed at a later time without requiring additional signing or + * authentication. + * + * @see S3Presigner#presignDeleteObject(DeleteObjectPresignRequest) + * @see #builder() + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public class PresignedDeleteObjectRequest extends PresignedRequest + implements ToCopyableBuilder { + + protected PresignedDeleteObjectRequest(DefaultBuilder builder) { + super(builder); + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + /** + * Create a builder that can be used to create a {@link PresignedDeleteObjectRequest}. + * + * @see S3Presigner#presignDeleteObject(DeleteObjectPresignRequest) + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + /** + * A builder for a {@link PresignedDeleteObjectRequest}, created with {@link #builder()}. + */ + @SdkPublicApi + @NotThreadSafe + public interface Builder extends PresignedRequest.Builder, + CopyableBuilder { + @Override + Builder expiration(Instant expiration); + + @Override + Builder isBrowserExecutable(Boolean isBrowserExecutable); + + @Override + Builder signedHeaders(Map> signedHeaders); + + @Override + Builder signedPayload(SdkBytes signedPayload); + + @Override + Builder httpRequest(SdkHttpRequest httpRequest); + + @Override + PresignedDeleteObjectRequest build(); + } + + @SdkInternalApi + private static final class DefaultBuilder extends PresignedRequest.DefaultBuilder + implements PresignedDeleteObjectRequest.Builder { + + private DefaultBuilder() { + } + + private DefaultBuilder(PresignedDeleteObjectRequest presignedDeleteObjectRequest) { + super(presignedDeleteObjectRequest); + } + + @Override + public PresignedDeleteObjectRequest build() { + return new PresignedDeleteObjectRequest(this); + } + } +} diff --git a/services/s3/src/main/resources/codegen-resources/endpoint-tests.json b/services/s3/src/main/resources/codegen-resources/endpoint-tests.json index cad90f95e8f1..4b528a224f58 100644 --- a/services/s3/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/s3/src/main/resources/codegen-resources/endpoint-tests.json @@ -303,7 +303,6 @@ "ForcePathStyle": false, "Endpoint": "https://beta.example.com", "Region": "cn-north-1", - "RequiresAccountId": true, "UseDualStack": true, "UseFIPS": false } @@ -940,10 +939,8 @@ "ForcePathStyle": false, "UseArnRegion": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -980,10 +977,8 @@ "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": 
"us-east-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -1022,10 +1017,8 @@ "ForcePathStyle": false, "UseArnRegion": true, "Region": "us-east-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -3842,8 +3835,7 @@ "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -3882,8 +3874,7 @@ "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -3923,8 +3914,7 @@ "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -3963,8 +3953,7 @@ "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4003,8 +3992,7 @@ "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -4044,8 +4032,7 @@ "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": true, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -4073,8 +4060,7 @@ "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -4112,8 +4098,7 @@ "ForcePathStyle": false, "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4152,8 +4137,7 @@ "ForcePathStyle": false, "Region": "cn-north-1", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4219,8 +4203,7 @@ "ForcePathStyle": false, "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4259,8 +4242,7 @@ "ForcePathStyle": false, "Region": "af-south-1", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4300,8 +4282,7 @@ "ForcePathStyle": false, "Region": "af-south-1", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4340,8 +4321,7 @@ "ForcePathStyle": false, "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4380,8 +4360,7 @@ "ForcePathStyle": false, "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -4421,8 +4400,7 @@ "ForcePathStyle": false, "Region": "af-south-1", "UseDualStack": true, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -4450,8 +4428,7 @@ "ForcePathStyle": false, "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -4490,8 +4467,7 @@ "ForcePathStyle": true, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4558,8 +4534,7 @@ "ForcePathStyle": true, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4599,8 +4574,7 @@ "ForcePathStyle": true, "Region": "us-west-2", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4626,10 +4600,8 @@ "Bucket": "arn:PARTITION:s3-outposts:REGION:123456789012:outpost:op-01234567890123456:bucket:mybucket", "ForcePathStyle": true, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - 
"___key": "key" + "UseFIPS": false } }, { @@ -4668,8 +4640,7 @@ "ForcePathStyle": true, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4706,8 +4677,7 @@ "Bucket": "99a_b", "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4746,8 +4716,7 @@ "ForcePathStyle": true, "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4803,8 +4772,7 @@ "ForcePathStyle": true, "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4844,8 +4812,7 @@ "ForcePathStyle": true, "Region": "cn-north-1", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4871,10 +4838,8 @@ "Bucket": "arn:PARTITION:s3-outposts:REGION:123456789012:outpost:op-01234567890123456:bucket:mybucket", "ForcePathStyle": true, "Region": "cn-north-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4913,8 +4878,7 @@ "ForcePathStyle": true, "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4951,8 +4915,7 @@ "Bucket": "99a_b", "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4991,8 +4954,7 @@ "ForcePathStyle": true, "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5060,8 +5022,7 @@ "ForcePathStyle": true, "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5101,8 +5062,7 @@ "ForcePathStyle": true, "Region": "af-south-1", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5128,10 +5088,8 @@ "Bucket": "arn:PARTITION:s3-outposts:REGION:123456789012:outpost:op-01234567890123456:bucket:mybucket", "ForcePathStyle": true, "Region": "af-south-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5170,8 +5128,7 @@ "ForcePathStyle": true, "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5208,8 +5165,7 @@ "Bucket": "99a_b", "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5249,8 +5205,7 @@ "Endpoint": "http://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5291,8 +5246,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5321,8 +5275,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -5351,8 +5304,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "us-west-2", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5381,8 +5333,7 @@ "Endpoint": "http://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { 
@@ -5421,10 +5372,8 @@ "ForcePathStyle": false, "Endpoint": "https://beta.example.com", "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5464,8 +5413,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5506,8 +5454,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5550,8 +5497,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "cn-north-1", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5605,10 +5551,8 @@ "ForcePathStyle": false, "Endpoint": "https://beta.example.com", "Region": "cn-north-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5648,8 +5592,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5690,8 +5633,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5720,8 +5662,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -5750,8 +5691,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "af-south-1", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5780,8 +5720,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5820,10 +5759,8 @@ "ForcePathStyle": false, "Endpoint": "https://beta.example.com", "Region": "af-south-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5860,10 +5797,8 @@ "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5901,10 +5836,8 @@ "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -5930,10 +5863,8 @@ "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5972,10 +5903,8 @@ "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": true, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -6012,10 +5941,8 @@ "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": 
"cn-north-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6028,7 +5955,6 @@ "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "cn-north-1", - "RequiresAccountId": true, "UseDualStack": false, "UseFIPS": true } @@ -6056,10 +5982,8 @@ "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "cn-north-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6072,7 +5996,6 @@ "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "cn-north-1", - "RequiresAccountId": true, "UseDualStack": true, "UseFIPS": true } @@ -6111,10 +6034,8 @@ "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "af-south-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6152,10 +6073,8 @@ "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "af-south-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -6181,10 +6100,8 @@ "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "af-south-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6223,10 +6140,8 @@ "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "af-south-1", - "RequiresAccountId": true, "UseDualStack": true, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -6329,10 +6244,8 @@ "ForcePathStyle": false, "UseArnRegion": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6361,10 +6274,8 @@ "ForcePathStyle": false, "UseArnRegion": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6403,10 +6314,8 @@ "ForcePathStyle": false, "UseArnRegion": true, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6443,10 +6352,8 @@ "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "ForcePathStyle": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6473,10 +6380,8 @@ "ForcePathStyle": false, "UseArnRegion": true, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -7224,10 +7129,8 @@ "ForcePathStyle": false, "UseArnRegion": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3PresignerTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3PresignerTest.java index 760eb86b959a..b413f7b33e01 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3PresignerTest.java +++ 
b/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3PresignerTest.java @@ -23,8 +23,6 @@ import java.time.Clock; import java.time.Duration; import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalDateTime; import java.time.ZoneId; import java.time.ZonedDateTime; import org.assertj.core.data.Offset; @@ -32,32 +30,26 @@ import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; import software.amazon.awssdk.auth.signer.AwsS3V4Signer; -import software.amazon.awssdk.auth.signer.internal.AbstractAws4Signer; import software.amazon.awssdk.auth.signer.internal.AbstractAwsS3V4Signer; -import software.amazon.awssdk.auth.signer.internal.Aws4SignerRequestParams; import software.amazon.awssdk.auth.signer.internal.SignerConstant; import software.amazon.awssdk.auth.signer.params.Aws4PresignerParams; -import software.amazon.awssdk.auth.signer.params.AwsS3V4SignerParams; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.core.SdkSystemSetting; import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.signer.NoOpSigner; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.s3.checksums.ChecksumConstant; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.RequestPayer; import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.services.s3.presigner.model.PresignedDeleteObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedPutObjectRequest; -import software.amazon.awssdk.utils.DateUtils; @RunWith(MockitoJUnitRunner.class) public class S3PresignerTest { @@ -349,6 +341,116 @@ public void putObject_Sigv4PresignerHonorsSignatureDuration() { }); } + @Test + public void deleteObject_IsNotUrlCompatible() { + PresignedDeleteObjectRequest presigned = + presigner.presignDeleteObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .deleteObjectRequest(delo -> delo.bucket("foo34343434") + .key("bar"))); + assertThat(presigned.isBrowserExecutable()).isFalse(); + assertThat(presigned.signedHeaders().keySet()).containsExactlyInAnyOrder("host"); + assertThat(presigned.signedPayload()).isEmpty(); + } + + @Test + public void deleteObject_EndpointOverrideIsIncludedInPresignedUrl() { + S3Presigner presigner = presignerBuilder().endpointOverride(URI.create("http://foo.com")).build(); + PresignedDeleteObjectRequest presigned = + presigner.presignDeleteObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .deleteObjectRequest(delo -> delo.bucket("foo34343434") + .key("bar"))); + + assertThat(presigned.url().toString()).startsWith("http://foo34343434.foo.com/bar?"); + assertThat(presigned.signedHeaders().get("host")).containsExactly("foo34343434.foo.com"); + assertThat(presigned.signedPayload()).isEmpty(); + } + + @Test + public void deleteObject_CredentialsCanBeOverriddenAtTheRequestLevel() { + AwsCredentials clientCredentials = 
AwsBasicCredentials.create("a", "a"); + AwsCredentials requestCredentials = AwsBasicCredentials.create("b", "b"); + + S3Presigner presigner = presignerBuilder().credentialsProvider(() -> clientCredentials).build(); + + + AwsRequestOverrideConfiguration overrideConfiguration = + AwsRequestOverrideConfiguration.builder() + .credentialsProvider(() -> requestCredentials) + .build(); + + PresignedDeleteObjectRequest presignedWithClientCredentials = + presigner.presignDeleteObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .deleteObjectRequest(delo -> delo.bucket("foo34343434") + .key("bar"))); + + PresignedDeleteObjectRequest presignedWithRequestCredentials = + presigner.presignDeleteObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .deleteObjectRequest(delo -> delo.bucket("foo34343434") + .key("bar") + .overrideConfiguration(overrideConfiguration))); + + + assertThat(presignedWithClientCredentials.httpRequest().rawQueryParameters().get("X-Amz-Credential").get(0)) + .startsWith("a"); + assertThat(presignedWithRequestCredentials.httpRequest().rawQueryParameters().get("X-Amz-Credential").get(0)) + .startsWith("b"); + } + + @Test + public void deleteObject_AdditionalHeadersAndQueryStringsCanBeAdded() { + AwsRequestOverrideConfiguration override = + AwsRequestOverrideConfiguration.builder() + .putHeader("X-Amz-AdditionalHeader", "foo1") + .putRawQueryParameter("additionalQueryParam", "foo2") + .build(); + + PresignedDeleteObjectRequest presigned = + presigner.presignDeleteObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .deleteObjectRequest(delo -> delo.bucket("foo34343434") + .key("bar") + .overrideConfiguration(override))); + + assertThat(presigned.isBrowserExecutable()).isFalse(); + assertThat(presigned.signedHeaders()).containsOnlyKeys("host", "x-amz-additionalheader"); + assertThat(presigned.signedHeaders().get("x-amz-additionalheader")).containsExactly("foo1"); + assertThat(presigned.httpRequest().headers()).containsKeys("x-amz-additionalheader"); + assertThat(presigned.httpRequest().rawQueryParameters().get("additionalQueryParam").get(0)).isEqualTo("foo2"); + } + + @Test + public void deleteObject_NonSigV4SignersRaisesException() { + AwsRequestOverrideConfiguration override = + AwsRequestOverrideConfiguration.builder() + .signer(new NoOpSigner()) + .build(); + + assertThatThrownBy(() -> presigner.presignDeleteObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .deleteObjectRequest(delo -> delo.bucket("foo34343434") + .key("bar") + .overrideConfiguration(override)))) + .isInstanceOf(IllegalStateException.class) + .hasMessageContaining("NoOpSigner"); + } + + @Test + public void deleteObject_Sigv4PresignerHonorsSignatureDuration() { + AwsRequestOverrideConfiguration override = + AwsRequestOverrideConfiguration.builder() + .signer(AwsS3V4Signer.create()) + .build(); + + PresignedDeleteObjectRequest presigned = + presigner.presignDeleteObject(r -> r.signatureDuration(Duration.ofSeconds(1234)) + .deleteObjectRequest(delo -> delo.bucket("a") + .key("b") + .overrideConfiguration(override))); + + assertThat(presigned.httpRequest().rawQueryParameters().get("X-Amz-Expires").get(0)).satisfies(expires -> { + assertThat(expires).containsOnlyDigits(); + assertThat(Integer.parseInt(expires)).isCloseTo(1234, Offset.offset(2)); + }); + } + @Test public void getObject_S3ConfigurationCanBeOverriddenToLeverageTransferAcceleration() { S3Presigner presigner = presignerBuilder().serviceConfiguration(S3Configuration.builder() diff --git 
a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartConfigurationResolverTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartConfigurationResolverTest.java new file mode 100644 index 000000000000..99e929c09f4e --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartConfigurationResolverTest.java @@ -0,0 +1,83 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.multipart; + +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.services.s3.multipart.MultipartConfiguration; + +public class MultipartConfigurationResolverTest { + + @Test + void resolveThresholdInBytes_valueNotProvided_shouldSameAsPartSize() { + MultipartConfiguration configuration = MultipartConfiguration.builder() + .minimumPartSizeInBytes(10L) + .build(); + MultipartConfigurationResolver resolver = new MultipartConfigurationResolver(configuration); + assertThat(resolver.thresholdInBytes()).isEqualTo(10L); + } + + @Test + void resolveThresholdInBytes_valueProvided_shouldHonor() { + MultipartConfiguration configuration = MultipartConfiguration.builder() + .minimumPartSizeInBytes(1L) + .thresholdInBytes(12L) + .build(); + MultipartConfigurationResolver resolver = new MultipartConfigurationResolver(configuration); + assertThat(resolver.thresholdInBytes()).isEqualTo(12L); + } + + @Test + void resolveApiCallBufferSize_valueProvided_shouldHonor() { + MultipartConfiguration configuration = MultipartConfiguration.builder() + .apiCallBufferSizeInBytes(100L) + .build(); + MultipartConfigurationResolver resolver = new MultipartConfigurationResolver(configuration); + assertThat(resolver.apiCallBufferSize()).isEqualTo(100L); + } + + @Test + void resolveApiCallBufferSize_valueNotProvided_shouldComputeBasedOnPartSize() { + MultipartConfiguration configuration = MultipartConfiguration.builder() + .minimumPartSizeInBytes(10L) + .build(); + MultipartConfigurationResolver resolver = new MultipartConfigurationResolver(configuration); + assertThat(resolver.apiCallBufferSize()).isEqualTo(40L); + } + + @Test + void valueProvidedForAllFields_shouldHonor() { + MultipartConfiguration configuration = MultipartConfiguration.builder() + .minimumPartSizeInBytes(10L) + .thresholdInBytes(8L) + .apiCallBufferSizeInBytes(3L) + .build(); + MultipartConfigurationResolver resolver = new MultipartConfigurationResolver(configuration); + assertThat(resolver.minimalPartSizeInBytes()).isEqualTo(10L); + assertThat(resolver.thresholdInBytes()).isEqualTo(8L); + assertThat(resolver.apiCallBufferSize()).isEqualTo(3L); + } + + @Test + void noValueProvided_shouldUseDefault() { + MultipartConfigurationResolver resolver = new MultipartConfigurationResolver(MultipartConfiguration.builder() + .build()); + assertThat(resolver.minimalPartSizeInBytes()).isEqualTo(8L * 1024 * 1024); + 
assertThat(resolver.thresholdInBytes()).isEqualTo(8L * 1024 * 1024); + assertThat(resolver.apiCallBufferSize()).isEqualTo(8L * 1024 * 1024 * 4); + } +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelperTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelperTest.java index 11d54a73fb72..9758b77a9d84 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelperTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelperTest.java @@ -38,6 +38,7 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; @@ -61,6 +62,7 @@ import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.services.s3.model.UploadPartRequest; import software.amazon.awssdk.services.s3.model.UploadPartResponse; +import software.amazon.awssdk.services.s3.multipart.MultipartConfiguration; import software.amazon.awssdk.testutils.RandomTempFile; import software.amazon.awssdk.utils.CompletableFutureUtils; @@ -97,7 +99,12 @@ public static Stream asyncRequestBody() { @BeforeEach public void beforeEach() { s3AsyncClient = Mockito.mock(S3AsyncClient.class); - uploadHelper = new UploadObjectHelper(s3AsyncClient, PART_SIZE, THRESHOLD, PART_SIZE * 2); + uploadHelper = new UploadObjectHelper(s3AsyncClient, + new MultipartConfigurationResolver(MultipartConfiguration.builder() + .minimumPartSizeInBytes(PART_SIZE) + .thresholdInBytes(THRESHOLD) + .thresholdInBytes(PART_SIZE * 2) + .build())); } @ParameterizedTest @@ -187,7 +194,8 @@ void mpu_onePartFailed_shouldFailOtherPartsAndAbort(AsyncRequestBody asyncReques CompletableFuture future = uploadHelper.uploadObject(putObjectRequest, asyncRequestBody); - assertThatThrownBy(future::join).hasMessageContaining("Failed to send multipart upload requests").hasRootCause(exception); + assertThatThrownBy(() -> future.get(100, TimeUnit.MILLISECONDS)) + .hasMessageContaining("Failed to send multipart upload requests").hasRootCause(exception); verify(s3AsyncClient, never()).completeMultipartUpload(any(CompleteMultipartUploadRequest.class)); @@ -197,10 +205,10 @@ void mpu_onePartFailed_shouldFailOtherPartsAndAbort(AsyncRequestBody asyncReques assertThat(actualRequest.uploadId()).isEqualTo(UPLOAD_ID); try { - ongoingRequest.get(1, TimeUnit.MILLISECONDS); + ongoingRequest.get(100, TimeUnit.MILLISECONDS); fail("no exception thrown"); } catch (Exception e) { - assertThat(e.getCause()).hasMessageContaining("Failed to send multipart upload requests").hasRootCause(exception); + assertThat(e.getCause()).isEqualTo(exception); } } @@ -241,9 +249,17 @@ void upload_knownContentLengthCancelResponseFuture_shouldCancelUploadPart() { CompletableFuture future = uploadHelper.uploadObject(putObjectRequest, AsyncRequestBody.fromFile(testFile)); + when(s3AsyncClient.abortMultipartUpload(any(AbortMultipartUploadRequest.class))) + .thenReturn(CompletableFuture.completedFuture(AbortMultipartUploadResponse.builder().build())); + future.cancel(true); - assertThat(ongoingRequest).isCancelled(); + try { + ongoingRequest.join(); + fail("no exception"); + } catch (Exception exception) { + 
assertThat(ongoingRequest).isCancelled(); + } } @ParameterizedTest diff --git a/services/s3control/pom.xml b/services/s3control/pom.xml index 17e829b4330a..fac6053fc2c9 100644 --- a/services/s3control/pom.xml +++ b/services/s3control/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT s3control AWS Java SDK :: Services :: Amazon S3 Control diff --git a/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptor.java b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptor.java deleted file mode 100644 index 40930fae167f..000000000000 --- a/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptor.java +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.s3control.internal.interceptors; - - -import static software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute.SERVICE_SIGNING_NAME; -import static software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute.SIGNING_REGION; -import static software.amazon.awssdk.core.interceptor.SdkExecutionAttribute.CLIENT_ENDPOINT; -import static software.amazon.awssdk.core.interceptor.SdkExecutionAttribute.ENDPOINT_OVERRIDDEN; -import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.ENDPOINT_PREFIX; -import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.S3_OUTPOSTS; -import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.isDualstackEnabled; -import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.isFipsEnabledInClientConfig; -import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.isFipsRegion; -import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.isFipsRegion; -import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.isUseArnRegionEnabledInClientConfig; -import static software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute.S3_ARNABLE_FIELD; - -import java.net.URI; -import java.util.Optional; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.arns.Arn; -import software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute; -import software.amazon.awssdk.core.SdkRequest; -import software.amazon.awssdk.core.interceptor.Context; -import software.amazon.awssdk.core.interceptor.ExecutionAttributes; -import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; -import software.amazon.awssdk.http.SdkHttpRequest; -import software.amazon.awssdk.regions.PartitionMetadata; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.s3.internal.resource.S3OutpostResource; -import software.amazon.awssdk.services.s3.internal.resource.S3Resource; -import 
software.amazon.awssdk.services.s3.internal.settingproviders.UseArnRegionProviderChain; -import software.amazon.awssdk.services.s3control.S3ControlConfiguration; -import software.amazon.awssdk.services.s3control.internal.S3ArnableField; -import software.amazon.awssdk.services.s3control.internal.S3ControlArnConverter; -import software.amazon.awssdk.utils.StringUtils; -import software.amazon.awssdk.utils.Validate; - -/** - * Execution interceptor which modifies the HTTP request to S3 Control to - * change the endpoint to the correct endpoint. This includes prefixing the AWS - * account identifier and, when enabled, adding in FIPS and dualstack. - */ -@SdkInternalApi -public final class EndpointAddressInterceptor implements ExecutionInterceptor { - private static final String X_AMZ_OUTPOST_ID_HEADER = "x-amz-outpost-id"; - private static final UseArnRegionProviderChain USE_ARN_REGION_RESOLVER = UseArnRegionProviderChain.create(); - - @Override - public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, - ExecutionAttributes executionAttributes) { - Optional requestArn = getRequestArn(executionAttributes); - - if (requestArn.isPresent()) { - return resolveHostForOutpostArnRequest(context.httpRequest(), executionAttributes, requestArn.get()); - } else if (isNonArnOutpostRequest(context.request())) { - return resolveHostForOutpostNonArnRequest(context.httpRequest(), executionAttributes); - } else { - return resolveHostForNonOutpostNonArnRequest(context.httpRequest(), executionAttributes); - } - } - - private SdkHttpRequest resolveHostForOutpostArnRequest(SdkHttpRequest request, - ExecutionAttributes executionAttributes, - Arn arn) { - S3Resource s3Resource = S3ControlArnConverter.getInstance().convertArn(arn); - - S3ControlConfiguration serviceConfig = getServiceConfig(executionAttributes); - String signingRegion = executionAttributes.getAttribute(SIGNING_REGION).id(); - String arnRegion = s3Resource.region().orElseThrow(() -> new IllegalArgumentException("Region is missing from ARN.")); - String arnPartion = arn.partition(); - S3Resource parentS3Resource = s3Resource.parentS3Resource().orElse(null); - - Validate.isTrue(!isFipsInvolved(signingRegion, arnRegion, serviceConfig), - "FIPS is not supported for outpost requests."); - - // Even though we validated that we're not *calling* a FIPS region, the client region may still be a FIPS region if we're - // using the ARN region. For that reason, we need to strip off the "fips" from the signing region before we get the - // partition to make sure we're not making a cross-partition call. - signingRegion = removeFipsIfNeeded(signingRegion); - - String signingPartition = PartitionMetadata.of(Region.of(signingRegion)).id(); - - S3OutpostResource outpostResource = Validate.isInstanceOf(S3OutpostResource.class, parentS3Resource, - "The ARN passed must have a parent outpost resource."); - Validate.isTrue(!isDualstackEnabled(serviceConfig), "Dual stack endpoints are not supported for outpost requests."); - Validate.isTrue(arnPartion.equals(signingPartition), - "The partition field of the ARN being passed as a bucket parameter to an S3 operation does not match " - + "the partition the client has been configured with. Provided partition: '%s'; client partition: '%s'.", - arnPartion, signingPartition); - Validate.isTrue(useArnRegion(serviceConfig) || arnRegion.equals(signingRegion), - "The region field of the ARN being passed as a bucket parameter to an operation does not match the " - + "region the client was configured with. 
Provided region: '%s'; client region: '%s'.", - arnRegion, signingRegion); - - executionAttributes.putAttribute(SIGNING_REGION, Region.of(arnRegion)); - executionAttributes.putAttribute(SERVICE_SIGNING_NAME, S3_OUTPOSTS); - - SdkHttpRequest.Builder requestBuilder = request.toBuilder() - .appendHeader(X_AMZ_OUTPOST_ID_HEADER, outpostResource.outpostId()); - - if (isEndpointOverridden(executionAttributes)) { - // Drop endpoint prefix for ARN-based requests - requestBuilder.host(endpointOverride(executionAttributes).getHost()); - } else { - String arnPartitionDnsSuffix = PartitionMetadata.of(arnPartion).dnsSuffix(); - requestBuilder.host(String.format("s3-outposts.%s.%s", arnRegion, arnPartitionDnsSuffix)); - } - - return requestBuilder.build(); - } - - private SdkHttpRequest resolveHostForOutpostNonArnRequest(SdkHttpRequest sdkHttpRequest, - ExecutionAttributes executionAttributes) { - S3ControlConfiguration serviceConfig = getServiceConfig(executionAttributes); - Region signingRegion = executionAttributes.getAttribute(SIGNING_REGION); - - Validate.isTrue(!isDualstackEnabled(serviceConfig), - "Dual stack is not supported for outpost requests."); - Validate.isTrue(!isFipsEnabledInClientConfig(serviceConfig) && !isFipsRegion(signingRegion.id()), - "FIPS endpoints are not supported for outpost requests."); - - executionAttributes.putAttribute(SERVICE_SIGNING_NAME, S3_OUTPOSTS); - - if (isEndpointOverridden(executionAttributes)) { - // Preserve endpoint prefix for endpoint-overridden non-ARN-based requests - return sdkHttpRequest; - } else { - String signingDnsSuffix = PartitionMetadata.of(signingRegion).dnsSuffix(); - return sdkHttpRequest.copy(r -> r.host(String.format("s3-outposts.%s.%s", signingRegion, signingDnsSuffix))); - } - } - - private SdkHttpRequest resolveHostForNonOutpostNonArnRequest(SdkHttpRequest request, - ExecutionAttributes executionAttributes) { - S3ControlConfiguration serviceConfig = getServiceConfig(executionAttributes); - - boolean isDualStackEnabled = isDualstackEnabled(serviceConfig); - boolean isFipsEnabledInClient = isFipsEnabledInClientConfig(serviceConfig); - - Validate.isTrue(!isDualStackEnabled || !isFipsEnabledInClient, "Dual stack and FIPS are not supported together."); - - if (isEndpointOverridden(executionAttributes)) { - Validate.isTrue(!isDualStackEnabled, "Dual stack is not supported with endpoint overrides."); - Validate.isTrue(!isFipsEnabledInClient, "FIPS is not supported with endpoint overrides."); - // Preserve endpoint prefix for endpoint-overridden non-ARN-based requests - return request; - } else if (isDualStackEnabled) { - String newEndpointPrefix = ENDPOINT_PREFIX + "." 
+ "dualstack"; - return request.copy(r -> r.host(StringUtils.replace(request.host(), ENDPOINT_PREFIX, newEndpointPrefix))); - } else if (isFipsEnabledInClient) { - String newEndpointPrefix = ENDPOINT_PREFIX + "-" + "fips"; - return request.copy(r -> r.host(StringUtils.replace(request.host(), ENDPOINT_PREFIX, newEndpointPrefix))); - } else { - return request; - } - } - - private Optional getRequestArn(ExecutionAttributes executionAttributes) { - return Optional.ofNullable(executionAttributes.getAttribute(S3_ARNABLE_FIELD)) - .map(S3ArnableField::arn); - } - - private boolean isNonArnOutpostRequest(SdkRequest request) { - return request.getValueForField("OutpostId", String.class) - .map(StringUtils::isNotBlank) - .orElse(false); - } - - private S3ControlConfiguration getServiceConfig(ExecutionAttributes executionAttributes) { - return (S3ControlConfiguration) executionAttributes.getAttribute(AwsSignerExecutionAttribute.SERVICE_CONFIG); - } - - private boolean useArnRegion(S3ControlConfiguration configuration) { - // If useArnRegion is false, it was not set to false by the customer, it was simply not enabled - if (isUseArnRegionEnabledInClientConfig(configuration)) { - return true; - } - - return USE_ARN_REGION_RESOLVER.resolveUseArnRegion().orElse(false); - } - - private boolean isEndpointOverridden(ExecutionAttributes executionAttributes) { - return Boolean.TRUE.equals(executionAttributes.getAttribute(ENDPOINT_OVERRIDDEN)); - } - - private URI endpointOverride(ExecutionAttributes executionAttributes) { - return executionAttributes.getAttribute(CLIENT_ENDPOINT); - } - - private boolean isFipsInvolved(String signingRegion, String arnRegion, S3ControlConfiguration serviceConfig) { - if (serviceConfig.fipsModeEnabled()) { - return true; - } - - return isFipsRegion(signingRegion) || isFipsRegion(arnRegion); - } - - private String removeFipsIfNeeded(String region) { - if (region.startsWith("fips-")) { - return StringUtils.replace(region, "fips-", ""); - } - - if (region.endsWith("-fips")) { - return StringUtils.replace(region, "-fips", ""); - } - return region; - } -} diff --git a/services/s3control/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/s3control/src/main/resources/codegen-resources/endpoint-rule-set.json index ac2587dad9ad..8e4ec39fae7a 100644 --- a/services/s3control/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/s3control/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -61,148 +61,265 @@ }, "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "stringEquals", "argv": [ { "ref": "Region" + }, + "snow" + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" } ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "partitionResult" + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "S3 Snow does not support DualStack", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "S3 Snow does not support FIPS", + "type": "error" + }, + { + "conditions": [], + 
"endpoint": { + "url": "{url#scheme}://{url#authority}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "OutpostId" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "partitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" }, - "snow" + true ] }, + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "partitionResult" + }, + "name" + ] + }, + "aws-cn" + ] + } + ], + "error": "Partition does not support FIPS", + "type": "error" + }, + { + "conditions": [ { "fn": "isSet", "argv": [ { - "ref": "Endpoint" + "ref": "RequiresAccountId" } ] }, { - "fn": "parseURL", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" + "ref": "RequiresAccountId" + }, + true + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] } - ], - "assign": "url" + ] } ], - "type": "tree", - "rules": [ + "error": "AccountId is required but not set", + "type": "error" + }, + { + "conditions": [ { - "conditions": [ + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] + }, + { + "fn": "not", + "argv": [ { - "fn": "aws.partition", + "fn": "isValidHostLabel", "argv": [ { - "ref": "Region" - } - ], - "assign": "partitionResult" + "ref": "AccountId" + }, + false + ] } - ], - "type": "tree", - "rules": [ + ] + } + ], + "error": "AccountId must only contain a-z, A-Z, 0-9 and `-`.", + "type": "error" + }, + { + "conditions": [ + { + "fn": "not", + "argv": [ { - "conditions": [], - "type": "tree", - "rules": [ + "fn": "isValidHostLabel", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "S3 Snow does not support Dual-stack", - "type": "error" + "ref": "OutpostId" }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "S3 Snow does not support FIPS", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": "{url#scheme}://{url#authority}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - } + false ] } ] - }, - { - "conditions": [], - "error": "A valid partition could not be determined", - "type": "error" } - ] + ], + "error": "OutpostId must only contain a-z, A-Z, 0-9 and `-`.", + "type": "error" }, { "conditions": [ { - "fn": "isSet", + "fn": "isValidHostLabel", "argv": [ { - "ref": "OutpostId" - } + "ref": "Region" + }, + true ] } ], @@ -211,318 +328,716 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid configuration: Outposts do not support dual-stack", + "type": "error" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } ], - "assign": "partitionResult" + "assign": "url" } ], - "type": "tree", - "rules": 
[ + "endpoint": { + "url": "{url#scheme}://{url#authority}{url#path}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "partitionResult" - }, - "name" - ] - }, - "aws-cn" - ] - } - ], - "error": "Partition does not support FIPS", - "type": "error" + "ref": "UseFIPS" }, + true + ] + } + ], + "endpoint": { + "url": "https://s3-outposts-fips.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "RequiresAccountId" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "RequiresAccountId" - }, - true - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "isSet", - "argv": [ + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://s3-outposts.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "Invalid region: region was not a valid DNS name.", + "type": "error" + } + ] + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccessPointName" + } + ] + }, + { + "fn": "aws.parseArn", + "argv": [ + { + "ref": "AccessPointName" + } + ], + "assign": "accessPointArn" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "accessPointArn" + }, + "resourceId[0]" + ], + "assign": "arnType" + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "arnType" + }, + "" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "accessPointArn" + }, + "service" + ] + }, + "s3-outposts" + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid configuration: Outpost Access Points do not support dual-stack", + "type": "error" + }, + { + "conditions": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "accessPointArn" + }, + "resourceId[1]" + ], + "assign": "outpostId" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isValidHostLabel", + "argv": [ + { + "ref": "outpostId" + }, + false + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "UseArnRegion" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseArnRegion" + }, + false + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ { - "ref": "AccountId" - } + "ref": "accessPointArn" + }, + "region" ] - } + }, + "{Region}" ] } + ] + } + ], + 
"error": "Invalid configuration: region from ARN `{accessPointArn#region}` does not match client region `{Region}` and UseArnRegion is `false`", + "type": "error" + }, + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } ], - "error": "AccountId is required but not set", - "type": "error" - }, + "assign": "partitionResult" + } + ], + "type": "tree", + "rules": [ { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "aws.partition", + "argv": [ { - "fn": "isSet", + "fn": "getAttr", "argv": [ { - "ref": "AccountId" - } + "ref": "accessPointArn" + }, + "region" ] - }, + } + ], + "assign": "arnPartition" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ { - "fn": "not", + "fn": "stringEquals", "argv": [ { - "fn": "isValidHostLabel", + "fn": "getAttr", + "argv": [ + { + "ref": "arnPartition" + }, + "name" + ] + }, + { + "fn": "getAttr", "argv": [ { - "ref": "AccountId" + "ref": "partitionResult" }, - false + "name" ] } ] } ], - "error": "AccountId must only contain a-z, A-Z, 0-9 and `-`.", - "type": "error" - }, - { - "conditions": [], "type": "tree", "rules": [ { "conditions": [ { - "fn": "not", + "fn": "isValidHostLabel", "argv": [ { - "fn": "isValidHostLabel", + "fn": "getAttr", "argv": [ { - "ref": "OutpostId" + "ref": "accessPointArn" }, - false + "region" ] - } + }, + true ] } ], - "error": "OutpostId must only contain a-z, A-Z, 0-9 and `-`.", - "type": "error" - }, - { - "conditions": [], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isValidHostLabel", + "fn": "not", "argv": [ { - "ref": "Region" - }, - true + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "accessPointArn" + }, + "accountId" + ] + }, + "" + ] + } ] } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "isValidHostLabel", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "accessPointArn" + }, + "accountId" + ] + }, + false + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "AccountId" + } + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "AccountId" + }, + "{accessPointArn#accountId}" + ] + } ] } ], - "error": "Invalid configuration: Outposts do not support dual-stack", + "error": "Invalid ARN: the accountId specified in the ARN (`{accessPointArn#accountId}`) does not match the parameter (`{AccountId}`)", "type": "error" }, { - "conditions": [], + "conditions": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "accessPointArn" + }, + "resourceId[2]" + ], + "assign": "outpostType" + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - }, - { - "fn": "parseURL", + "fn": "getAttr", "argv": [ { - "ref": "Endpoint" - } + "ref": "accessPointArn" + }, + "resourceId[3]" ], - "assign": "url" + "assign": "accessPointName" } ], - "endpoint": { - "url": "{url#scheme}://{url#authority}{url#path}", - "properties": { - "authSchemes": [ + "type": "tree", + "rules": [ + { + "conditions": [ { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + "fn": "stringEquals", + "argv": [ + { + "ref": "outpostType" + }, + "accesspoint" + ] + } + ], + "type": "tree", + "rules": [ { - 
"ref": "UseFIPS" + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://s3-outposts-fips.{accessPointArn#region}.{arnPartition#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{accessPointArn#region}" + } + ] + }, + "headers": { + "x-amz-account-id": [ + "{accessPointArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] + } + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" + } + ], + "endpoint": { + "url": "{url#scheme}://{url#authority}{url#path}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{accessPointArn#region}" + } + ] + }, + "headers": { + "x-amz-account-id": [ + "{accessPointArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] + } + }, + "type": "endpoint" }, - true - ] - } - ], - "endpoint": { - "url": "https://s3-outposts-fips.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{Region}" + "conditions": [], + "endpoint": { + "url": "https://s3-outposts.{accessPointArn#region}.{arnPartition#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{accessPointArn#region}" + } + ] + }, + "headers": { + "x-amz-account-id": [ + "{accessPointArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] + } + }, + "type": "endpoint" } ] }, - "headers": {} - }, - "type": "endpoint" + { + "conditions": [], + "error": "Expected an outpost type `accesspoint`, found `{outpostType}`", + "type": "error" + } + ] }, { "conditions": [], - "endpoint": { - "url": "https://s3-outposts.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid ARN: expected an access point name", + "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid ARN: Expected a 4-component resource", + "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. 
Found: `{accessPointArn#accountId}`", + "type": "error" } ] }, { "conditions": [], - "error": "Invalid region: region was not a valid DNS name.", + "error": "Invalid ARN: missing account ID", "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid region in ARN: `{accessPointArn#region}` (invalid DNS name)", + "type": "error" } ] + }, + { + "conditions": [], + "error": "Client was configured for partition `{partitionResult#name}` but ARN has `{arnPartition#name}`", + "type": "error" } ] } ] } ] + }, + { + "conditions": [], + "error": "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`., found: `{outpostId}`", + "type": "error" } ] }, { "conditions": [], - "error": "A valid partition could not be determined", + "error": "Invalid ARN: The Outpost Id was not set", "type": "error" } ] + } + ] + }, + { + "conditions": [], + "error": "Invalid ARN: No ARN type specified", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Bucket" + } + ] + }, + { + "fn": "aws.parseArn", + "argv": [ + { + "ref": "Bucket" + } + ], + "assign": "bucketArn" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "resourceId[0]" + ], + "assign": "arnType" }, { - "conditions": [ + "fn": "not", + "argv": [ { - "fn": "isSet", + "fn": "stringEquals", "argv": [ { - "ref": "AccessPointName" - } + "ref": "arnType" + }, + "" ] - }, + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ { - "fn": "aws.parseArn", + "fn": "stringEquals", "argv": [ { - "ref": "AccessPointName" - } - ], - "assign": "accessPointArn" + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "service" + ] + }, + "s3-outposts" + ] } ], "type": "tree", @@ -530,28 +1045,29 @@ { "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "accessPointArn" + "ref": "UseDualStack" }, - "resourceId[0]" - ], - "assign": "arnType" - }, + true + ] + } + ], + "error": "Invalid configuration: Outpost buckets do not support dual-stack", + "type": "error" + }, + { + "conditions": [ { - "fn": "not", + "fn": "getAttr", "argv": [ { - "fn": "stringEquals", - "argv": [ - { - "ref": "arnType" - }, - "" - ] - } - ] + "ref": "bucketArn" + }, + "resourceId[1]" + ], + "assign": "outpostId" } ], "type": "tree", @@ -559,18 +1075,12 @@ { "conditions": [ { - "fn": "stringEquals", + "fn": "isValidHostLabel", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "accessPointArn" - }, - "service" - ] + "ref": "outpostId" }, - "s3-outposts" + false ] } ], @@ -578,40 +1088,107 @@ "rules": [ { "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "UseArnRegion" + } + ] + }, { "fn": "booleanEquals", "argv": [ { - "ref": "UseDualStack" + "ref": "UseArnRegion" }, - true + false + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "region" + ] + }, + "{Region}" + ] + } ] } ], - "error": "Invalid configuration: Outpost Access Points do not support dual-stack", + "error": "Invalid configuration: region from ARN `{bucketArn#region}` does not match client region `{Region}` and UseArnRegion is `false`", "type": "error" }, { - "conditions": [], + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "region" + ] + } + ], + "assign": "arnPartition" + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "getAttr", + "fn": 
"aws.partition", "argv": [ { - "ref": "accessPointArn" - }, - "resourceId[1]" + "ref": "Region" + } ], - "assign": "outpostId" + "assign": "partitionResult" } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "arnPartition" + }, + "name" + ] + }, + { + "fn": "getAttr", + "argv": [ + { + "ref": "partitionResult" + }, + "name" + ] + } + ] + } + ], "type": "tree", "rules": [ { @@ -620,522 +1197,439 @@ "fn": "isValidHostLabel", "argv": [ { - "ref": "outpostId" + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "region" + ] }, - false + true ] } ], "type": "tree", "rules": [ { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "not", + "argv": [ { - "fn": "isSet", + "fn": "stringEquals", "argv": [ { - "ref": "UseArnRegion" - } + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "accountId" + ] + }, + "" ] - }, + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ { - "fn": "booleanEquals", + "fn": "isValidHostLabel", "argv": [ { - "ref": "UseArnRegion" + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "accountId" + ] }, false ] - }, + } + ], + "type": "tree", + "rules": [ { - "fn": "not", - "argv": [ + "conditions": [ { - "fn": "stringEquals", + "fn": "isSet", "argv": [ { - "fn": "getAttr", + "ref": "AccountId" + } + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", "argv": [ { - "ref": "accessPointArn" + "ref": "AccountId" }, - "region" + "{bucketArn#accountId}" ] - }, - "{Region}" + } ] } - ] - } - ], - "error": "Invalid configuration: region from ARN `{accessPointArn#region}` does not match client region `{Region}` and UseArnRegion is `false`", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ + ], + "error": "Invalid ARN: the accountId specified in the ARN (`{bucketArn#accountId}`) does not match the parameter (`{AccountId}`)", + "type": "error" + }, { "conditions": [ { - "fn": "aws.partition", + "fn": "getAttr", "argv": [ { - "ref": "Region" - } + "ref": "bucketArn" + }, + "resourceId[2]" ], - "assign": "partitionResult" + "assign": "outpostType" } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "resourceId[3]" + ], + "assign": "bucketName" + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "aws.partition", + "fn": "stringEquals", "argv": [ { - "fn": "getAttr", + "ref": "outpostType" + }, + "bucket" + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "accessPointArn" + "ref": "UseFIPS" }, - "region" + true ] } ], - "assign": "arnPartition" - } - ], - "type": "tree", - "rules": [ + "endpoint": { + "url": "https://s3-outposts-fips.{bucketArn#region}.{arnPartition#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{bucketArn#region}" + } + ] + }, + "headers": { + "x-amz-account-id": [ + "{bucketArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] + } + }, + "type": "endpoint" + }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, { - "conditions": [ + "fn": "parseURL", + "argv": [ { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": 
"arnPartition" - }, - "name" - ] - }, - { - "fn": "getAttr", - "argv": [ - { - "ref": "partitionResult" - }, - "name" - ] - } - ] + "ref": "Endpoint" } ], - "type": "tree", - "rules": [ + "assign": "url" + } + ], + "endpoint": { + "url": "{url#scheme}://{url#authority}{url#path}", + "properties": { + "authSchemes": [ { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "accessPointArn" - }, - "region" - ] - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "not", - "argv": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "accessPointArn" - }, - "accountId" - ] - }, - "" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "accessPointArn" - }, - "accountId" - ] - }, - false - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "AccountId" - }, - "{accessPointArn#accountId}" - ] - } - ] - } - ], - "error": "Invalid ARN: the accountId specified in the ARN (`{accessPointArn#accountId}`) does not match the parameter (`{AccountId}`)", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "accessPointArn" - }, - "resourceId[2]" - ], - "assign": "outpostType" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "accessPointArn" - }, - "resourceId[3]" - ], - "assign": "accessPointName" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "outpostType" - }, - "accesspoint" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "endpoint": { - "url": "https://s3-outposts-fips.{accessPointArn#region}.{arnPartition#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{accessPointArn#region}" - } - ] - }, - "headers": { - "x-amz-account-id": [ - "{accessPointArn#accountId}" - ], - "x-amz-outpost-id": [ - "{outpostId}" - ] - } - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" - } - ], - "endpoint": { - "url": "{url#scheme}://{url#authority}{url#path}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{accessPointArn#region}" - } - ] - }, - "headers": { - "x-amz-account-id": [ - "{accessPointArn#accountId}" - ], - "x-amz-outpost-id": [ - "{outpostId}" - ] - } - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": 
"https://s3-outposts.{accessPointArn#region}.{arnPartition#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{accessPointArn#region}" - } - ] - }, - "headers": { - "x-amz-account-id": [ - "{accessPointArn#accountId}" - ], - "x-amz-outpost-id": [ - "{outpostId}" - ] - } - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Expected an outpost type `accesspoint`, found `{outpostType}`", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: expected an access point name", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: Expected a 4-component resource", - "type": "error" - } - ] - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `{accessPointArn#accountId}`", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: missing account ID", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid region in ARN: `{accessPointArn#region}` (invalid DNS name)", - "type": "error" - } - ] + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{bucketArn#region}" } ] }, - { - "conditions": [], - "error": "Client was configured for partition `{partitionResult#name}` but ARN has `{arnPartition#name}`", - "type": "error" + "headers": { + "x-amz-account-id": [ + "{bucketArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] } - ] + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://s3-outposts.{bucketArn#region}.{arnPartition#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{bucketArn#region}" + } + ] + }, + "headers": { + "x-amz-account-id": [ + "{bucketArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] + } + }, + "type": "endpoint" } ] }, { "conditions": [], - "error": "Could not load partition for ARN region `{accessPointArn#region}`", + "error": "Invalid ARN: Expected an outpost type `bucket`, found `{outpostType}`", "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid ARN: expected a bucket name", + "type": "error" } ] }, { "conditions": [], - "error": "A valid partition could not be determined", + "error": "Invalid ARN: Expected a 4-component resource", "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. 
Found: `{bucketArn#accountId}`", + "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid ARN: missing account ID", + "type": "error" } ] }, { "conditions": [], - "error": "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`., found: `{outpostId}`", + "error": "Invalid region in ARN: `{bucketArn#region}` (invalid DNS name)", "type": "error" } ] + }, + { + "conditions": [], + "error": "Client was configured for partition `{partitionResult#name}` but ARN has `{arnPartition#name}`", + "type": "error" } ] - }, - { - "conditions": [], - "error": "Invalid ARN: The Outpost Id was not set", - "type": "error" } ] } ] + }, + { + "conditions": [], + "error": "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`., found: `{outpostId}`", + "type": "error" } ] }, { "conditions": [], - "error": "Invalid ARN: No ARN type specified", + "error": "Invalid ARN: The Outpost Id was not set", "type": "error" } ] + } + ] + }, + { + "conditions": [], + "error": "Invalid ARN: No ARN type specified", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "partitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isValidHostLabel", + "argv": [ + { + "ref": "Region" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "partitionResult" + }, + "name" + ] + }, + "aws-cn" + ] + } + ], + "error": "Partition does not support FIPS", + "type": "error" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "RequiresAccountId" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "RequiresAccountId" + }, + true + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] + } + ] + } + ], + "error": "AccountId is required but not set", + "type": "error" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "isValidHostLabel", + "argv": [ + { + "ref": "AccountId" + }, + false + ] + } + ] + } + ], + "error": "AccountId must only contain a-z, A-Z, 0-9 and `-`.", + "type": "error" }, { "conditions": [ @@ -1143,18 +1637,18 @@ "fn": "isSet", "argv": [ { - "ref": "Bucket" + "ref": "Endpoint" } ] }, { - "fn": "aws.parseArn", + "fn": "parseURL", "argv": [ { - "ref": "Bucket" + "ref": "Endpoint" } ], - "assign": "bucketArn" + "assign": "url" } ], "type": "tree", @@ -1162,1304 +1656,493 @@ { "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "bucketArn" + "ref": "UseDualStack" }, - "resourceId[0]" - ], - "assign": "arnType" + true + ] + } + ], + "error": "Invalid Configuration: DualStack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "RequiresAccountId" + } + ] }, { - "fn": "not", + "fn": "booleanEquals", "argv": [ { - "fn": "stringEquals", - "argv": [ - { - "ref": "arnType" - }, - "" - ] + "ref": "RequiresAccountId" + }, + true + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [ + "endpoint": { + "url": "{url#scheme}://{AccountId}.{url#authority}{url#path}", + "properties": { + "authSchemes": [ { - "fn": "stringEquals", - "argv": [ - { 
- "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "service" - ] - }, - "s3-outposts" - ] + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid configuration: Outpost buckets do not support dual-stack", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "resourceId[1]" - ], - "assign": "outpostId" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "ref": "outpostId" - }, - false - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "UseArnRegion" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseArnRegion" - }, - false - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "region" - ] - }, - "{Region}" - ] - } - ] - } - ], - "error": "Invalid configuration: region from ARN `{bucketArn#region}` does not match client region `{Region}` and UseArnRegion is `false`", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "region" - ] - } - ], - "assign": "arnPartition" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", - "argv": [ - { - "ref": "Region" - } - ], - "assign": "partitionResult" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "arnPartition" - }, - "name" - ] - }, - { - "fn": "getAttr", - "argv": [ - { - "ref": "partitionResult" - }, - "name" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "region" - ] - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "not", - "argv": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "accountId" - ] - }, - "" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "accountId" - ] - }, - false - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "AccountId" - }, - "{bucketArn#accountId}" - ] - } - ] - } - ], - "error": "Invalid ARN: the accountId specified in the ARN (`{bucketArn#accountId}`) does not match the parameter 
(`{AccountId}`)", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "resourceId[2]" - ], - "assign": "outpostType" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "resourceId[3]" - ], - "assign": "bucketName" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "outpostType" - }, - "bucket" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "endpoint": { - "url": "https://s3-outposts-fips.{bucketArn#region}.{arnPartition#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{bucketArn#region}" - } - ] - }, - "headers": { - "x-amz-account-id": [ - "{bucketArn#accountId}" - ], - "x-amz-outpost-id": [ - "{outpostId}" - ] - } - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" - } - ], - "endpoint": { - "url": "{url#scheme}://{url#authority}{url#path}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{bucketArn#region}" - } - ] - }, - "headers": { - "x-amz-account-id": [ - "{bucketArn#accountId}" - ], - "x-amz-outpost-id": [ - "{outpostId}" - ] - } - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://s3-outposts.{bucketArn#region}.{arnPartition#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{bucketArn#region}" - } - ] - }, - "headers": { - "x-amz-account-id": [ - "{bucketArn#accountId}" - ], - "x-amz-outpost-id": [ - "{outpostId}" - ] - } - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: Expected an outpost type `bucket`, found `{outpostType}`", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: expected a bucket name", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: Expected a 4-component resource", - "type": "error" - } - ] - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. 
Found: `{bucketArn#accountId}`", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: missing account ID", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid region in ARN: `{bucketArn#region}` (invalid DNS name)", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Client was configured for partition `{partitionResult#name}` but ARN has `{arnPartition#name}`", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "A valid partition could not be determined", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Could not load partition for ARN region `{bucketArn#region}`", - "type": "error" - } - ] - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`., found: `{outpostId}`", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: The Outpost Id was not set", - "type": "error" - } - ] - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: No ARN type specified", - "type": "error" - } - ] - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", - "argv": [ - { - "ref": "Region" - } - ], - "assign": "partitionResult" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "ref": "Region" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "partitionResult" - }, - "name" - ] - }, - "aws-cn" - ] - } - ], - "error": "Partition does not support FIPS", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "RequiresAccountId" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "RequiresAccountId" - }, - true - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - } - ] - } - ], - "error": "AccountId is required but not set", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "ref": "AccountId" - }, - false - ] - } - ] - } - ], - "error": "AccountId must only contain a-z, A-Z, 0-9 and `-`.", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "RequiresAccountId" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "RequiresAccountId" - }, - true - ] - }, - { - "fn": "isSet", - "argv": [ - { - 
"ref": "AccountId" - } - ] - } - ], - "endpoint": { - "url": "{url#scheme}://{AccountId}.{url#authority}{url#path}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "{url#scheme}://{url#authority}{url#path}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "RequiresAccountId" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "RequiresAccountId" - }, - true - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - } - ], - "endpoint": { - "url": "https://{AccountId}.s3-control-fips.dualstack.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "endpoint": { - "url": "https://s3-control-fips.dualstack.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - false - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "RequiresAccountId" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "RequiresAccountId" - }, - true - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - } - ], - "endpoint": { - "url": "https://{AccountId}.s3-control-fips.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - false - ] - } - ], - "endpoint": { - "url": "https://s3-control-fips.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "RequiresAccountId" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": 
"RequiresAccountId" - }, - true - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - } - ], - "endpoint": { - "url": "https://{AccountId}.s3-control.dualstack.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "endpoint": { - "url": "https://s3-control.dualstack.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - false - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "RequiresAccountId" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "RequiresAccountId" - }, - true - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - } - ], - "endpoint": { - "url": "https://{AccountId}.s3-control.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - false - ] - } - ], - "endpoint": { - "url": "https://s3-control.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - } - ] - } - ] - } - ] - } - ] - }, + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "{url#scheme}://{url#authority}{url#path}", + "properties": { + "authSchemes": [ { - "conditions": [], - "error": "Invalid region: region was not a valid DNS name.", - "type": "error" + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" } ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "RequiresAccountId" } ] }, { - "conditions": [], - "error": "A valid partition could not be determined", - "type": "error" + "fn": "booleanEquals", + "argv": [ + { + "ref": "RequiresAccountId" + }, + true + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] } - ] + ], + "endpoint": { + "url": "https://{AccountId}.s3-control-fips.dualstack.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + 
{ + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://s3-control-fips.dualstack.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "RequiresAccountId" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "RequiresAccountId" + }, + true + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] + } + ], + "endpoint": { + "url": "https://{AccountId}.s3-control-fips.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://s3-control-fips.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "RequiresAccountId" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "RequiresAccountId" + }, + true + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] + } + ], + "endpoint": { + "url": "https://{AccountId}.s3-control.dualstack.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://s3-control.dualstack.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "RequiresAccountId" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "RequiresAccountId" + }, + true + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] + } + ], + "endpoint": { + "url": "https://{AccountId}.s3-control.{Region}.{partitionResult#dnsSuffix}", + "properties": 
{ + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://s3-control.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "Invalid region: region was not a valid DNS name.", + "type": "error" } ] - }, - { - "conditions": [], - "error": "Region must be set", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Region must be set", + "type": "error" } ] } \ No newline at end of file diff --git a/services/s3control/src/main/resources/codegen-resources/endpoint-tests.json b/services/s3control/src/main/resources/codegen-resources/endpoint-tests.json index afb8c2754420..6d2656e10b99 100644 --- a/services/s3control/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/s3control/src/main/resources/codegen-resources/endpoint-tests.json @@ -771,7 +771,6 @@ ], "params": { "Bucket": "blah", - "Operation": "CreateBucket", "OutpostId": "123", "Region": "us-east-2", "RequiresAccountId": false, @@ -811,7 +810,6 @@ ], "params": { "Bucket": "blah", - "Operation": "CreateBucket", "OutpostId": "123", "Region": "us-east-2", "RequiresAccountId": false, @@ -849,7 +847,6 @@ ], "params": { "Bucket": "blah", - "Operation": "CreateBucket", "Region": "us-east-2", "RequiresAccountId": false, "UseDualStack": false, @@ -880,14 +877,13 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "123", + "AccountId": "123456789012", "OutpostId": "op-123" } } ], "params": { - "AccountId": "123", - "Operation": "ListRegionalBuckets", + "AccountId": "123456789012", "OutpostId": "op-123", "Region": "us-east-2", "RequiresAccountId": true, @@ -909,7 +905,7 @@ } ] }, - "url": "https://123.s3-control.us-east-2.amazonaws.com" + "url": "https://123456789012.s3-control.us-east-2.amazonaws.com" } }, "operationInputs": [ @@ -919,13 +915,12 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "123" + "AccountId": "123456789012" } } ], "params": { - "AccountId": "123", - "Operation": "ListRegionalBuckets", + "AccountId": "123456789012", "Region": "us-east-2", "RequiresAccountId": true, "UseDualStack": false, @@ -957,14 +952,13 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "123", + "AccountId": "123456789012", "OutpostId": "op-123" } } ], "params": { - "AccountId": "123", - "Operation": "CreateBucket", + "AccountId": "123456789012", "OutpostId": "op-123", "Region": "us-east-2", "RequiresAccountId": true, @@ -1131,7 +1125,7 @@ { "documentation": "Account ID set inline and in ARN and they do not match@us-west-2", "expect": { - "error": "Invalid ARN: the accountId specified in the ARN (`123456789012`) does not match the parameter (`9999999`)" + "error": "Invalid ARN: the accountId specified in the ARN (`123456789012`) does not match the parameter (`999999999999`)" }, "operationInputs": [ { @@ -1141,14 +1135,14 @@ }, "operationName": "GetAccessPoint", "operationParams": { - "AccountId": "9999999", + "AccountId": "999999999999", "Name": 
"arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint" } } ], "params": { "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "9999999", + "AccountId": "999999999999", "Region": "us-west-2", "RequiresAccountId": true, "UseArnRegion": false, @@ -1190,7 +1184,6 @@ "AccessPointName": "apname", "AccountId": "123456789012", "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", - "Operation": "GetAccessPoint", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -1239,7 +1232,6 @@ "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012", "Endpoint": "https://beta.example.com", - "Operation": "GetAccessPoint", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -1254,7 +1246,6 @@ "params": { "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", "Endpoint": "beta.example.com", - "Operation": "GetAccessPoint", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -1269,7 +1260,6 @@ "params": { "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "Endpoint": "beta.example.com", - "Operation": "GetBucket", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -1296,7 +1286,6 @@ "params": { "Bucket": "bucketname", "Endpoint": "https://beta.example.com", - "Operation": "CreateBucket", "OutpostId": "op-123", "Region": "us-west-2", "RequiresAccountId": false, @@ -1337,14 +1326,14 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], "params": { "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", "Endpoint": "https://beta.example.com", - "Operation": "GetBucket", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -1376,15 +1365,14 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "123", + "AccountId": "123456789012", "OutpostId": "op-123" } } ], "params": { - "AccountId": "123", + "AccountId": "123456789012", "Endpoint": "https://beta.example.com", - "Operation": "CreateBucket", "OutpostId": "op-123", "Region": "us-east-2", "RequiresAccountId": true, @@ -1418,15 +1406,14 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "123", + "AccountId": "123456789012", "OutpostId": "op-123" } } ], "params": { - "AccountId": "123", + "AccountId": "123456789012", "Endpoint": "https://beta.example.com", - "Operation": "CreateBucket", "OutpostId": "op-123", "Region": "us-east-2", "RequiresAccountId": true, @@ -1468,7 +1455,6 @@ "params": { "Bucket": "blah", "Endpoint": "https://beta.example.com", - "Operation": "CreateBucket", "OutpostId": "123", "Region": "us-east-2", "RequiresAccountId": false, @@ -1484,7 +1470,6 @@ "params": { "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "Endpoint": "https://beta.example.com", - "Operation": "GetAccessPoint", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": true, @@ -1499,7 
+1484,6 @@ "params": { "Bucket": "bucketname", "Endpoint": "https://beta.example.com", - "Operation": "CreateBucket", "OutpostId": "op-123", "Region": "us-west-2", "RequiresAccountId": false, @@ -1540,7 +1524,8 @@ "operationName": "CreateAccessPoint", "operationParams": { "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Name": "apname" + "Name": "apname", + "AccountId": "123456789012" } } ], @@ -1584,7 +1569,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -1628,7 +1614,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -1673,7 +1660,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -1718,7 +1706,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -1776,7 +1765,8 @@ "operationName": "CreateAccessPoint", "operationParams": { "Bucket": "arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Name": "apname" + "Name": "apname", + "AccountId": "123456789012" } } ], @@ -1820,7 +1810,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -1864,7 +1855,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -1909,7 +1901,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -1954,7 +1947,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -2012,7 +2006,8 @@ "operationName": "CreateAccessPoint", "operationParams": { "Bucket": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Name": "apname" + 
"Name": "apname", + "AccountId": "123456789012" } } ], @@ -2056,7 +2051,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -2100,7 +2096,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -2145,7 +2142,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -2190,7 +2188,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -2356,11 +2355,11 @@ } ] }, - "url": "https://1234567890-aBC.s3-control-fips.us-east-1.amazonaws.com" + "url": "https://123456789012.s3-control-fips.us-east-1.amazonaws.com" } }, "params": { - "AccountId": "1234567890-aBC", + "AccountId": "123456789012", "Region": "us-east-1", "RequiresAccountId": true, "UseDualStack": false, @@ -2501,7 +2500,7 @@ } ] }, - "url": "https://1234567890-aBC.s3-control.us-east-1.amazonaws.com" + "url": "https://123456789012.s3-control.us-east-1.amazonaws.com" } }, "operationInputs": [ @@ -2511,12 +2510,12 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "1234567890-aBC" + "AccountId": "123456789012" } } ], "params": { - "AccountId": "1234567890-aBC", + "AccountId": "123456789012", "Region": "us-east-1", "RequiresAccountId": true, "UseDualStack": false, @@ -2561,7 +2560,7 @@ } ] }, - "url": "https://1234567890-aBC.s3-control-fips.us-east-1.amazonaws.com" + "url": "https://123456789012.s3-control-fips.us-east-1.amazonaws.com" } }, "operationInputs": [ @@ -2572,12 +2571,12 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "1234567890-aBC" + "AccountId": "123456789012" } } ], "params": { - "AccountId": "1234567890-aBC", + "AccountId": "123456789012", "Region": "us-east-1", "RequiresAccountId": true, "UseDualStack": false, @@ -2598,7 +2597,7 @@ } ] }, - "url": "https://1234567890-aBC.s3-control-fips.dualstack.us-east-1.amazonaws.com" + "url": "https://123456789012.s3-control-fips.dualstack.us-east-1.amazonaws.com" } }, "operationInputs": [ @@ -2610,12 +2609,12 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "1234567890-aBC" + "AccountId": "123456789012" } } ], "params": { - "AccountId": "1234567890-aBC", + "AccountId": "123456789012", "Region": "us-east-1", "RequiresAccountId": true, "UseDualStack": true, @@ -2636,7 +2635,7 @@ } ] }, - "url": "https://1234567890-aBC.example.com" + "url": "https://123456789012.example.com" } }, "operationInputs": [ @@ -2647,12 +2646,12 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "1234567890-aBC" + "AccountId": "123456789012" } } ], "params": { - 
"AccountId": "1234567890-aBC", + "AccountId": "123456789012", "Region": "us-east-1", "RequiresAccountId": true, "Endpoint": "https://example.com" @@ -2704,7 +2703,7 @@ } }, { - "documentation": "account id with custom endpoint, fips and dualstack", + "documentation": "account id with custom endpoint, fips", "expect": { "endpoint": { "properties": { @@ -2717,7 +2716,7 @@ } ] }, - "url": "https://1234567890-aBC.example.com" + "url": "https://123456789012.example.com" } }, "operationInputs": [ @@ -2729,21 +2728,20 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "1234567890-aBC" + "AccountId": "123456789012" } } ], "params": { - "AccountId": "1234567890-aBC", + "AccountId": "123456789012", "Region": "us-east-1", "RequiresAccountId": true, "Endpoint": "https://example.com", - "UseFIPS": true, - "UseDualstack": true + "UseFIPS": true } }, { - "documentation": "custom endpoint, fips and dualstack", + "documentation": "custom endpoint, fips", "expect": { "endpoint": { "properties": { @@ -2762,8 +2760,7 @@ "params": { "Region": "us-east-1", "Endpoint": "https://example.com", - "UseFIPS": true, - "UseDualstack": true + "UseFIPS": true } }, { @@ -2786,32 +2783,19 @@ "params": { "Region": "us-east-1", "Endpoint": "https://example.com", - "UseFIPS": true, - "UseDualstack": false + "UseFIPS": true } }, { - "documentation": "custom endpoint, dualstack", + "documentation": "custom endpoint, DualStack", "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3", - "signingRegion": "us-east-1", - "disableDoubleEncoding": true - } - ] - }, - "url": "https://example.com" - } + "error": "Invalid Configuration: DualStack and custom endpoint are not supported" }, "params": { "Region": "us-east-1", "Endpoint": "https://example.com", "UseFIPS": false, - "UseDualstack": true + "UseDualStack": true } }, { @@ -2835,7 +2819,6 @@ "error": "AccountId is required but not set" }, "params": { - "Operation": "ListRegionalBuckets", "OutpostId": "op-123", "Region": "us-east-2", "RequiresAccountId": true, @@ -2862,7 +2845,6 @@ ], "params": { "AccountId": "/?invalid¬-host*label", - "Operation": "ListRegionalBuckets", "OutpostId": "op-123", "Region": "us-east-2", "RequiresAccountId": true, @@ -2943,7 +2925,6 @@ "AccessPointName": "apname", "Endpoint": "https://beta.example.com", "AccountId": "123456789012", - "Operation": "GetAccessPoint", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -2978,7 +2959,6 @@ "params": { "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "Endpoint": "https://beta.example.com", - "Operation": "GetAccessPoint", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -2986,9 +2966,9 @@ } }, { - "documentation": "Dualstack + Custom endpoint is not supported(non-arn)", + "documentation": "DualStack + Custom endpoint is not supported(non-arn)", "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + "error": "Invalid Configuration: DualStack and custom endpoint are not supported" }, "operationInputs": [ { @@ -3008,7 +2988,6 @@ "AccessPointName": "apname", "Endpoint": "https://beta.example.com", "AccountId": "123456789012", - "Operation": "GetAccessPoint", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": true, @@ -3029,14 +3008,14 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": 
"arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], "params": { "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", "Endpoint": "https://beta.example.com", - "Operation": "GetBucket", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": true, @@ -3063,7 +3042,6 @@ ], "params": { "AccountId": "0123456789012", - "Operation": "ListRegionalBuckets", "OutpostId": "op-123", "Region": "cn-north-1", "RequiresAccountId": true, @@ -3090,7 +3068,6 @@ ], "params": { "AccountId": "0123456789012", - "Operation": "ListRegionalBuckets", "OutpostId": "?outpost/invalid+", "Region": "us-west-1", "RequiresAccountId": true, @@ -3118,7 +3095,6 @@ "error": "Invalid region: region was not a valid DNS name." }, "params": { - "Operation": "ListRegionalBuckets", "OutpostId": "op-123", "Region": "invalid-region 42", "AccountId": "0123456", @@ -3145,7 +3121,6 @@ } }, "params": { - "Operation": "ListRegionalBuckets", "OutpostId": "op-123", "Region": "us-west-2", "UseDualStack": false, @@ -3205,14 +3180,14 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], "params": { "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", "Endpoint": "https://beta.example.com", - "Operation": "GetBucket", "Region": "us-west-2", "RequiresAccountId": true, "UseArnRegion": false, @@ -3308,7 +3283,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -3333,13 +3309,13 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], "params": { "Bucket": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Operation": "GetBucket", "Region": "us-west-2", "RequiresAccountId": true, "UseArnRegion": true, @@ -3568,22 +3544,20 @@ "Bucket": "bucketName", "Endpoint": "https://10.0.1.12:433", "UseFIPS": true, - "UseDualStack": false, - "Accelerate": false + "UseDualStack": false } }, { - "documentation": "S3 Snow Control with Dual-stack enabled", + "documentation": "S3 Snow Control with Dualstack enabled", "expect": { - "error": "S3 Snow does not support Dual-stack" + "error": "S3 Snow does not support DualStack" }, "params": { "Region": "snow", "Bucket": "bucketName", "Endpoint": "https://10.0.1.12:433", "UseFIPS": false, - "UseDualStack": true, - "Accelerate": false + "UseDualStack": true } } ], diff --git a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptorTest.java b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptorTest.java deleted file mode 100644 index 
ce035d11b194..000000000000 --- a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptorTest.java +++ /dev/null @@ -1,323 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.awssdk.services.s3control.internal.interceptors; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute.SERVICE_SIGNING_NAME; -import static software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute.SIGNING_REGION; -import static software.amazon.awssdk.core.interceptor.SdkExecutionAttribute.SERVICE_CONFIG; -import static software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute.S3_ARNABLE_FIELD; - -import java.util.Optional; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import software.amazon.awssdk.arns.Arn; -import software.amazon.awssdk.core.Protocol; -import software.amazon.awssdk.core.SdkRequest; -import software.amazon.awssdk.core.async.AsyncRequestBody; -import software.amazon.awssdk.core.interceptor.ExecutionAttributes; -import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; -import software.amazon.awssdk.core.sync.RequestBody; -import software.amazon.awssdk.http.SdkHttpFullRequest; -import software.amazon.awssdk.http.SdkHttpMethod; -import software.amazon.awssdk.http.SdkHttpRequest; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.s3control.S3ControlClient; -import software.amazon.awssdk.services.s3control.S3ControlConfiguration; -import software.amazon.awssdk.services.s3control.internal.S3ArnableField; -import software.amazon.awssdk.services.s3control.model.CreateBucketRequest; -import software.amazon.awssdk.services.s3control.model.ListRegionalBucketsRequest; - -public class EndpointAddressInterceptorTest { - @Rule - public ExpectedException thrown = ExpectedException.none(); - - private static final String X_AMZ_ACCOUNT_ID = "x-amz-account-id"; - private static final String ACCOUNT_ID = "123456789012"; - - private SdkHttpRequest request; - private S3ControlConfiguration configuration; - private ExecutionAttributes executionAttributes; - - @Before - public void setup() { - request = SdkHttpFullRequest.builder() - .appendHeader(X_AMZ_ACCOUNT_ID, ACCOUNT_ID) - .protocol(Protocol.HTTPS.toString()) - .method(SdkHttpMethod.POST) - .host(S3ControlClient.serviceMetadata().endpointFor(Region.US_EAST_1).toString()) - .build(); - configuration = S3ControlConfiguration.builder().build(); - executionAttributes = new ExecutionAttributes(); - executionAttributes.putAttribute(SERVICE_SIGNING_NAME, "s3-control"); - executionAttributes.putAttribute(SIGNING_REGION, Region.of("us-east-1")); - executionAttributes.putAttribute(SERVICE_CONFIG, configuration); - } - - @Test - public void 
modifyHttpRequest_ResolvesCorrectHost_StandardSettings() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - SdkHttpRequest modified = interceptor.modifyHttpRequest(new Context(request), new ExecutionAttributes()); - assertThat(modified.host()).isEqualTo("s3-control.us-east-1.amazonaws.com"); - } - - @Test - public void modifyHttpRequest_ResolvesCorrectHost_Dualstack() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - - S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder().dualstackEnabled(true).build(); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); - - SdkHttpRequest modified = interceptor.modifyHttpRequest(new Context(request), executionAttributes); - assertThat(modified.host()).isEqualTo("s3-control.dualstack.us-east-1.amazonaws.com"); - } - - @Test - public void modifyHttpRequest_ResolvesCorrectHost_Fips() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - - S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder().fipsModeEnabled(true).build(); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); - - SdkHttpRequest modified = interceptor.modifyHttpRequest(new Context(request), executionAttributes); - assertThat(modified.host()).isEqualTo("s3-control-fips.us-east-1.amazonaws.com"); - } - - @Test - public void createBucketRequestWithOutpostId_shouldRedirect() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - CreateBucketRequest createBucketRequest = CreateBucketRequest.builder().outpostId("1234").build(); - - S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder().build(); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); - executionAttributes.putAttribute(SIGNING_REGION, Region.US_EAST_1); - - SdkHttpRequest modified = interceptor.modifyHttpRequest(new Context(request).request(createBucketRequest), - executionAttributes); - assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo("s3-outposts"); - assertThat(modified.host()).isEqualTo("s3-outposts.us-east-1.amazonaws.com"); - } - - @Test - public void listRegionalBucketsRequestsWithOutpostId_shouldRedirect() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - ListRegionalBucketsRequest sdkRequest = ListRegionalBucketsRequest.builder().outpostId("1234").build(); - - S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder().build(); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); - executionAttributes.putAttribute(SIGNING_REGION, Region.US_EAST_1); - executionAttributes.putAttribute(SERVICE_SIGNING_NAME, "s3"); - - SdkHttpRequest modified = interceptor.modifyHttpRequest(new Context(request).request(sdkRequest), - executionAttributes); - assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo("s3-outposts"); - assertThat(modified.host()).isEqualTo("s3-outposts.us-east-1.amazonaws.com"); - } - - @Test - public void listRegionalBucketsRequestsWithoutOutpostId_shouldNotRedirect() { - 
EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - ListRegionalBucketsRequest sdkRequest = ListRegionalBucketsRequest.builder().build(); - - S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder() - .dualstackEnabled(true) - .build(); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); - executionAttributes.putAttribute(SIGNING_REGION, Region.US_EAST_1); - executionAttributes.putAttribute(SERVICE_SIGNING_NAME, "s3"); - - SdkHttpRequest modified = interceptor.modifyHttpRequest(new Context(request).request(sdkRequest), - executionAttributes); - assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo("s3"); - assertThat(modified.host()).isEqualTo("s3-control.dualstack.us-east-1.amazonaws.com"); - } - - @Test - public void createBucketRequestsWithoutOutpostId_shouldNotRedirect() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - ListRegionalBucketsRequest sdkRequest = ListRegionalBucketsRequest.builder() - .build(); - - S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder() - .fipsModeEnabled(true) - .build(); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); - executionAttributes.putAttribute(SIGNING_REGION, Region.US_EAST_1); - executionAttributes.putAttribute(SERVICE_SIGNING_NAME, "s3"); - - SdkHttpRequest modified = interceptor.modifyHttpRequest(new Context(request).request(sdkRequest), - executionAttributes); - assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo("s3"); - assertThat(modified.host()).isEqualTo("s3-control-fips.us-east-1.amazonaws.com"); - } - - @Test - public void listRegionalBucketsRequestWithOutpostId_fipsEnabled_shouldThrowException() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - ListRegionalBucketsRequest sdkRequest = ListRegionalBucketsRequest.builder() - .outpostId("123") - .build(); - - S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder().fipsModeEnabled(true).build(); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); - executionAttributes.putAttribute(SIGNING_REGION, Region.US_EAST_1); - executionAttributes.putAttribute(SERVICE_SIGNING_NAME, "s3"); - - assertThatThrownBy(() -> interceptor.modifyHttpRequest(new Context(request).request(sdkRequest), - executionAttributes)).hasMessageContaining("FIPS endpoints are " - + "not supported"); - } - - @Test - public void listRegionalBucketsRequestWithOutpostId_fipsDualsackEnabled_shouldThrowException() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - ListRegionalBucketsRequest sdkRequest = ListRegionalBucketsRequest.builder() - .outpostId("123") - .build(); - - S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder().dualstackEnabled(true).build(); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); - executionAttributes.putAttribute(SIGNING_REGION, Region.US_EAST_1); - executionAttributes.putAttribute(SERVICE_SIGNING_NAME, "s3"); - - assertThatThrownBy(() -> interceptor.modifyHttpRequest(new 
Context(request).request(sdkRequest), - executionAttributes)) - .hasMessageContaining("Dual stack"); - } - - @Test(expected = IllegalArgumentException.class) - public void modifyHttpRequest_ThrowsException_FipsAndDualstack() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - - S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder() - .fipsModeEnabled(true) - .dualstackEnabled(true) - .build(); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); - - interceptor.modifyHttpRequest(new Context(request), executionAttributes); - } - - @Test - public void outpostBucketArn_shouldResolveHost() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - - Arn arn = Arn.fromString("arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket"); - executionAttributes.putAttribute(S3_ARNABLE_FIELD, S3ArnableField.builder().arn(arn).build()); - SdkHttpRequest modifiedRequest = interceptor.modifyHttpRequest(new Context(request), executionAttributes); - - assertThat(modifiedRequest.host()).isEqualTo("s3-outposts.us-east-1.amazonaws.com"); - assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo("s3-outposts"); - assertThat(modifiedRequest.headers().get("x-amz-outpost-id").get(0)).isEqualTo("op-01234567890123456"); - assertThat(modifiedRequest.headers().get("x-amz-account-id").get(0)).isEqualTo(ACCOUNT_ID); - } - - @Test - public void outpostAccessPointArn_shouldResolveHost() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - - Arn arn = Arn.fromString("arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint"); - executionAttributes.putAttribute(S3_ARNABLE_FIELD, S3ArnableField.builder().arn(arn).build()); - SdkHttpRequest modifiedRequest = interceptor.modifyHttpRequest(new Context(request), executionAttributes); - - assertThat(modifiedRequest.host()).isEqualTo("s3-outposts.us-east-1.amazonaws.com"); - assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo("s3-outposts"); - assertThat(modifiedRequest.headers().get("x-amz-outpost-id").get(0)).isEqualTo("op-01234567890123456"); - assertThat(modifiedRequest.headers().get("x-amz-account-id").get(0)).isEqualTo(ACCOUNT_ID); - } - - @Test - public void outpostArnWithFipsEnabled_shouldThrowException() { - thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("FIPS"); - - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - Arn arn = Arn.fromString("arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket"); - executionAttributes.putAttribute(S3_ARNABLE_FIELD, S3ArnableField.builder().arn(arn).build()); - executionAttributes.putAttribute(SERVICE_CONFIG, enableFips()); - interceptor.modifyHttpRequest(new Context(request), executionAttributes); - } - - @Test - public void outpostArnWithDualstackEnabled_shouldThrowException() { - thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("Dual stack"); - - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - Arn arn = Arn.fromString("arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket"); - executionAttributes.putAttribute(S3_ARNABLE_FIELD, S3ArnableField.builder().arn(arn).build()); - executionAttributes.putAttribute(SERVICE_CONFIG, enableDualstack()); 
- interceptor.modifyHttpRequest(new Context(request), executionAttributes); - } - - private S3ControlConfiguration enableDualstack() { - return S3ControlConfiguration.builder() - .dualstackEnabled(true) - .build(); - } - - private S3ControlConfiguration enableFips() { - return S3ControlConfiguration.builder() - .fipsModeEnabled(true) - .build(); - } - - public final class Context implements software.amazon.awssdk.core.interceptor.Context.ModifyHttpRequest { - - private final SdkHttpRequest request; - private SdkRequest sdkRequest = CreateBucketRequest.builder().build(); - - public Context(SdkHttpRequest request) { - this.request = request; - } - - public Context request(SdkRequest sdkRequest) { - this.sdkRequest = sdkRequest; - return this; - } - - @Override - public SdkRequest request() { - return sdkRequest; - } - - @Override - public SdkHttpRequest httpRequest() { - return request; - } - - @Override - public Optional requestBody() { - return Optional.empty(); - } - - @Override - public Optional asyncRequestBody() { - return Optional.empty(); - } - } -} diff --git a/services/s3outposts/pom.xml b/services/s3outposts/pom.xml index 1bf58c4535e0..f786c1e5277e 100644 --- a/services/s3outposts/pom.xml +++ b/services/s3outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT s3outposts AWS Java SDK :: Services :: S3 Outposts diff --git a/services/sagemaker/pom.xml b/services/sagemaker/pom.xml index 5a015027b4f3..440e2ee55094 100644 --- a/services/sagemaker/pom.xml +++ b/services/sagemaker/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 sagemaker diff --git a/services/sagemaker/src/main/resources/codegen-resources/service-2.json b/services/sagemaker/src/main/resources/codegen-resources/service-2.json index ab6de9858fc6..08d9efc114a0 100644 --- a/services/sagemaker/src/main/resources/codegen-resources/service-2.json +++ b/services/sagemaker/src/main/resources/codegen-resources/service-2.json @@ -279,7 +279,7 @@ "errors":[ {"shape":"ResourceLimitExceeded"} ], - "documentation":"

    Creates an endpoint using the endpoint configuration specified in the request. SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

    Use this API to deploy models using SageMaker hosting services.

    For an example that calls this method when deploying a model to SageMaker hosting services, see the Create Endpoint example notebook.

    You must not delete an EndpointConfig that is in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

    The endpoint name must be unique within an Amazon Web Services Region in your Amazon Web Services account.

    When it receives the request, SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

    When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads, the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

    When SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

    If any of the models hosted at this endpoint get model data from an Amazon S3 location, SageMaker uses Amazon Web Services Security Token Service to download model artifacts from the S3 path you provided. Amazon Web Services STS is activated in your Amazon Web Services account by default. If you previously deactivated Amazon Web Services STS for a region, you need to reactivate Amazon Web Services STS for that region. For more information, see Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region in the Amazon Web Services Identity and Access Management User Guide.

    To add the IAM role policies for using this API operation, go to the IAM console, and choose Roles in the left navigation pane. Search for the IAM role that you want to grant access to use the CreateEndpoint and CreateEndpointConfig API operations, and add the following policies to the role.

    • Option 1: For a full SageMaker access, search and attach the AmazonSageMakerFullAccess policy.

    • Option 2: For granting a limited access to an IAM role, paste the following Action elements manually into the JSON file of the IAM role:

      \"Action\": [\"sagemaker:CreateEndpoint\", \"sagemaker:CreateEndpointConfig\"]

      \"Resource\": [

      \"arn:aws:sagemaker:region:account-id:endpoint/endpointName\"

      \"arn:aws:sagemaker:region:account-id:endpoint-config/endpointConfigName\"

      ]

      For more information, see SageMaker API Permissions: Actions, Permissions, and Resources Reference.

    " + "documentation":"

    Creates an endpoint using the endpoint configuration specified in the request. SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

    Use this API to deploy models using SageMaker hosting services.

    You must not delete an EndpointConfig that is in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

    The endpoint name must be unique within an Amazon Web Services Region in your Amazon Web Services account.

    When it receives the request, SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

    When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads, the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

    When SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

    If any of the models hosted at this endpoint get model data from an Amazon S3 location, SageMaker uses Amazon Web Services Security Token Service to download model artifacts from the S3 path you provided. Amazon Web Services STS is activated in your Amazon Web Services account by default. If you previously deactivated Amazon Web Services STS for a region, you need to reactivate Amazon Web Services STS for that region. For more information, see Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region in the Amazon Web Services Identity and Access Management User Guide.

    To add the IAM role policies for using this API operation, go to the IAM console, and choose Roles in the left navigation pane. Search for the IAM role that you want to grant access to use the CreateEndpoint and CreateEndpointConfig API operations, and add the following policies to the role.

    • Option 1: For a full SageMaker access, search and attach the AmazonSageMakerFullAccess policy.

    • Option 2: For granting a limited access to an IAM role, paste the following Action elements manually into the JSON file of the IAM role:

      \"Action\": [\"sagemaker:CreateEndpoint\", \"sagemaker:CreateEndpointConfig\"]

      \"Resource\": [

      \"arn:aws:sagemaker:region:account-id:endpoint/endpointName\"

      \"arn:aws:sagemaker:region:account-id:endpoint-config/endpointConfigName\"

      ]

      For more information, see SageMaker API Permissions: Actions, Permissions, and Resources Reference.

    " }, "CreateEndpointConfig":{ "name":"CreateEndpointConfig", @@ -5860,6 +5860,10 @@ "WorkspaceSettings":{ "shape":"WorkspaceSettings", "documentation":"

    The workspace settings for the SageMaker Canvas application.

    " + }, + "IdentityProviderOAuthSettings":{ + "shape":"IdentityProviderOAuthSettings", + "documentation":"

    The settings for connecting to an external data source with OAuth.

    " } }, "documentation":"

    The SageMaker Canvas application settings.

    " @@ -8672,7 +8676,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

    A list of key value pairs associated with the model. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

    " + "documentation":"

    A list of key value pairs associated with the model. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

    If you supply ModelPackageGroupName, your model package belongs to the model group you specify and uses the tags associated with the model group. In this case, you cannot supply a tag argument.

    " }, "ModelApprovalStatus":{ "shape":"ModelApprovalStatus", @@ -9875,6 +9879,13 @@ }, "documentation":"

    Describes the location of the channel data.

    " }, + "DataSourceName":{ + "type":"string", + "enum":[ + "SalesforceGenie", + "Snowflake" + ] + }, "Database":{ "type":"string", "max":255, @@ -10841,6 +10852,16 @@ } } }, + "DerivedInformation":{ + "type":"structure", + "members":{ + "DerivedDataInputConfig":{ + "shape":"DataInputConfig", + "documentation":"

    The data input configuration that SageMaker Neo automatically derived for the model. When SageMaker Neo derives this information, you don't need to specify the data input configuration when you create a compilation job.

    " + } + }, + "documentation":"

    Information that SageMaker Neo automatically derived about the model.

    " + }, "DescribeActionRequest":{ "type":"structure", "required":["ActionName"], @@ -11489,6 +11510,10 @@ "VpcConfig":{ "shape":"NeoVpcConfig", "documentation":"

    A VpcConfig object that specifies the VPC that you want your compilation job to connect to. Control access to your models by configuring the VPC. For more information, see Protect Compilation Jobs by Using an Amazon Virtual Private Cloud.

    " + }, + "DerivedInformation":{ + "shape":"DerivedInformation", + "documentation":"

    Information that SageMaker Neo automatically derived about the model.

    " } } }, @@ -18456,6 +18481,29 @@ "max":128, "min":32 }, + "IdentityProviderOAuthSetting":{ + "type":"structure", + "members":{ + "DataSourceName":{ + "shape":"DataSourceName", + "documentation":"

    The name of the data source that you're connecting to. Canvas currently supports OAuth for Snowflake and Salesforce Data Cloud.

    " + }, + "Status":{ + "shape":"FeatureStatus", + "documentation":"

    Describes whether OAuth for a data source is enabled or disabled in the Canvas application.

    " + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    The ARN of an Amazon Web Services Secrets Manager secret that stores the credentials from your identity provider, such as the client ID and secret, authorization URL, and token URL.

    " + } + }, + "documentation":"

    The Amazon SageMaker Canvas app setting where you configure OAuth for connecting to an external data source, such as Snowflake.

    " + }, + "IdentityProviderOAuthSettings":{ + "type":"list", + "member":{"shape":"IdentityProviderOAuthSetting"}, + "max":20 + }, "Image":{ "type":"structure", "required":[ @@ -19139,7 +19187,6 @@ "type":"structure", "required":[ "S3Uri", - "DataInputConfig", "Framework" ], "members":{ @@ -24733,7 +24780,7 @@ "members":{ "Percentile":{ "shape":"String64", - "documentation":"

    The model latency percentile threshold. For custom load tests, specify the value as P95.

    " + "documentation":"

    The model latency percentile threshold. Acceptable values are P95 and P99. For custom load tests, specify the value as P95.

    " }, "ValueInMilliseconds":{ "shape":"Integer", @@ -29167,6 +29214,10 @@ "SupportedEndpointType":{ "shape":"RecommendationJobSupportedEndpointType", "documentation":"

    The endpoint type to receive recommendations for. By default this is null, and the results of the inference recommendation job return a combined list of both real-time and serverless benchmarks. By specifying a value for this field, you can receive a longer list of benchmarks for the desired endpoint type.

    " + }, + "SupportedResponseMIMETypes":{ + "shape":"RecommendationJobSupportedResponseMIMETypes", + "documentation":"

    The supported MIME types for the output data.

    " } }, "documentation":"

    Specifies mandatory fields for running an Inference Recommender job directly in the CreateInferenceRecommendationsJob API. The fields specified in ContainerConfig override the corresponding fields in the model package. Use ContainerConfig if you want to specify these fields for the recommendation job but don't want to edit them in your model package.

    " @@ -29213,7 +29264,7 @@ }, "JobDurationInSeconds":{ "shape":"JobDurationInSeconds", - "documentation":"

    Specifies the maximum duration of the job, in seconds. The maximum value is 7200.

    " + "documentation":"

    Specifies the maximum duration of the job, in seconds. The maximum value is 18,000 seconds.

    " }, "TrafficPattern":{ "shape":"TrafficPattern", @@ -29342,6 +29393,15 @@ "type":"list", "member":{"shape":"String"} }, + "RecommendationJobSupportedResponseMIMEType":{ + "type":"string", + "max":1024, + "pattern":"^[-\\w]+\\/.+$" + }, + "RecommendationJobSupportedResponseMIMETypes":{ + "type":"list", + "member":{"shape":"RecommendationJobSupportedResponseMIMEType"} + }, "RecommendationJobType":{ "type":"string", "enum":[ diff --git a/services/sagemakera2iruntime/pom.xml b/services/sagemakera2iruntime/pom.xml index b989ee0caeb2..7d142dd0f92f 100644 --- a/services/sagemakera2iruntime/pom.xml +++ b/services/sagemakera2iruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT sagemakera2iruntime AWS Java SDK :: Services :: SageMaker A2I Runtime diff --git a/services/sagemakeredge/pom.xml b/services/sagemakeredge/pom.xml index e5be558fa38d..ec53a083c17c 100644 --- a/services/sagemakeredge/pom.xml +++ b/services/sagemakeredge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT sagemakeredge AWS Java SDK :: Services :: Sagemaker Edge diff --git a/services/sagemakerfeaturestoreruntime/pom.xml b/services/sagemakerfeaturestoreruntime/pom.xml index aee2c052addd..49a73cf0393d 100644 --- a/services/sagemakerfeaturestoreruntime/pom.xml +++ b/services/sagemakerfeaturestoreruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT sagemakerfeaturestoreruntime AWS Java SDK :: Services :: Sage Maker Feature Store Runtime diff --git a/services/sagemakergeospatial/pom.xml b/services/sagemakergeospatial/pom.xml index 8dc297b49672..1134f1cb3e4e 100644 --- a/services/sagemakergeospatial/pom.xml +++ b/services/sagemakergeospatial/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT sagemakergeospatial AWS Java SDK :: Services :: Sage Maker Geospatial diff --git a/services/sagemakermetrics/pom.xml b/services/sagemakermetrics/pom.xml index ae3b905268fe..39230c51b426 100644 --- a/services/sagemakermetrics/pom.xml +++ b/services/sagemakermetrics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT sagemakermetrics AWS Java SDK :: Services :: Sage Maker Metrics diff --git a/services/sagemakerruntime/pom.xml b/services/sagemakerruntime/pom.xml index 9805741fb186..c6ff0a5fa5e0 100644 --- a/services/sagemakerruntime/pom.xml +++ b/services/sagemakerruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT sagemakerruntime AWS Java SDK :: Services :: SageMaker Runtime diff --git a/services/sagemakerruntime/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/sagemakerruntime/src/main/resources/codegen-resources/endpoint-rule-set.json index 1df2646e75fc..6abcf0015eb1 100644 --- a/services/sagemakerruntime/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/sagemakerruntime/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": 
{}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://runtime.sagemaker-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://runtime.sagemaker-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -221,180 +225,140 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://runtime-fips.sagemaker.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] 
- } - ] - } - ], - "endpoint": { - "url": "https://runtime.sagemaker.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://runtime.sagemaker-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://runtime-fips.sagemaker.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://runtime.sagemaker.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://runtime.sagemaker.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://runtime.sagemaker-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://runtime.sagemaker.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://runtime.sagemaker.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://runtime.sagemaker.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/sagemakerruntime/src/main/resources/codegen-resources/endpoint-tests.json b/services/sagemakerruntime/src/main/resources/codegen-resources/endpoint-tests.json index 65c9d77c5c91..3317c56fa533 100644 --- a/services/sagemakerruntime/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/sagemakerruntime/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,8 +8,8 @@ } }, "params": { - "UseFIPS": false, "Region": "af-south-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -21,8 +21,8 @@ } }, "params": { - "UseFIPS": 
false, "Region": "ap-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -34,8 +34,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-northeast-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -47,8 +47,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-northeast-2", + "UseFIPS": false, "UseDualStack": false } }, @@ -60,8 +60,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-northeast-3", + "UseFIPS": false, "UseDualStack": false } }, @@ -73,8 +73,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-south-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -86,8 +86,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-southeast-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -99,8 +99,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-southeast-2", + "UseFIPS": false, "UseDualStack": false } }, @@ -112,8 +112,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ca-central-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -125,8 +125,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-central-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -138,8 +138,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-north-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -151,8 +151,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-south-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -164,8 +164,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-west-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -177,8 +177,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-west-2", + "UseFIPS": false, "UseDualStack": false } }, @@ -190,8 +190,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-west-3", + "UseFIPS": false, "UseDualStack": false } }, @@ -203,8 +203,8 @@ } }, "params": { - "UseFIPS": false, "Region": "me-south-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -216,8 +216,8 @@ } }, "params": { - "UseFIPS": false, "Region": "sa-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -229,8 +229,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -242,8 +242,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -255,8 +255,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-2", + "UseFIPS": false, "UseDualStack": false } }, @@ -268,8 +268,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-2", + "UseFIPS": true, "UseDualStack": false } }, @@ -281,8 +281,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-west-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -294,8 +294,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-west-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -307,8 +307,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-west-2", + "UseFIPS": false, "UseDualStack": false } }, @@ -320,8 +320,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-west-2", + "UseFIPS": true, "UseDualStack": false } }, @@ -333,8 +333,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -346,8 +346,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -359,8 +359,8 @@ } }, "params": { - "UseFIPS": false, "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -372,8 +372,8 @@ } }, "params": { - "UseFIPS": false, "Region": "cn-northwest-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -385,8 +385,8 @@ } }, 
"params": { - "UseFIPS": true, "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -398,8 +398,8 @@ } }, "params": { - "UseFIPS": true, "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -411,8 +411,8 @@ } }, "params": { - "UseFIPS": false, "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -424,8 +424,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-gov-west-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -437,8 +437,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-gov-west-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -450,8 +450,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-gov-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -463,8 +463,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-gov-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -476,8 +476,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-gov-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -489,8 +489,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-gov-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -502,8 +502,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-iso-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -513,8 +513,8 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "Region": "us-iso-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -526,8 +526,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-iso-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -537,8 +537,8 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "Region": "us-iso-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -548,8 +548,8 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -561,8 +561,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -572,8 +572,8 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -585,8 +585,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -598,8 +598,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -623,8 +623,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -635,8 +635,8 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } diff --git a/services/sagemakerruntime/src/main/resources/codegen-resources/service-2.json b/services/sagemakerruntime/src/main/resources/codegen-resources/service-2.json index 979a8fd8d412..8d5630c55a28 100644 --- a/services/sagemakerruntime/src/main/resources/codegen-resources/service-2.json +++ b/services/sagemakerruntime/src/main/resources/codegen-resources/service-2.json @@ -44,7 +44,25 @@ 
{"shape":"ServiceUnavailable"}, {"shape":"ValidationError"} ], - "documentation":"

    After you deploy a model into production using Amazon SageMaker hosting services, your client applications use this API to get inferences from the model hosted at the specified endpoint in an asynchronous manner.

    Inference requests sent to this API are enqueued for asynchronous processing. The processing of the inference request may or may not complete before you receive a response from this API. The response from this API will not contain the result of the inference request but contain information about where you can locate it.

    Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add additional headers. You should not rely on the behavior of headers outside those enumerated in the request syntax.

    Calls to InvokeEndpointAsync are authenticated by using Amazon Web Services Signature Version 4. For information, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 API Reference.

    " + "documentation":"

    After you deploy a model into production using Amazon SageMaker hosting services, your client applications use this API to get inferences from the model hosted at the specified endpoint in an asynchronous manner.

    Inference requests sent to this API are enqueued for asynchronous processing. The processing of the inference request may or may not complete before you receive a response from this API. The response from this API will not contain the result of the inference request but will contain information about where you can locate it.

    Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add additional headers. You should not rely on the behavior of headers outside those enumerated in the request syntax.

    Calls to InvokeEndpointAsync are authenticated by using Amazon Web Services Signature Version 4. For information, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 API Reference.
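    To make the asynchronous contract above concrete, here is a minimal sketch of calling this operation through the SDK's generated async client. The client and shape names (SageMakerRuntimeAsyncClient, InvokeEndpointAsyncRequest, inputLocation, outputLocation) are assumed to follow the usual code generation, and the endpoint name and S3 URIs are placeholders.

```java
import software.amazon.awssdk.services.sagemakerruntime.SageMakerRuntimeAsyncClient;
import software.amazon.awssdk.services.sagemakerruntime.model.InvokeEndpointAsyncRequest;
import software.amazon.awssdk.services.sagemakerruntime.model.InvokeEndpointAsyncResponse;

public class InvokeAsyncSketch {
    public static void main(String[] args) {
        // Assumed generated client; names are illustrative, not confirmed by this diff.
        try (SageMakerRuntimeAsyncClient client = SageMakerRuntimeAsyncClient.create()) {
            InvokeEndpointAsyncRequest request = InvokeEndpointAsyncRequest.builder()
                    .endpointName("my-endpoint")                        // hypothetical endpoint
                    .inputLocation("s3://my-bucket/input/payload.json") // request body staged in S3
                    .contentType("application/json")
                    .build();

            InvokeEndpointAsyncResponse response = client.invokeEndpointAsync(request).join();
            // The response does not carry the inference itself, only where the result will land.
            System.out.println("Result will be written to: " + response.outputLocation());
        }
    }
}
```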

    " + }, + "InvokeEndpointWithResponseStream":{ + "name":"InvokeEndpointWithResponseStream", + "http":{ + "method":"POST", + "requestUri":"/endpoints/{EndpointName}/invocations-response-stream" + }, + "input":{"shape":"InvokeEndpointWithResponseStreamInput"}, + "output":{"shape":"InvokeEndpointWithResponseStreamOutput"}, + "errors":[ + {"shape":"InternalFailure"}, + {"shape":"ServiceUnavailable"}, + {"shape":"ValidationError"}, + {"shape":"ModelError"}, + {"shape":"ModelStreamError"}, + {"shape":"InternalStreamFailure"} + ], + "documentation":"

    Invokes a model at the specified endpoint to return the inference response as a stream. The inference stream provides the response payload incrementally as a series of parts. Before you can get an inference stream, you must have access to a model that's deployed using Amazon SageMaker hosting services, and the container for that model must support inference streaming.

    For more information that can help you use this API, see the following sections in the Amazon SageMaker Developer Guide:

    Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add additional headers. You should not rely on the behavior of headers outside those enumerated in the request syntax.

    Calls to InvokeEndpointWithResponseStream are authenticated by using Amazon Web Services Signature Version 4. For information, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 API Reference.
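    As a rough illustration of consuming this event stream from the AWS SDK for Java v2, the sketch below assumes the code generator produces the usual event-stream response handler and visitor for this operation (InvokeEndpointWithResponseStreamResponseHandler, onPayloadPart, and so on); treat those names as assumptions rather than a confirmed API surface.

```java
import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.services.sagemakerruntime.SageMakerRuntimeAsyncClient;
import software.amazon.awssdk.services.sagemakerruntime.model.InvokeEndpointWithResponseStreamRequest;
import software.amazon.awssdk.services.sagemakerruntime.model.InvokeEndpointWithResponseStreamResponseHandler;

public class InvokeStreamSketch {
    public static void main(String[] args) {
        try (SageMakerRuntimeAsyncClient client = SageMakerRuntimeAsyncClient.create()) {
            InvokeEndpointWithResponseStreamRequest request = InvokeEndpointWithResponseStreamRequest.builder()
                    .endpointName("my-streaming-endpoint")            // hypothetical endpoint
                    .contentType("application/json")
                    .body(SdkBytes.fromUtf8String("{\"inputs\":\"hello\"}"))
                    .build();

            // Visitor-style handler, assuming the SDK's usual event-stream code generation.
            InvokeEndpointWithResponseStreamResponseHandler handler =
                    InvokeEndpointWithResponseStreamResponseHandler.builder()
                            .subscriber(InvokeEndpointWithResponseStreamResponseHandler.Visitor.builder()
                                    // Each PayloadPart event delivers one chunk of the response.
                                    .onPayloadPart(part -> System.out.print(part.bytes().asUtf8String()))
                                    .build())
                            .onError(t -> System.err.println("Stream failed: " + t))
                            .build();

            client.invokeEndpointWithResponseStream(request, handler).join();
        }
    }
}
```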

    " } }, "shapes":{ @@ -70,6 +88,7 @@ "max":63, "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" }, + "ErrorCode":{"type":"string"}, "Header":{ "type":"string", "max":1024, @@ -109,6 +128,16 @@ "fault":true, "synthetic":true }, + "InternalStreamFailure":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

    The stream processing failed because of an unknown error, exception or failure. Try your request again.

    ", + "exception":true, + "fault":true, + "synthetic":true + }, "InvocationTimeoutSecondsHeader":{ "type":"integer", "max":3600, @@ -123,7 +152,7 @@ "members":{ "EndpointName":{ "shape":"EndpointName", - "documentation":"

    The name of the endpoint that you specified when you created the endpoint using the CreateEndpoint API.

    ", + "documentation":"

    The name of the endpoint that you specified when you created the endpoint using the CreateEndpoint API.

    ", "location":"uri", "locationName":"EndpointName" }, @@ -135,13 +164,13 @@ }, "Accept":{ "shape":"Header", - "documentation":"

    The desired MIME type of the inference in the response.

    ", + "documentation":"

    The desired MIME type of the inference response from the model container.

    ", "location":"header", "locationName":"X-Amzn-SageMaker-Accept" }, "CustomAttributes":{ "shape":"CustomAttributesHeader", - "documentation":"

    Provides additional information about a request for an inference submitted to a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to provide an ID that you can use to track a request or to provide other metadata that a service endpoint was programmed to process. The value must consist of no more than 1024 visible US-ASCII characters as specified in Section 3.3.6. Field Value Components of the Hypertext Transfer Protocol (HTTP/1.1).

    The code in your model is responsible for setting or updating any custom attributes in the response. If your code does not set this value in the response, an empty value is returned. For example, if a custom attribute represents the trace ID, your model can prepend the custom attribute with Trace ID: in your post-processing function.

    This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker Python SDK.

    ", + "documentation":"

    Provides additional information about a request for an inference submitted to a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to provide an ID that you can use to track a request or to provide other metadata that a service endpoint was programmed to process. The value must consist of no more than 1024 visible US-ASCII characters as specified in Section 3.3.6. Field Value Components of the Hypertext Transfer Protocol (HTTP/1.1).

    The code in your model is responsible for setting or updating any custom attributes in the response. If your code does not set this value in the response, an empty value is returned. For example, if a custom attribute represents the trace ID, your model can prepend the custom attribute with Trace ID: in your post-processing function.

    This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker Python SDK.

    ", "location":"header", "locationName":"X-Amzn-SageMaker-Custom-Attributes" }, @@ -201,7 +230,7 @@ "members":{ "EndpointName":{ "shape":"EndpointName", - "documentation":"

    The name of the endpoint that you specified when you created the endpoint using the CreateEndpoint API.

    ", + "documentation":"

    The name of the endpoint that you specified when you created the endpoint using the CreateEndpoint API.

    ", "location":"uri", "locationName":"EndpointName" }, @@ -217,13 +246,13 @@ }, "Accept":{ "shape":"Header", - "documentation":"

    The desired MIME type of the inference in the response.

    ", + "documentation":"

    The desired MIME type of the inference response from the model container.

    ", "location":"header", "locationName":"Accept" }, "CustomAttributes":{ "shape":"CustomAttributesHeader", - "documentation":"

    Provides additional information about a request for an inference submitted to a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to provide an ID that you can use to track a request or to provide other metadata that a service endpoint was programmed to process. The value must consist of no more than 1024 visible US-ASCII characters as specified in Section 3.3.6. Field Value Components of the Hypertext Transfer Protocol (HTTP/1.1).

    The code in your model is responsible for setting or updating any custom attributes in the response. If your code does not set this value in the response, an empty value is returned. For example, if a custom attribute represents the trace ID, your model can prepend the custom attribute with Trace ID: in your post-processing function.

    This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker Python SDK.

    ", + "documentation":"

    Provides additional information about a request for an inference submitted to a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to provide an ID that you can use to track a request or to provide other metadata that a service endpoint was programmed to process. The value must consist of no more than 1024 visible US-ASCII characters as specified in Section 3.3.6. Field Value Components of the Hypertext Transfer Protocol (HTTP/1.1).

    The code in your model is responsible for setting or updating any custom attributes in the response. If your code does not set this value in the response, an empty value is returned. For example, if a custom attribute represents the trace ID, your model can prepend the custom attribute with Trace ID: in your post-processing function.

    This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker Python SDK.
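    Because the value is forwarded verbatim and echoed back only if the container sets it in its response, a common pattern is to carry a caller-side correlation ID. A minimal sketch with the synchronous Java client, assuming the generated names (SageMakerRuntimeClient, customAttributes) and a hypothetical endpoint:

```java
import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.services.sagemakerruntime.SageMakerRuntimeClient;
import software.amazon.awssdk.services.sagemakerruntime.model.InvokeEndpointRequest;
import software.amazon.awssdk.services.sagemakerruntime.model.InvokeEndpointResponse;

public class CustomAttributesSketch {
    public static void main(String[] args) {
        try (SageMakerRuntimeClient client = SageMakerRuntimeClient.create()) {
            InvokeEndpointRequest request = InvokeEndpointRequest.builder()
                    .endpointName("my-endpoint")                         // hypothetical endpoint
                    .contentType("application/json")
                    .customAttributes("trace-id=4f2a9c")                 // opaque, at most 1024 visible US-ASCII chars
                    .body(SdkBytes.fromUtf8String("{\"inputs\":\"hello\"}"))
                    .build();

            InvokeEndpointResponse response = client.invokeEndpoint(request);
            // Empty unless the model container copied the attribute into its response.
            System.out.println("Echoed custom attributes: " + response.customAttributes());
        }
    }
}
```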

    ", "location":"header", "locationName":"X-Amzn-SageMaker-Custom-Attributes" }, @@ -270,7 +299,7 @@ }, "ContentType":{ "shape":"Header", - "documentation":"

    The MIME type of the inference returned in the response body.

    ", + "documentation":"

    The MIME type of the inference returned from the model container.

    ", "location":"header", "locationName":"Content-Type" }, @@ -289,6 +318,88 @@ }, "payload":"Body" }, + "InvokeEndpointWithResponseStreamInput":{ + "type":"structure", + "required":[ + "EndpointName", + "Body" + ], + "members":{ + "EndpointName":{ + "shape":"EndpointName", + "documentation":"

    The name of the endpoint that you specified when you created the endpoint using the CreateEndpoint API.

    ", + "location":"uri", + "locationName":"EndpointName" + }, + "Body":{ + "shape":"BodyBlob", + "documentation":"

    Provides input data, in the format specified in the ContentType request header. Amazon SageMaker passes all of the data in the body to the model.

    For information about the format of the request body, see Common Data Formats-Inference.

    " + }, + "ContentType":{ + "shape":"Header", + "documentation":"

    The MIME type of the input data in the request body.

    ", + "location":"header", + "locationName":"Content-Type" + }, + "Accept":{ + "shape":"Header", + "documentation":"

    The desired MIME type of the inference response from the model container.

    ", + "location":"header", + "locationName":"X-Amzn-SageMaker-Accept" + }, + "CustomAttributes":{ + "shape":"CustomAttributesHeader", + "documentation":"

    Provides additional information about a request for an inference submitted to a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to provide an ID that you can use to track a request or to provide other metadata that a service endpoint was programmed to process. The value must consist of no more than 1024 visible US-ASCII characters as specified in Section 3.3.6. Field Value Components of the Hypertext Transfer Protocol (HTTP/1.1).

    The code in your model is responsible for setting or updating any custom attributes in the response. If your code does not set this value in the response, an empty value is returned. For example, if a custom attribute represents the trace ID, your model can prepend the custom attribute with Trace ID: in your post-processing function.

    This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker Python SDK.

    ", + "location":"header", + "locationName":"X-Amzn-SageMaker-Custom-Attributes" + }, + "TargetVariant":{ + "shape":"TargetVariantHeader", + "documentation":"

    Specify the production variant to send the inference request to when invoking an endpoint that is running two or more variants. Note that this parameter overrides the default behavior for the endpoint, which is to distribute the invocation traffic based on the variant weights.

    For information about how to use variant targeting to perform A/B testing, see Test models in production.

    ", + "location":"header", + "locationName":"X-Amzn-SageMaker-Target-Variant" + }, + "TargetContainerHostname":{ + "shape":"TargetContainerHostnameHeader", + "documentation":"

    If the endpoint hosts multiple containers and is configured to use direct invocation, this parameter specifies the host name of the container to invoke.

    ", + "location":"header", + "locationName":"X-Amzn-SageMaker-Target-Container-Hostname" + }, + "InferenceId":{ + "shape":"InferenceId", + "documentation":"

    An identifier that you assign to your request.

    ", + "location":"header", + "locationName":"X-Amzn-SageMaker-Inference-Id" + } + }, + "payload":"Body" + }, + "InvokeEndpointWithResponseStreamOutput":{ + "type":"structure", + "required":["Body"], + "members":{ + "Body":{"shape":"ResponseStream"}, + "ContentType":{ + "shape":"Header", + "documentation":"

    The MIME type of the inference returned from the model container.

    ", + "location":"header", + "locationName":"X-Amzn-SageMaker-Content-Type" + }, + "InvokedProductionVariant":{ + "shape":"Header", + "documentation":"

    Identifies the production variant that was invoked.

    ", + "location":"header", + "locationName":"x-Amzn-Invoked-Production-Variant" + }, + "CustomAttributes":{ + "shape":"CustomAttributesHeader", + "documentation":"

    Provides additional information in the response about the inference returned by a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to return an ID received in the CustomAttributes header of a request or other metadata that a service endpoint was programmed to produce. The value must consist of no more than 1024 visible US-ASCII characters as specified in Section 3.3.6. Field Value Components of the Hypertext Transfer Protocol (HTTP/1.1). If the customer wants the custom attribute returned, the model must set the custom attribute to be included on the way back.

    The code in your model is responsible for setting or updating any custom attributes in the response. If your code does not set this value in the response, an empty value is returned. For example, if a custom attribute represents the trace ID, your model can prepend the custom attribute with Trace ID: in your post-processing function.

    This feature is currently supported in the Amazon Web Services SDKs but not in the Amazon SageMaker Python SDK.

    ", + "location":"header", + "locationName":"X-Amzn-SageMaker-Custom-Attributes" + } + }, + "payload":"Body" + }, "LogStreamArn":{"type":"string"}, "Message":{ "type":"string", @@ -325,11 +436,59 @@ "exception":true, "synthetic":true }, + "ModelStreamError":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"}, + "ErrorCode":{ + "shape":"ErrorCode", + "documentation":"

    This error can have the following error codes:

    ModelInvocationTimeExceeded

    The model failed to finish sending the response within the timeout period allowed by Amazon SageMaker.

    StreamBroken

    The Transmission Control Protocol (TCP) connection between the client and the model was reset or closed.

    " + } + }, + "documentation":"

    An error occurred while streaming the response body. This error can have the following error codes:

    ModelInvocationTimeExceeded

    The model failed to finish sending the response within the timeout period allowed by Amazon SageMaker.

    StreamBroken

    The Transmission Control Protocol (TCP) connection between the client and the model was reset or closed.

    ", + "exception":true, + "synthetic":true + }, + "PartBlob":{ + "type":"blob", + "sensitive":true + }, + "PayloadPart":{ + "type":"structure", + "members":{ + "Bytes":{ + "shape":"PartBlob", + "documentation":"

    A blob that contains part of the response for your streaming inference request.

    ", + "eventpayload":true + } + }, + "documentation":"

    A wrapper for pieces of the payload that's returned in response to a streaming inference request. A streaming inference response consists of one or more payload parts.

    ", + "event":true + }, "RequestTTLSecondsHeader":{ "type":"integer", "max":21600, "min":60 }, + "ResponseStream":{ + "type":"structure", + "members":{ + "PayloadPart":{ + "shape":"PayloadPart", + "documentation":"

    A wrapper for pieces of the payload that's returned in response to a streaming inference request. A streaming inference response consists of one or more payload parts.

    " + }, + "ModelStreamError":{ + "shape":"ModelStreamError", + "documentation":"

    An error occurred while streaming the response body. This error can have the following error codes:

    ModelInvocationTimeExceeded

    The model failed to finish sending the response within the timeout period allowed by Amazon SageMaker.

    StreamBroken

    The Transmission Control Protocol (TCP) connection between the client and the model was reset or closed.

    " + }, + "InternalStreamFailure":{ + "shape":"InternalStreamFailure", + "documentation":"

    The stream processing failed because of an unknown error, exception or failure. Try your request again.

    " + } + }, + "documentation":"

    A stream of payload parts. Each part contains a portion of the response for a streaming inference request.
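    For callers that want the complete response rather than incremental chunks, one option is to concatenate the Bytes of each PayloadPart as the events arrive; ModelStreamError and InternalStreamFailure then surface as an exceptional completion of the call rather than as payload data. A sketch under the same naming assumptions as the earlier streaming example:

```java
import java.io.ByteArrayOutputStream;
import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.services.sagemakerruntime.SageMakerRuntimeAsyncClient;
import software.amazon.awssdk.services.sagemakerruntime.model.InvokeEndpointWithResponseStreamRequest;
import software.amazon.awssdk.services.sagemakerruntime.model.InvokeEndpointWithResponseStreamResponseHandler;

public class AssembleStreamSketch {
    static byte[] invokeAndAssemble(SageMakerRuntimeAsyncClient client, String endpointName, String jsonBody) {
        ByteArrayOutputStream assembled = new ByteArrayOutputStream();
        InvokeEndpointWithResponseStreamRequest request = InvokeEndpointWithResponseStreamRequest.builder()
                .endpointName(endpointName)
                .contentType("application/json")
                .body(SdkBytes.fromUtf8String(jsonBody))
                .build();
        InvokeEndpointWithResponseStreamResponseHandler handler =
                InvokeEndpointWithResponseStreamResponseHandler.builder()
                        .subscriber(InvokeEndpointWithResponseStreamResponseHandler.Visitor.builder()
                                // Append each chunk of the response as it arrives.
                                .onPayloadPart(part -> {
                                    byte[] chunk = part.bytes().asByteArray();
                                    assembled.write(chunk, 0, chunk.length);
                                })
                                .build())
                        .build();
        // ModelStreamError / InternalStreamFailure complete this future exceptionally.
        client.invokeEndpointWithResponseStream(request, handler).join();
        return assembled.toByteArray();
    }
}
```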

    ", + "eventstream":true + }, "ServiceUnavailable":{ "type":"structure", "members":{ diff --git a/services/savingsplans/pom.xml b/services/savingsplans/pom.xml index 061a8bf1f80d..c551160666c7 100644 --- a/services/savingsplans/pom.xml +++ b/services/savingsplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT savingsplans AWS Java SDK :: Services :: Savingsplans diff --git a/services/scheduler/pom.xml b/services/scheduler/pom.xml index f7cc8545bbe1..b29ea04cba00 100644 --- a/services/scheduler/pom.xml +++ b/services/scheduler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT scheduler AWS Java SDK :: Services :: Scheduler diff --git a/services/schemas/pom.xml b/services/schemas/pom.xml index 08aa38e4384c..45f873aa5ca9 100644 --- a/services/schemas/pom.xml +++ b/services/schemas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT schemas AWS Java SDK :: Services :: Schemas diff --git a/services/secretsmanager/pom.xml b/services/secretsmanager/pom.xml index 7fad7482eef1..5387c52be33d 100644 --- a/services/secretsmanager/pom.xml +++ b/services/secretsmanager/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT secretsmanager AWS Java SDK :: Services :: AWS Secrets Manager diff --git a/services/securityhub/pom.xml b/services/securityhub/pom.xml index 049d03dce9b9..7915b581fea1 100644 --- a/services/securityhub/pom.xml +++ b/services/securityhub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT securityhub AWS Java SDK :: Services :: SecurityHub diff --git a/services/securityhub/src/main/resources/codegen-resources/customization.config b/services/securityhub/src/main/resources/codegen-resources/customization.config index 045846615ab7..4f8246fb980f 100644 --- a/services/securityhub/src/main/resources/codegen-resources/customization.config +++ b/services/securityhub/src/main/resources/codegen-resources/customization.config @@ -9,7 +9,7 @@ "listInvitations", "listMembers" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "getEnabledStandards", "getInsights" ] diff --git a/services/securityhub/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/securityhub/src/main/resources/codegen-resources/endpoint-rule-set.json index bcaddb83461d..659462cfddb7 100644 --- a/services/securityhub/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/securityhub/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": 
"tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://securityhub-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://securityhub-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://securityhub-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://securityhub-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": 
"booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://securityhub.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://securityhub.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://securityhub.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://securityhub.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/securityhub/src/main/resources/codegen-resources/service-2.json b/services/securityhub/src/main/resources/codegen-resources/service-2.json index db6338f2b77f..1165e03a0615 100644 --- a/services/securityhub/src/main/resources/codegen-resources/service-2.json +++ b/services/securityhub/src/main/resources/codegen-resources/service-2.json @@ -396,7 +396,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

    Deletes the specified member accounts from Security Hub.

    Can be used to delete member accounts that belong to an organization as well as member accounts that were invited manually.

    " + "documentation":"

    Deletes the specified member accounts from Security Hub.

    You can invoke this API only to delete accounts that became members through invitation. You can't invoke this API to delete accounts that belong to an Organizations organization.
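    A minimal sketch of the call with the generated Java client (client and shape names assumed from the usual code generation; the account IDs are placeholders and, per the note above, must belong to invited member accounts):

```java
import software.amazon.awssdk.services.securityhub.SecurityHubClient;
import software.amazon.awssdk.services.securityhub.model.DeleteMembersRequest;
import software.amazon.awssdk.services.securityhub.model.DeleteMembersResponse;

public class DeleteMembersSketch {
    public static void main(String[] args) {
        try (SecurityHubClient securityHub = SecurityHubClient.create()) {
            DeleteMembersResponse response = securityHub.deleteMembers(
                    DeleteMembersRequest.builder()
                            .accountIds("111122223333", "444455556666") // invited member accounts only
                            .build());
            // Accounts that could not be deleted come back as unprocessed.
            response.unprocessedAccounts().forEach(a ->
                    System.out.println(a.accountId() + ": " + a.processingResult()));
        }
    }
}
```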

    " }, "DescribeActionTargets":{ "name":"DescribeActionTargets", @@ -541,7 +541,7 @@ {"shape":"InvalidAccessException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

    Disables Security Hub in your account only in the current Region. To disable Security Hub in all Regions, you must submit one request per Region where you have enabled Security Hub.

    When you disable Security Hub for an administrator account, it doesn't disable Security Hub for any associated member accounts.

    When you disable Security Hub, your existing findings and insights and any Security Hub configuration settings are deleted after 90 days and cannot be recovered. Any standards that were enabled are disabled, and your administrator and member account associations are removed.

    If you want to save your existing findings, you must export them before you disable Security Hub.

    " + "documentation":"

    Disables Security Hub in your account only in the current Amazon Web Services Region. To disable Security Hub in all Regions, you must submit one request per Region where you have enabled Security Hub.

    You can't disable Security Hub in an account that is currently the Security Hub administrator.

    When you disable Security Hub, your existing findings and insights and any Security Hub configuration settings are deleted after 90 days and cannot be recovered. Any standards that were enabled are disabled, and your administrator and member account associations are removed.

    If you want to save your existing findings, you must export them before you disable Security Hub.
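    One way to honor that warning with the Java SDK is to page through GetFindings and persist the results before calling DisableSecurityHub. The sketch below assumes the generated paginator and shape names and only prints to stdout for brevity; a real export would write the findings to durable storage first.

```java
import software.amazon.awssdk.services.securityhub.SecurityHubClient;
import software.amazon.awssdk.services.securityhub.model.DisableSecurityHubRequest;
import software.amazon.awssdk.services.securityhub.model.GetFindingsRequest;

public class ExportThenDisableSketch {
    public static void main(String[] args) {
        try (SecurityHubClient securityHub = SecurityHubClient.create()) {
            // Export (here: print) every finding before anything is deleted.
            securityHub.getFindingsPaginator(GetFindingsRequest.builder().maxResults(100).build())
                       .findings()
                       .forEach(finding -> System.out.println(finding.id()));

            // Only after the findings are safely stored elsewhere:
            securityHub.disableSecurityHub(DisableSecurityHubRequest.builder().build());
        }
    }
}
```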

    " }, "DisassociateFromAdministratorAccount":{ "name":"DisassociateFromAdministratorAccount", @@ -1571,143 +1571,143 @@ "members":{ "ProductArn":{ "shape":"StringFilterList", - "documentation":"

    The Amazon Resource Name (ARN) for a third-party product that generated a finding in Security Hub.

    " + "documentation":"

    The Amazon Resource Name (ARN) for a third-party product that generated a finding in Security Hub.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "AwsAccountId":{ "shape":"StringFilterList", - "documentation":"

    The Amazon Web Services account ID in which a finding was generated.

    " + "documentation":"

    The Amazon Web Services account ID in which a finding was generated.

    Array Members: Minimum number of 1 item. Maximum number of 100 items.

    " }, "Id":{ "shape":"StringFilterList", - "documentation":"

    The product-specific identifier for a finding.

    " + "documentation":"

    The product-specific identifier for a finding.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "GeneratorId":{ "shape":"StringFilterList", - "documentation":"

    The identifier for the solution-specific component that generated a finding.

    " + "documentation":"

    The identifier for the solution-specific component that generated a finding.

    Array Members: Minimum number of 1 item. Maximum number of 100 items.

    " }, "Type":{ "shape":"StringFilterList", - "documentation":"

    One or more finding types in the format of namespace/category/classifier that classify a finding. For a list of namespaces, classifiers, and categories, see Types taxonomy for ASFF in the Security Hub User Guide.

    " + "documentation":"

    One or more finding types in the format of namespace/category/classifier that classify a finding. For a list of namespaces, classifiers, and categories, see Types taxonomy for ASFF in the Security Hub User Guide.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "FirstObservedAt":{ "shape":"DateFilterList", - "documentation":"

    A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product.

    Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

    " + "documentation":"

    A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product.

    Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "LastObservedAt":{ "shape":"DateFilterList", - "documentation":"

    A timestamp that indicates when the potential security issue captured by a finding was most recently observed by the security findings product.

    Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

    " + "documentation":"

    A timestamp that indicates when the potential security issue captured by a finding was most recently observed by the security findings product.

    Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "CreatedAt":{ "shape":"DateFilterList", - "documentation":"

    A timestamp that indicates when this finding record was created.

    Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

    " + "documentation":"

    A timestamp that indicates when this finding record was created.

    Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "UpdatedAt":{ "shape":"DateFilterList", - "documentation":"

    A timestamp that indicates when the finding record was most recently updated.

    Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

    " + "documentation":"

    A timestamp that indicates when the finding record was most recently updated.

    Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "Confidence":{ "shape":"NumberFilterList", - "documentation":"

    The likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0–100 basis using a ratio scale. A value of 0 means 0 percent confidence, and a value of 100 means 100 percent confidence. For example, a data exfiltration detection based on a statistical deviation of network traffic has low confidence because an actual exfiltration hasn't been verified. For more information, see Confidence in the Security Hub User Guide.

    " + "documentation":"

    The likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0–100 basis using a ratio scale. A value of 0 means 0 percent confidence, and a value of 100 means 100 percent confidence. For example, a data exfiltration detection based on a statistical deviation of network traffic has low confidence because an actual exfiltration hasn't been verified. For more information, see Confidence in the Security Hub User Guide.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "Criticality":{ "shape":"NumberFilterList", - "documentation":"

    The level of importance that is assigned to the resources that are associated with a finding. Criticality is scored on a 0–100 basis, using a ratio scale that supports only full integers. A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources. For more information, see Criticality in the Security Hub User Guide.

    " + "documentation":"

    The level of importance that is assigned to the resources that are associated with a finding. Criticality is scored on a 0–100 basis, using a ratio scale that supports only full integers. A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources. For more information, see Criticality in the Security Hub User Guide.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "Title":{ "shape":"StringFilterList", - "documentation":"

    A finding's title.

    " + "documentation":"

    A finding's title.

    Array Members: Minimum number of 1 item. Maximum number of 100 items.

    " }, "Description":{ "shape":"StringFilterList", - "documentation":"

    A finding's description.

    " + "documentation":"

    A finding's description.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "SourceUrl":{ "shape":"StringFilterList", - "documentation":"

    Provides a URL that links to a page about the current finding in the finding product.

    " + "documentation":"

    Provides a URL that links to a page about the current finding in the finding product.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "ProductName":{ "shape":"StringFilterList", - "documentation":"

    Provides the name of the product that generated the finding. For control-based findings, the product name is Security Hub.

    " + "documentation":"

    Provides the name of the product that generated the finding. For control-based findings, the product name is Security Hub.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "CompanyName":{ "shape":"StringFilterList", - "documentation":"

    The name of the company for the product that generated the finding. For control-based findings, the company is Amazon Web Services.

    " + "documentation":"

    The name of the company for the product that generated the finding. For control-based findings, the company is Amazon Web Services.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "SeverityLabel":{ "shape":"StringFilterList", - "documentation":"

    The severity value of the finding.

    " + "documentation":"

    The severity value of the finding.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "ResourceType":{ "shape":"StringFilterList", - "documentation":"

    The type of resource that the finding pertains to.

    " + "documentation":"

    The type of resource that the finding pertains to.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "ResourceId":{ "shape":"StringFilterList", - "documentation":"

    The identifier for the given resource type. For Amazon Web Services resources that are identified by Amazon Resource Names (ARNs), this is the ARN. For Amazon Web Services resources that lack ARNs, this is the identifier as defined by the Amazon Web Service that created the resource. For non-Amazon Web Services resources, this is a unique identifier that is associated with the resource.

    " + "documentation":"

    The identifier for the given resource type. For Amazon Web Services resources that are identified by Amazon Resource Names (ARNs), this is the ARN. For Amazon Web Services resources that lack ARNs, this is the identifier as defined by the Amazon Web Service that created the resource. For non-Amazon Web Services resources, this is a unique identifier that is associated with the resource.

    Array Members: Minimum number of 1 item. Maximum number of 100 items.

    " }, "ResourcePartition":{ "shape":"StringFilterList", - "documentation":"

    The partition in which the resource that the finding pertains to is located. A partition is a group of Amazon Web Services Regions. Each Amazon Web Services account is scoped to one partition.

    " + "documentation":"

    The partition in which the resource that the finding pertains to is located. A partition is a group of Amazon Web Services Regions. Each Amazon Web Services account is scoped to one partition.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "ResourceRegion":{ "shape":"StringFilterList", - "documentation":"

    The Amazon Web Services Region where the resource that a finding pertains to is located.

    " + "documentation":"

    The Amazon Web Services Region where the resource that a finding pertains to is located.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "ResourceTags":{ "shape":"MapFilterList", - "documentation":"

    A list of Amazon Web Services tags associated with a resource at the time the finding was processed.

    " + "documentation":"

    A list of Amazon Web Services tags associated with a resource at the time the finding was processed.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "ResourceDetailsOther":{ "shape":"MapFilterList", - "documentation":"

    Custom fields and values about the resource that a finding pertains to.

    " + "documentation":"

    Custom fields and values about the resource that a finding pertains to.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "ComplianceStatus":{ "shape":"StringFilterList", - "documentation":"

    The result of a security check. This field is only used for findings generated from controls.

    " + "documentation":"

    The result of a security check. This field is only used for findings generated from controls.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "ComplianceSecurityControlId":{ "shape":"StringFilterList", - "documentation":"

    The security control ID for which a finding was generated. Security control IDs are the same across standards.

    " + "documentation":"

    The security control ID for which a finding was generated. Security control IDs are the same across standards.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "ComplianceAssociatedStandardsId":{ "shape":"StringFilterList", - "documentation":"

    The unique identifier of a standard in which a control is enabled. This field consists of the resource portion of the Amazon Resource Name (ARN) returned for a standard in the DescribeStandards API response.

    " + "documentation":"

    The unique identifier of a standard in which a control is enabled. This field consists of the resource portion of the Amazon Resource Name (ARN) returned for a standard in the DescribeStandards API response.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "VerificationState":{ "shape":"StringFilterList", - "documentation":"

    Provides the veracity of a finding.

    " + "documentation":"

    Provides the veracity of a finding.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "WorkflowStatus":{ "shape":"StringFilterList", - "documentation":"

    Provides information about the status of the investigation into a finding.

    " + "documentation":"

    Provides information about the status of the investigation into a finding.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "RecordState":{ "shape":"StringFilterList", - "documentation":"

    Provides the current state of a finding.

    " + "documentation":"

    Provides the current state of a finding.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "RelatedFindingsProductArn":{ "shape":"StringFilterList", - "documentation":"

    The ARN for the product that generated a related finding.

    " + "documentation":"

    The ARN for the product that generated a related finding.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "RelatedFindingsId":{ "shape":"StringFilterList", - "documentation":"

    The product-generated identifier for a related finding.

    " + "documentation":"

    The product-generated identifier for a related finding.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "NoteText":{ "shape":"StringFilterList", - "documentation":"

    The text of a user-defined note that's added to a finding.

    " + "documentation":"

    The text of a user-defined note that's added to a finding.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "NoteUpdatedAt":{ "shape":"DateFilterList", - "documentation":"

    The timestamp of when the note was updated. Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

    " + "documentation":"

    The timestamp of when the note was updated. Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "NoteUpdatedBy":{ "shape":"StringFilterList", - "documentation":"

    The principal that created a note.

    " + "documentation":"

    The principal that created a note.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " }, "UserDefinedFields":{ "shape":"MapFilterList", - "documentation":"

    A list of user-defined name and value string pairs added to a finding.

    " + "documentation":"

    A list of user-defined name and value string pairs added to a finding.

    Array Members: Minimum number of 1 item. Maximum number of 20 items.

    " } }, "documentation":"

    The criteria that determine which findings a rule applies to.
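    To illustrate how these criteria are supplied in practice, here is a hedged sketch that builds a small automation-rule filter set through the generated Java model; the class and member names (AutomationRulesFindingFilters, StringFilter, and the field setters) are assumed to follow the usual code generation, and the values are placeholders. Note the Array Members limits documented above: most filter lists accept at most 20 entries, with 100 allowed for a few fields such as AwsAccountId, GeneratorId, Title, and ResourceId.

```java
import software.amazon.awssdk.services.securityhub.model.AutomationRulesFindingFilters;
import software.amazon.awssdk.services.securityhub.model.StringFilter;
import software.amazon.awssdk.services.securityhub.model.StringFilterComparison;

public class RuleFiltersSketch {
    public static void main(String[] args) {
        // Each list below must stay within the documented Array Members limits.
        AutomationRulesFindingFilters filters = AutomationRulesFindingFilters.builder()
                .productName(StringFilter.builder()
                        .comparison(StringFilterComparison.EQUALS)
                        .value("Security Hub")
                        .build())
                .severityLabel(StringFilter.builder()
                        .comparison(StringFilterComparison.EQUALS)
                        .value("CRITICAL")
                        .build())
                .recordState(StringFilter.builder()
                        .comparison(StringFilterComparison.EQUALS)
                        .value("ACTIVE")
                        .build())
                .build();
        System.out.println(filters);
    }
}
```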

    " @@ -12768,6 +12768,10 @@ "Sample":{ "shape":"Boolean", "documentation":"

@@ -12768,6 +12768,10 @@
       "Sample":{
         "shape":"Boolean",
         "documentation":"Indicates whether the finding is a sample finding."
+      },
+      "GeneratorDetails":{
+        "shape":"GeneratorDetails",
+        "documentation":"Provides metadata for the Amazon CodeGuru detector associated with a finding. This field pertains to findings that relate to Lambda functions. Amazon Inspector identifies policy violations and vulnerabilities in Lambda function code based on internal detectors developed in collaboration with Amazon CodeGuru. Security Hub receives those findings."
       }
     },
     "documentation":"Provides a consistent format for Security Hub findings. AwsSecurityFinding format allows you to share findings between Amazon Web Services security services and third-party solutions. A finding is a potential security issue generated either by Amazon Web Services services or by the integrated third-party solutions and standards checks."

@@ -14629,6 +14633,28 @@
       },
       "documentation":"Provides details about the current status of the sensitive data detection."
     },
+    "CodeVulnerabilitiesFilePath":{
+      "type":"structure",
+      "members":{
+        "EndLine":{
+          "shape":"Integer",
+          "documentation":"The line number of the last line of code in which the vulnerability is located."
+        },
+        "FileName":{
+          "shape":"NonEmptyString",
+          "documentation":"The name of the file in which the code vulnerability is located."
+        },
+        "FilePath":{
+          "shape":"NonEmptyString",
+          "documentation":"The file path to the code in which the vulnerability is located."
+        },
+        "StartLine":{
+          "shape":"Integer",
+          "documentation":"The line number of the first line of code in which the vulnerability is located."
+        }
+      },
+      "documentation":"Provides details about where a code vulnerability is located in your Lambda function."
+    },

    "Compliance":{
      "type":"structure",
      "members":{
@@ -15720,6 +15746,24 @@
       "type":"list",
       "member":{"shape":"FirewallPolicyStatelessRuleGroupReferencesDetails"}
     },
+    "GeneratorDetails":{
+      "type":"structure",
+      "members":{
+        "Name":{
+          "shape":"NonEmptyString",
+          "documentation":"The name of the detector used to identify the code vulnerability."
+        },
+        "Description":{
+          "shape":"NonEmptyString",
+          "documentation":"The description of the detector used to identify the code vulnerability."
+        },
+        "Labels":{
+          "shape":"TypeList",
+          "documentation":"An array of tags used to identify the detector associated with the finding."
+        }
+      },
+      "documentation":"Provides metadata for the Amazon CodeGuru detector associated with a finding. This field pertains to findings that relate to Lambda functions. Amazon Inspector identifies policy violations and vulnerabilities in Lambda function code based on internal detectors developed in collaboration with Amazon CodeGuru. Security Hub receives those findings."
+    },
    "GeoLocation":{
      "type":"structure",
      "members":{
@@ -19497,10 +19541,51 @@
       "FixAvailable":{
         "shape":"VulnerabilityFixAvailable",
         "documentation":"Specifies if all vulnerable packages in a finding have a value for FixedInVersion and Remediation. This field is evaluated for each vulnerability Id based on the number of vulnerable packages that have a value for both FixedInVersion and Remediation. Valid values are as follows: • YES if all vulnerable packages have a value for both FixedInVersion and Remediation • NO if no vulnerable packages have a value for FixedInVersion and Remediation • PARTIAL otherwise"
+      },
+      "EpssScore":{
+        "shape":"Double",
+        "documentation":"The Exploit Prediction Scoring System (EPSS) score for a finding."
+      },
+      "ExploitAvailable":{
+        "shape":"VulnerabilityExploitAvailable",
+        "documentation":"Whether an exploit is available for a finding."
+      },
+      "CodeVulnerabilities":{
+        "shape":"VulnerabilityCodeVulnerabilitiesList",
+        "documentation":"The vulnerabilities found in your Lambda function code. This field pertains to findings that Security Hub receives from Amazon Inspector."
       }
     },
     "documentation":"A vulnerability associated with a finding."
   },

+    "VulnerabilityCodeVulnerabilities":{
+      "type":"structure",
+      "members":{
+        "Cwes":{
+          "shape":"TypeList",
+          "documentation":"The Common Weakness Enumeration (CWE) item associated with the detected code vulnerability."
+        },
+        "FilePath":{
+          "shape":"CodeVulnerabilitiesFilePath",
+          "documentation":"Provides details about where a code vulnerability is located in your Lambda function."
+        },
+        "SourceArn":{
+          "shape":"NonEmptyString",
+          "documentation":"The Amazon Resource Name (ARN) of the Lambda layer in which the code vulnerability is located."
+        }
+      },
+      "documentation":"Provides details about the vulnerabilities found in your Lambda function code. This field pertains to findings that Security Hub receives from Amazon Inspector."
+    },
+    "VulnerabilityCodeVulnerabilitiesList":{
+      "type":"list",
+      "member":{"shape":"VulnerabilityCodeVulnerabilities"}
+    },
+    "VulnerabilityExploitAvailable":{
+      "type":"string",
+      "enum":[
+        "YES",
+        "NO"
+      ]
+    },

     "VulnerabilityFixAvailable":{
       "type":"string",
       "enum":[
@@ -19582,7 +19667,7 @@
         "documentation":"The status of the investigation into the finding. The workflow status is specific to an individual finding. It does not affect the generation of new findings. For example, setting the workflow status to SUPPRESSED or RESOLVED does not prevent a new finding for the same issue. The allowed values are the following. • NEW - The initial state of a finding, before it is reviewed. Security Hub also resets the workflow status from NOTIFIED or RESOLVED to NEW in the following cases: • RecordState changes from ARCHIVED to ACTIVE. • ComplianceStatus changes from PASSED to either WARNING, FAILED, or NOT_AVAILABLE. • NOTIFIED - Indicates that you notified the resource owner about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner. • SUPPRESSED - Indicates that you reviewed the finding and do not believe that any action is needed. The finding is no longer updated. • RESOLVED - The finding was reviewed and remediated and is now considered resolved."
       }
     },
-     "documentation":"Provides information about the status of the investigation into a finding."
+     "documentation":"Provides details about the status of the investigation into a finding."
   },

     "WorkflowState":{
       "type":"string",
@@ -19616,5 +19701,5 @@
       "documentation":"Used to update information about the investigation into the finding."
     }
   },
-  "documentation":"

    Security Hub provides you with a comprehensive view of the security state of your Amazon Web Services environment and resources. It also provides you with the readiness status of your environment based on controls from supported security standards. Security Hub collects security data from Amazon Web Services accounts, services, and integrated third-party products and helps you analyze security trends in your environment to identify the highest priority security issues. For more information about Security Hub, see the Security HubUser Guide.

    When you use operations in the Security Hub API, the requests are executed only in the Amazon Web Services Region that is currently active or in the specific Amazon Web Services Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, run the same command for each Region in which you want to apply the change.

    For example, if your Region is set to us-west-2, when you use CreateMembers to add a member account to Security Hub, the association of the member account with the administrator account is created only in the us-west-2 Region. Security Hub must be enabled for the member account in the same Region that the invitation was sent from.

    The following throttling limits apply to using Security Hub API operations.

    • BatchEnableStandards - RateLimit of 1 request per second. BurstLimit of 1 request per second.

    • GetFindings - RateLimit of 3 requests per second. BurstLimit of 6 requests per second.

    • BatchImportFindings - RateLimit of 10 requests per second. BurstLimit of 30 requests per second.

    • BatchUpdateFindings - RateLimit of 10 requests per second. BurstLimit of 30 requests per second.

    • UpdateStandardsControl - RateLimit of 1 request per second. BurstLimit of 5 requests per second.

    • All other operations - RateLimit of 10 requests per second. BurstLimit of 30 requests per second.

    " + "documentation":"

    Security Hub provides you with a comprehensive view of the security state of your Amazon Web Services environment and resources. It also provides you with the readiness status of your environment based on controls from supported security standards. Security Hub collects security data from Amazon Web Services accounts, services, and integrated third-party products and helps you analyze security trends in your environment to identify the highest priority security issues. For more information about Security Hub, see the Security Hub User Guide .

    When you use operations in the Security Hub API, the requests are executed only in the Amazon Web Services Region that is currently active or in the specific Amazon Web Services Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, run the same command for each Region in which you want to apply the change.

    For example, if your Region is set to us-west-2, when you use CreateMembers to add a member account to Security Hub, the association of the member account with the administrator account is created only in the us-west-2 Region. Security Hub must be enabled for the member account in the same Region that the invitation was sent from.

    The following throttling limits apply to using Security Hub API operations.

    • BatchEnableStandards - RateLimit of 1 request per second. BurstLimit of 1 request per second.

    • GetFindings - RateLimit of 3 requests per second. BurstLimit of 6 requests per second.

    • BatchImportFindings - RateLimit of 10 requests per second. BurstLimit of 30 requests per second.

    • BatchUpdateFindings - RateLimit of 10 requests per second. BurstLimit of 30 requests per second.

    • UpdateStandardsControl - RateLimit of 1 request per second. BurstLimit of 5 requests per second.

    • All other operations - RateLimit of 10 requests per second. BurstLimit of 30 requests per second.

    " } diff --git a/services/securitylake/pom.xml b/services/securitylake/pom.xml index 5653d935b80f..fc257e2808d3 100644 --- a/services/securitylake/pom.xml +++ b/services/securitylake/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT securitylake AWS Java SDK :: Services :: Security Lake diff --git a/services/securitylake/src/main/resources/codegen-resources/endpoint-tests.json b/services/securitylake/src/main/resources/codegen-resources/endpoint-tests.json index 44e4b725ce15..35e974c2b0f5 100644 --- a/services/securitylake/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/securitylake/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,55 +1,55 @@ { "testCases": [ { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://securitylake-fips.us-gov-east-1.api.aws" + "url": "https://securitylake-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://securitylake-fips.us-gov-east-1.amazonaws.com" + "url": "https://securitylake-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://securitylake.us-gov-east-1.api.aws" + "url": "https://securitylake.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://securitylake.us-gov-east-1.amazonaws.com" + "url": "https://securitylake.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -99,109 +99,109 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": 
"https://securitylake-fips.us-gov-east-1.api.aws" + } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://securitylake-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://securitylake-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://securitylake.us-gov-east-1.api.aws" + } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://securitylake.us-iso-east-1.c2s.ic.gov" + "url": "https://securitylake.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://securitylake-fips.us-east-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://securitylake-fips.us-east-1.amazonaws.com" + "url": "https://securitylake-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://securitylake.us-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://securitylake.us-east-1.amazonaws.com" + "url": "https://securitylake.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -210,9 +210,9 @@ "error": "FIPS and 
DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -223,9 +223,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -234,9 +234,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -247,22 +247,35 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { "UseFIPS": false, - "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -272,9 +285,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -284,11 +297,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/securitylake/src/main/resources/codegen-resources/service-2.json b/services/securitylake/src/main/resources/codegen-resources/service-2.json index fd01ceeef35b..2aadfa9cc226 100644 --- a/services/securitylake/src/main/resources/codegen-resources/service-2.json +++ b/services/securitylake/src/main/resources/codegen-resources/service-2.json @@ -1833,7 +1833,8 @@ }, "NextToken":{ "type":"string", - "pattern":"^[\\\\\\w\\-_:/.@=+]*$" + "max":2048, + "min":0 }, "NotificationConfiguration":{ "type":"structure", diff --git a/services/serverlessapplicationrepository/pom.xml b/services/serverlessapplicationrepository/pom.xml index 6c3d2532f72b..83464c27d6c0 100644 --- a/services/serverlessapplicationrepository/pom.xml +++ b/services/serverlessapplicationrepository/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 serverlessapplicationrepository diff --git a/services/serverlessapplicationrepository/src/main/resources/codegen-resources/customization.config b/services/serverlessapplicationrepository/src/main/resources/codegen-resources/customization.config index fece2bf044f0..2fb45a9d3f02 100644 --- a/services/serverlessapplicationrepository/src/main/resources/codegen-resources/customization.config +++ b/services/serverlessapplicationrepository/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,4 @@ { - 
"blacklistedSimpleMethods" : ["createApplication"], + "excludedSimpleMethods" : ["createApplication"], "verifiedSimpleMethods" : ["listApplications"] } diff --git a/services/servicecatalog/pom.xml b/services/servicecatalog/pom.xml index cfefb45193e6..abe7c9c79fc8 100644 --- a/services/servicecatalog/pom.xml +++ b/services/servicecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT servicecatalog AWS Java SDK :: Services :: AWS Service Catalog diff --git a/services/servicecatalogappregistry/pom.xml b/services/servicecatalogappregistry/pom.xml index 5a4fa7ee1250..fe77038cccbc 100644 --- a/services/servicecatalogappregistry/pom.xml +++ b/services/servicecatalogappregistry/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT servicecatalogappregistry AWS Java SDK :: Services :: Service Catalog App Registry diff --git a/services/servicediscovery/pom.xml b/services/servicediscovery/pom.xml index 34e9cd51d092..8fbd39835f70 100644 --- a/services/servicediscovery/pom.xml +++ b/services/servicediscovery/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 servicediscovery diff --git a/services/servicequotas/pom.xml b/services/servicequotas/pom.xml index b2d90fbba0cc..b34021601029 100644 --- a/services/servicequotas/pom.xml +++ b/services/servicequotas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT servicequotas AWS Java SDK :: Services :: Service Quotas diff --git a/services/servicequotas/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/servicequotas/src/main/resources/codegen-resources/endpoint-rule-set.json index 05489b7fa44e..df63941e7487 100644 --- a/services/servicequotas/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/servicequotas/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,21 +45,69 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ { - "fn": "parseURL", + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", "argv": [ { - "ref": "Endpoint" + "ref": "Region" } ], - "assign": "url" + "assign": "PartitionResult" } ], "type": "tree", @@ -75,13 +122,17 @@ }, true ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], "type": "tree", "rules": [ { @@ -89,143 +140,86 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": 
"PartitionResult" + }, + "supportsFIPS" + ] + } ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "PartitionResult" - }, - "supportsFIPS" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } ] } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + ], + "type": "tree", + "rules": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] + "conditions": [], + "endpoint": { + "url": "https://servicequotas-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": "https://servicequotas-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], "type": "tree", "rules": [ { @@ -247,7 +241,7 @@ } ], "endpoint": { - "url": "https://servicequotas.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://servicequotas.{Region}.amazonaws.com", "properties": {}, "headers": {} }, @@ -263,78 +257,83 @@ "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://servicequotas.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + 
}, + "type": "endpoint" + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://servicequotas.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://servicequotas.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://servicequotas.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/servicequotas/src/main/resources/codegen-resources/endpoint-tests.json b/services/servicequotas/src/main/resources/codegen-resources/endpoint-tests.json index a7b637a9d4d4..c4b5fe6484ab 100644 --- a/services/servicequotas/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/servicequotas/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,822 +1,29 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - 
"documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.me-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.me-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.me-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.me-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://servicequotas-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack 
disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region 
eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.ap-northeast-2.api.aws" - } - }, - "params": { - 
"UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.ap-northeast-1.amazonaws.com" + "url": "https://servicequotas.af-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": true + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.ap-northeast-1.api.aws" + "url": "https://servicequotas.ap-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": false + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -827,594 +34,547 @@ } }, "params": { - "UseDualStack": false, "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.me-south-1.amazonaws.com" + "url": "https://servicequotas.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": false + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.sa-east-1.api.aws" + "url": "https://servicequotas.ap-northeast-3.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": true + "Region": "ap-northeast-3", + 
"UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.sa-east-1.amazonaws.com" + "url": "https://servicequotas.ap-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": true + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.sa-east-1.api.aws" + "url": "https://servicequotas.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.sa-east-1.amazonaws.com" + "url": "https://servicequotas.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.ap-east-1.api.aws" + "url": "https://servicequotas.ap-southeast-3.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": true + "Region": "ap-southeast-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.ap-east-1.amazonaws.com" + "url": "https://servicequotas.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": true + "Region": "ca-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.ap-east-1.api.aws" + "url": "https://servicequotas.eu-central-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": false + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.ap-east-1.amazonaws.com" + "url": "https://servicequotas.eu-north-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": false + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled 
and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://servicequotas.eu-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": true + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.cn-north-1.amazonaws.com.cn" + "url": "https://servicequotas.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": true + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://servicequotas.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": false + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.cn-north-1.amazonaws.com.cn" + "url": "https://servicequotas.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": false + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.us-gov-west-1.api.aws" + "url": "https://servicequotas.me-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.us-gov-west-1.amazonaws.com" + "url": "https://servicequotas.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.us-gov-west-1.api.aws" + "url": "https://servicequotas.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.us-gov-west-1.amazonaws.com" + "url": "https://servicequotas.us-east-2.amazonaws.com" 
} }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.ap-southeast-1.api.aws" + "url": "https://servicequotas.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.ap-southeast-1.amazonaws.com" + "url": "https://servicequotas.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://servicequotas.ap-southeast-1.api.aws" + "url": "https://servicequotas-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.ap-southeast-1.amazonaws.com" + "url": "https://servicequotas-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.ap-southeast-2.api.aws" + "url": "https://servicequotas.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.ap-southeast-2.amazonaws.com" + "url": "https://servicequotas-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.ap-southeast-2.api.aws" + "url": "https://servicequotas-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + 
"UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://servicequotas.ap-southeast-2.amazonaws.com" + "url": "https://servicequotas.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.ap-southeast-3.api.aws" + "url": "https://servicequotas.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.ap-southeast-3.amazonaws.com" + "url": "https://servicequotas.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-3", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.ap-southeast-3.api.aws" + "url": "https://servicequotas.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.ap-southeast-3.amazonaws.com" + "url": "https://servicequotas.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-3", - "UseFIPS": false + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.us-east-1.api.aws" + "url": "https://servicequotas.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.us-east-1.amazonaws.com" + "url": "https://servicequotas-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + 
"documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://servicequotas.us-east-1.api.aws" + "url": "https://servicequotas.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://servicequotas.us-east-1.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.us-east-2.api.aws" + "url": "https://servicequotas-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://servicequotas-fips.us-east-2.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.us-east-2.api.aws" + "url": "https://servicequotas.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://servicequotas.us-east-2.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://servicequotas-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and 
DualStack enabled", "expect": { - "endpoint": { - "url": "https://servicequotas-fips.cn-northwest-1.amazonaws.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://servicequotas.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.cn-northwest-1.amazonaws.com.cn" + "url": "https://example.com" } }, "params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1424,9 +584,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1436,11 +596,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/servicequotas/src/main/resources/codegen-resources/service-2.json b/services/servicequotas/src/main/resources/codegen-resources/service-2.json index 073012f5c3cf..5bcc52fae10f 100644 --- a/services/servicequotas/src/main/resources/codegen-resources/service-2.json +++ b/services/servicequotas/src/main/resources/codegen-resources/service-2.json @@ -30,7 +30,7 @@ {"shape":"TemplatesNotAvailableInRegionException"}, {"shape":"NoAvailableOrganizationException"} ], - "documentation":"

    Associates your quota request template with your organization. When a new account is created in your organization, the quota increase requests in the template are automatically applied to the account. You can add a quota increase request for any adjustable quota to your template.

    " + "documentation":"

    Associates your quota request template with your organization. When a new Amazon Web Services account is created in your organization, the quota increase requests in the template are automatically applied to the account. You can add a quota increase request for any adjustable quota to your template.

    " }, "DeleteServiceQuotaIncreaseRequestFromTemplate":{ "name":"DeleteServiceQuotaIncreaseRequestFromTemplate", @@ -71,7 +71,7 @@ {"shape":"TemplatesNotAvailableInRegionException"}, {"shape":"NoAvailableOrganizationException"} ], - "documentation":"

    Disables your quota request template. After a template is disabled, the quota increase requests in the template are not applied to new accounts in your organization. Disabling a quota request template does not apply its quota increase requests.

    " + "documentation":"

    Disables your quota request template. After a template is disabled, the quota increase requests in the template are not applied to new Amazon Web Services accounts in your organization. Disabling a quota request template does not apply its quota increase requests.

    " }, "GetAWSDefaultServiceQuota":{ "name":"GetAWSDefaultServiceQuota", @@ -181,7 +181,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Lists the default values for the quotas for the specified AWS service. A default value does not reflect any quota increases.

    " + "documentation":"

    Lists the default values for the quotas for the specified Amazon Web Service. A default value does not reflect any quota increases.

    " }, "ListRequestedServiceQuotaChangeHistory":{ "name":"ListRequestedServiceQuotaChangeHistory", @@ -199,7 +199,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Retrieves the quota increase requests for the specified service.

    " + "documentation":"

    Retrieves the quota increase requests for the specified Amazon Web Service.

    " }, "ListRequestedServiceQuotaChangeHistoryByQuota":{ "name":"ListRequestedServiceQuotaChangeHistoryByQuota", @@ -255,7 +255,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Lists the applied quota values for the specified AWS service. For some quotas, only the default values are available. If the applied quota value is not available for a quota, the quota is not retrieved.

    " + "documentation":"

    Lists the applied quota values for the specified Amazon Web Service. For some quotas, only the default values are available. If the applied quota value is not available for a quota, the quota is not retrieved.

    " }, "ListServices":{ "name":"ListServices", @@ -272,7 +272,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Lists the names and codes for the services integrated with Service Quotas.

    " + "documentation":"

    Lists the names and codes for the Amazon Web Services integrated with Service Quotas.

    " }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -394,6 +394,14 @@ "min":1, "pattern":"arn:aws(-[\\w]+)*:*:.+:[0-9]{12}:.+" }, + "AppliedLevelEnum":{ + "type":"string", + "enum":[ + "ACCOUNT", + "RESOURCE", + "ALL" + ] + }, "AssociateServiceQuotaTemplateRequest":{ "type":"structure", "members":{ @@ -422,15 +430,15 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "QuotaCode":{ "shape":"QuotaCode", - "documentation":"

    The quota identifier.

    " + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " }, "AwsRegion":{ "shape":"AwsRegion", - "documentation":"

    The AWS Region.

    " + "documentation":"

    Specifies the Amazon Web Services Region for which the request was made.

    " } } }, @@ -472,7 +480,7 @@ "members":{ "ErrorCode":{ "shape":"ErrorCode", - "documentation":"

    Service Quotas returns the following error values:

    • DEPENDENCY_ACCESS_DENIED_ERROR - The caller does not have the required permissions to complete the action. To resolve the error, you must have permission to access the service or quota.

    • DEPENDENCY_THROTTLING_ERROR - The service is throttling Service Quotas.

    • DEPENDENCY_SERVICE_ERROR - The service is not available.

    • SERVICE_QUOTA_NOT_AVAILABLE_ERROR - There was an error in Service Quotas.

    " + "documentation":"

    Service Quotas returns the following error values:

    • DEPENDENCY_ACCESS_DENIED_ERROR - The caller does not have the required permissions to complete the action. To resolve the error, you must have permission to access the Amazon Web Service or quota.

    • DEPENDENCY_THROTTLING_ERROR - The Amazon Web Service is throttling Service Quotas.

    • DEPENDENCY_SERVICE_ERROR - The Amazon Web Service is not available.

    • SERVICE_QUOTA_NOT_AVAILABLE_ERROR - There was an error in Service Quotas.

    " }, "ErrorMessage":{ "shape":"ErrorMessage", @@ -491,11 +499,11 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "QuotaCode":{ "shape":"QuotaCode", - "documentation":"

    The quota identifier.

    " + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " } } }, @@ -518,7 +526,7 @@ "members":{ "ServiceQuotaTemplateAssociationStatus":{ "shape":"ServiceQuotaTemplateAssociationStatus", - "documentation":"

    The association status. If the status is ASSOCIATED, the quota increase requests in the template are automatically applied to new accounts in your organization.

    " + "documentation":"

    The association status. If the status is ASSOCIATED, the quota increase requests in the template are automatically applied to new Amazon Web Services accounts in your organization.

    " } } }, @@ -528,7 +536,7 @@ "members":{ "RequestId":{ "shape":"RequestId", - "documentation":"

    The ID of the quota increase request.

    " + "documentation":"

    Specifies the ID of the quota increase request.

    " } } }, @@ -551,15 +559,15 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "QuotaCode":{ "shape":"QuotaCode", - "documentation":"

    The quota identifier.

    " + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " }, "AwsRegion":{ "shape":"AwsRegion", - "documentation":"

    The AWS Region.

    " + "documentation":"

    Specifies the Amazon Web Services Region for which you made the request.

    " } } }, @@ -581,11 +589,15 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "QuotaCode":{ "shape":"QuotaCode", - "documentation":"

    The quota identifier.

    " + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " + }, + "ContextId":{ + "shape":"QuotaContextId", + "documentation":"

    Specifies the Amazon Web Services account or resource to which the quota applies. The value in this field depends on the context scope associated with the specified service quota.

    " } } }, @@ -638,15 +650,15 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "NextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next page of results.

    " + "documentation":"

    Specifies a value for receiving additional results after you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to indicate where the output should continue from.

    " }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to return with a single call. To retrieve the remaining results, if any, make another call with the token returned from this call.

    " + "documentation":"

    Specifies the maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results.

    An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " } } }, @@ -655,7 +667,7 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    " + "documentation":"

    If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null.

    " }, "Quotas":{ "shape":"ServiceQuotaListDefinition", @@ -672,23 +684,27 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "QuotaCode":{ "shape":"QuotaCode", - "documentation":"

    The quota identifier.

    " + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " }, "Status":{ "shape":"RequestStatus", - "documentation":"

    The status value of the quota increase request.

    " + "documentation":"

    Specifies that you want to filter the results to only the requests with the matching status.

    " }, "NextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next page of results.

    " + "documentation":"

    Specifies a value for receiving additional results after you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to indicate where the output should continue from.

    " }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to return with a single call. To retrieve the remaining results, if any, make another call with the token returned from this call.

    " + "documentation":"

    Specifies the maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results.

    An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " + }, + "QuotaRequestedAtLevel":{ + "shape":"AppliedLevelEnum", + "documentation":"

    Specifies the level within the Amazon Web Services account at which the quota request applies.

    " } } }, @@ -697,7 +713,7 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    " + "documentation":"

    If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null.

    " }, "RequestedQuotas":{ "shape":"RequestedServiceQuotaChangeHistoryListDefinition", @@ -710,19 +726,23 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "Status":{ "shape":"RequestStatus", - "documentation":"

    The status of the quota increase request.

    " + "documentation":"

    Specifies that you want to filter the results to only the requests with the matching status.

    " }, "NextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next page of results.

    " + "documentation":"

    Specifies a value for receiving additional results after you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to indicate where the output should continue from.

    " }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to return with a single call. To retrieve the remaining results, if any, make another call with the token returned from this call.

    " + "documentation":"

    Specifies the maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results.

    An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " + }, + "QuotaRequestedAtLevel":{ + "shape":"AppliedLevelEnum", + "documentation":"

    Specifies the level within the Amazon Web Services account at which the quota request applies.

    " } } }, @@ -731,7 +751,7 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    " + "documentation":"

    If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null.

    " }, "RequestedQuotas":{ "shape":"RequestedServiceQuotaChangeHistoryListDefinition", @@ -744,19 +764,19 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "AwsRegion":{ "shape":"AwsRegion", - "documentation":"

    The AWS Region.

    " + "documentation":"

    Specifies the Amazon Web Services Region for which you made the request.

    " }, "NextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next page of results.

    " + "documentation":"

    Specifies a value for receiving additional results after you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to indicate where the output should continue from.

    " }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to return with a single call. To retrieve the remaining results, if any, make another call with the token returned from this call.

    " + "documentation":"

    Specifies the maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results.

    An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " } } }, @@ -769,7 +789,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    " + "documentation":"

    If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null.

    " } } }, @@ -779,15 +799,23 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "NextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next page of results.

    " + "documentation":"

    Specifies a value for receiving additional results after you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to indicate where the output should continue from.

    " }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to return with a single call. To retrieve the remaining results, if any, make another call with the token returned from this call.

    " + "documentation":"

    Specifies the maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results.

    An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " + }, + "QuotaCode":{ + "shape":"QuotaCode", + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " + }, + "QuotaAppliedAtLevel":{ + "shape":"AppliedLevelEnum", + "documentation":"

    Specifies the level of granularity at which the quota value is applied.

    " } } }, @@ -796,7 +824,7 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    " + "documentation":"

    If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null.

    " }, "Quotas":{ "shape":"ServiceQuotaListDefinition", @@ -809,11 +837,11 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next page of results.

    " + "documentation":"

    Specifies a value for receiving additional results after you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to indicate where the output should continue from.

    " }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to return with a single call. To retrieve the remaining results, if any, make another call with the token returned from this call.

    " + "documentation":"

    Specifies the maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results.

    An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " } } }, @@ -822,11 +850,11 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    " + "documentation":"

    If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null.

    " }, "Services":{ "shape":"ServiceInfoListDefinition", - "documentation":"

    Information about the services.

    " + "documentation":"

    The list of the Amazon Web Service names and service codes.

    " } } }, @@ -836,7 +864,7 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

    The Amazon Resource Name (ARN) for the applied quota for which you want to list tags. You can get this information by using the Service Quotas console, or by listing the quotas using the list-service-quotas AWS CLI command or the ListServiceQuotas AWS API operation.

    " + "documentation":"

    The Amazon Resource Name (ARN) for the applied quota for which you want to list tags. You can get this information by using the Service Quotas console, or by listing the quotas using the list-service-quotas CLI command or the ListServiceQuotas Amazon Web Services API operation.

    " } } }, @@ -894,7 +922,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

    The account making this call is not a member of an organization.

    ", + "documentation":"

    The Amazon Web Services account making this call is not a member of an organization.

    ", "exception":true }, "NoSuchResourceException":{ @@ -910,7 +938,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

    The organization that your account belongs to is not in All Features mode.

    ", + "documentation":"

    The organization that your Amazon Web Services account belongs to is not in All Features mode.

    ", "exception":true }, "OutputTags":{ @@ -942,19 +970,19 @@ "members":{ "QuotaCode":{ "shape":"QuotaCode", - "documentation":"

    The quota identifier.

    " + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " }, "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "AwsRegion":{ "shape":"AwsRegion", - "documentation":"

    The AWS Region.

    " + "documentation":"

    Specifies the Amazon Web Services Region to which the template applies.

    " }, "DesiredValue":{ "shape":"QuotaValue", - "documentation":"

    The new, increased value for the quota.

    " + "documentation":"

    Specifies the new, increased value for the quota.

    " } } }, @@ -975,6 +1003,33 @@ "min":1, "pattern":"[a-zA-Z][a-zA-Z0-9-]{1,128}" }, + "QuotaContextId":{"type":"string"}, + "QuotaContextInfo":{ + "type":"structure", + "members":{ + "ContextScope":{ + "shape":"QuotaContextScope", + "documentation":"

    Specifies whether the quota applies to an Amazon Web Services account, or to a resource.

    " + }, + "ContextScopeType":{ + "shape":"QuotaContextScopeType", + "documentation":"

    When the ContextScope is RESOURCE, this specifies the resource type of the specified resource.

    " + }, + "ContextId":{ + "shape":"QuotaContextId", + "documentation":"

    Specifies the Amazon Web Services account or resource to which the quota applies. The value in this field depends on the context scope associated with the specified service quota.

    " + } + }, + "documentation":"

    A structure that describes the context for a service quota. The context identifies what the quota applies to.

    " + }, + "QuotaContextScope":{ + "type":"string", + "enum":[ + "RESOURCE", + "ACCOUNT" + ] + }, + "QuotaContextScopeType":{"type":"string"}, "QuotaExceededException":{ "type":"structure", "members":{ @@ -991,7 +1046,7 @@ "members":{ "PeriodValue":{ "shape":"PeriodValue", - "documentation":"

    The value.

    " + "documentation":"

    The value associated with the reported PeriodUnit.

    " }, "PeriodUnit":{ "shape":"PeriodUnit", @@ -1022,15 +1077,19 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "QuotaCode":{ "shape":"QuotaCode", - "documentation":"

    The quota identifier.

    " + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " }, "DesiredValue":{ "shape":"QuotaValue", - "documentation":"

    The new, increased value for the quota.

    " + "documentation":"

    Specifies the new, increased value for the quota.

    " + }, + "ContextId":{ + "shape":"QuotaContextId", + "documentation":"

    Specifies the Amazon Web Services account or resource to which the quota applies. The value in this field depends on the context scope associated with the specified service quota.

    " } } }, @@ -1050,7 +1109,9 @@ "CASE_OPENED", "APPROVED", "DENIED", - "CASE_CLOSED" + "CASE_CLOSED", + "NOT_APPROVED", + "INVALID_REQUEST" ] }, "RequestedServiceQuotaChange":{ @@ -1066,19 +1127,19 @@ }, "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "ServiceName":{ "shape":"ServiceName", - "documentation":"

    The service name.

    " + "documentation":"

    Specifies the service name.

    " }, "QuotaCode":{ "shape":"QuotaCode", - "documentation":"

    The quota identifier.

    " + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " }, "QuotaName":{ "shape":"QuotaName", - "documentation":"

    The quota name.

    " + "documentation":"

    Specifies the quota name.

    " }, "DesiredValue":{ "shape":"QuotaValue", @@ -1111,6 +1172,14 @@ "Unit":{ "shape":"QuotaUnit", "documentation":"

    The unit of measurement.

    " + }, + "QuotaRequestedAtLevel":{ + "shape":"AppliedLevelEnum", + "documentation":"

    Specifies the level within the Amazon Web Services account at which the quota request applies.

    " + }, + "QuotaContext":{ + "shape":"QuotaContextInfo", + "documentation":"

    The context for this service quota.

    " } }, "documentation":"

    Information about a quota increase request.

    " @@ -1148,14 +1217,14 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "ServiceName":{ "shape":"ServiceName", - "documentation":"

    The service name.

    " + "documentation":"

    Specifies the service name.

    " } }, - "documentation":"

    Information about a service.

    " + "documentation":"

    Information about an Amazon Web Service.

    " }, "ServiceInfoListDefinition":{ "type":"list", @@ -1167,11 +1236,11 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "ServiceName":{ "shape":"ServiceName", - "documentation":"

    The service name.

    " + "documentation":"

    Specifies the service name.

    " }, "QuotaArn":{ "shape":"QuotaArn", @@ -1179,11 +1248,11 @@ }, "QuotaCode":{ "shape":"QuotaCode", - "documentation":"

    The quota identifier.

    " + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " }, "QuotaName":{ "shape":"QuotaName", - "documentation":"

    The quota name.

    " + "documentation":"

    Specifies the quota name.

    " }, "Value":{ "shape":"QuotaValue", @@ -1212,6 +1281,14 @@ "ErrorReason":{ "shape":"ErrorReason", "documentation":"

    The error code and error reason.

    " + }, + "QuotaAppliedAtLevel":{ + "shape":"AppliedLevelEnum", + "documentation":"

    Specifies the level of granularity at which the quota value is applied.

    " + }, + "QuotaContext":{ + "shape":"QuotaContextInfo", + "documentation":"

    The context for this service quota.

    " } }, "documentation":"

    Information about a quota.

    " @@ -1221,19 +1298,19 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "ServiceName":{ "shape":"ServiceName", - "documentation":"

    The service name.

    " + "documentation":"

    Specifies the service name.

    " }, "QuotaCode":{ "shape":"QuotaCode", - "documentation":"

    The quota identifier.

    " + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " }, "QuotaName":{ "shape":"QuotaName", - "documentation":"

    The quota name.

    " + "documentation":"

    Specifies the quota name.

    " }, "DesiredValue":{ "shape":"QuotaValue", @@ -1241,7 +1318,7 @@ }, "AwsRegion":{ "shape":"AwsRegion", - "documentation":"

    The AWS Region.

    " + "documentation":"

    The Amazon Web Services Region.

    " }, "Unit":{ "shape":"QuotaUnit", @@ -1324,7 +1401,7 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

    The Amazon Resource Name (ARN) for the applied quota. You can get this information by using the Service Quotas console, or by listing the quotas using the list-service-quotas AWS CLI command or the ListServiceQuotas AWS API operation.

    " + "documentation":"

    The Amazon Resource Name (ARN) for the applied quota. You can get this information by using the Service Quotas console, or by listing the quotas using the list-service-quotas CLI command or the ListServiceQuotas Amazon Web Services API operation.

    " }, "Tags":{ "shape":"InputTags", @@ -1348,7 +1425,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

    The Service Quotas template is not available in this AWS Region.

    ", + "documentation":"

    The Service Quotas template is not available in this Amazon Web Services Region.

    ", "exception":true }, "TooManyRequestsException":{ @@ -1376,7 +1453,7 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

    The Amazon Resource Name (ARN) for the applied quota that you want to untag. You can get this information by using the Service Quotas console, or by listing the quotas using the list-service-quotas AWS CLI command or the ListServiceQuotas AWS API operation.

    " + "documentation":"

    The Amazon Resource Name (ARN) for the applied quota that you want to untag. You can get this information by using the Service Quotas console, or by listing the quotas using the list-service-quotas CLI command or the ListServiceQuotas Amazon Web Services API operation.

    " }, "TagKeys":{ "shape":"InputTagKeys", @@ -1390,5 +1467,5 @@ } } }, - "documentation":"

    With Service Quotas, you can view and manage your quotas easily as your AWS workloads grow. Quotas, also referred to as limits, are the maximum number of resources that you can create in your AWS account. For more information, see the Service Quotas User Guide.

    " + "documentation":"

    With Service Quotas, you can view and manage your quotas easily as your Amazon Web Services workloads grow. Quotas, also referred to as limits, are the maximum number of resources that you can create in your Amazon Web Services account. For more information, see the Service Quotas User Guide.

    " } diff --git a/services/ses/pom.xml b/services/ses/pom.xml index ea8beadd35e2..0f2c7e2e1dc2 100644 --- a/services/ses/pom.xml +++ b/services/ses/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ses AWS Java SDK :: Services :: Amazon SES diff --git a/services/ses/src/main/resources/codegen-resources/customization.config b/services/ses/src/main/resources/codegen-resources/customization.config index 9c5a65cc95bb..232f5a35cdbd 100644 --- a/services/ses/src/main/resources/codegen-resources/customization.config +++ b/services/ses/src/main/resources/codegen-resources/customization.config @@ -13,7 +13,7 @@ "listTemplates", "listVerifiedEmailAddresses" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "updateAccountSendingEnabled" ] } diff --git a/services/sesv2/pom.xml b/services/sesv2/pom.xml index e3ac772ae379..336ce2b597b5 100644 --- a/services/sesv2/pom.xml +++ b/services/sesv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT sesv2 AWS Java SDK :: Services :: SESv2 diff --git a/services/sesv2/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/sesv2/src/main/resources/codegen-resources/endpoint-rule-set.json index 1d567c5f9741..900cfc973ee7 100644 --- a/services/sesv2/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/sesv2/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", 
- "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://email-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://email-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://email-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://email-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://email.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://email.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://email.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://email.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": 
"endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/sesv2/src/main/resources/codegen-resources/paginators-1.json b/services/sesv2/src/main/resources/codegen-resources/paginators-1.json index 188b42d8a1d4..5b3c2d8c3ac0 100644 --- a/services/sesv2/src/main/resources/codegen-resources/paginators-1.json +++ b/services/sesv2/src/main/resources/codegen-resources/paginators-1.json @@ -50,6 +50,11 @@ "output_token": "NextToken", "limit_key": "PageSize" }, + "ListExportJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + }, "ListImportJobs": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/sesv2/src/main/resources/codegen-resources/service-2.json b/services/sesv2/src/main/resources/codegen-resources/service-2.json index 668c5a618abc..ee62c7b3fda1 100644 --- a/services/sesv2/src/main/resources/codegen-resources/service-2.json +++ b/services/sesv2/src/main/resources/codegen-resources/service-2.json @@ -29,6 +29,21 @@ ], "documentation":"

    Retrieves batches of metric data collected based on your sending activity.

    You can execute this operation no more than 16 times per second, and with at most 160 queries from the batches per second (cumulative).

    " }, + "CancelExportJob":{ + "name":"CancelExportJob", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/export-jobs/{JobId}/cancel" + }, + "input":{"shape":"CancelExportJobRequest"}, + "output":{"shape":"CancelExportJobResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Cancels an export job.

    " + }, "CreateConfigurationSet":{ "name":"CreateConfigurationSet", "http":{ @@ -202,6 +217,22 @@ ], "documentation":"

    Creates an email template. Email templates enable you to send personalized email to one or more destinations in a single API operation. For more information, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, + "CreateExportJob":{ + "name":"CreateExportJob", + "http":{ + "method":"POST", + "requestUri":"/v2/email/export-jobs" + }, + "input":{"shape":"CreateExportJobRequest"}, + "output":{"shape":"CreateExportJobResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Creates an export job for a data source and destination.

    You can execute this operation no more than once per second.

    " + }, "CreateImportJob":{ "name":"CreateImportJob", "http":{ @@ -625,6 +656,21 @@ ], "documentation":"

    Displays the template object (which includes the subject line, HTML part and text part) for the template you specify.

    You can execute this operation no more than once per second.

    " }, + "GetExportJob":{ + "name":"GetExportJob", + "http":{ + "method":"GET", + "requestUri":"/v2/email/export-jobs/{JobId}" + }, + "input":{"shape":"GetExportJobRequest"}, + "output":{"shape":"GetExportJobResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Provides information about an export job.

    " + }, "GetImportJob":{ "name":"GetImportJob", "http":{ @@ -640,6 +686,21 @@ ], "documentation":"

    Provides information about an import job.

    " }, + "GetMessageInsights":{ + "name":"GetMessageInsights", + "http":{ + "method":"GET", + "requestUri":"/v2/email/insights/{MessageId}/" + }, + "input":{"shape":"GetMessageInsightsRequest"}, + "output":{"shape":"GetMessageInsightsResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Provides information about a specific message, including the from address, the subject, the recipient address, and email tags, as well as events associated with the message.

    You can execute this operation no more than once per second.

    " + }, "GetSuppressedDestination":{ "name":"GetSuppressedDestination", "http":{ @@ -784,6 +845,20 @@ ], "documentation":"

    Lists the email templates present in your Amazon SES account in the current Amazon Web Services Region.

    You can execute this operation no more than once per second.

    " }, + "ListExportJobs":{ + "name":"ListExportJobs", + "http":{ + "method":"POST", + "requestUri":"/v2/email/list-export-jobs" + }, + "input":{"shape":"ListExportJobsRequest"}, + "output":{"shape":"ListExportJobsResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Lists all of the export jobs.

    " + }, "ListImportJobs":{ "name":"ListImportJobs", "http":{ @@ -1556,6 +1631,33 @@ }, "documentation":"

    Represents the body of the email message.

    " }, + "Bounce":{ + "type":"structure", + "members":{ + "BounceType":{ + "shape":"BounceType", + "documentation":"

    The type of the bounce, as determined by SES. Can be one of UNDETERMINED, TRANSIENT, or PERMANENT.

    " + }, + "BounceSubType":{ + "shape":"BounceSubType", + "documentation":"

    The subtype of the bounce, as determined by SES.

    " + }, + "DiagnosticCode":{ + "shape":"DiagnosticCode", + "documentation":"

    The status code issued by the reporting Message Transfer Agent (MTA). This field only appears if a delivery status notification (DSN) was attached to the bounce and the Diagnostic-Code was provided in the DSN.

    " + } + }, + "documentation":"

    Information about a Bounce event.

    " + }, + "BounceSubType":{"type":"string"}, + "BounceType":{ + "type":"string", + "enum":[ + "UNDETERMINED", + "TRANSIENT", + "PERMANENT" + ] + }, "BulkEmailContent":{ "type":"structure", "members":{ @@ -1632,6 +1734,25 @@ ] }, "CampaignId":{"type":"string"}, + "CancelExportJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    The export job ID.

    ", + "location":"uri", + "locationName":"JobId" + } + }, + "documentation":"

    Represents a request to cancel an export job using the export job ID.

    " + }, + "CancelExportJobResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, "CaseId":{"type":"string"}, "Charset":{"type":"string"}, "CloudWatchDestination":{ @@ -1672,6 +1793,22 @@ "type":"list", "member":{"shape":"CloudWatchDimensionConfiguration"} }, + "Complaint":{ + "type":"structure", + "members":{ + "ComplaintSubType":{ + "shape":"ComplaintSubType", + "documentation":"

    Can either be null or OnAccountSuppressionList. If the value is OnAccountSuppressionList, SES accepted the message, but didn't attempt to send it because it was on the account-level suppression list.

    " + }, + "ComplaintFeedbackType":{ + "shape":"ComplaintFeedbackType", + "documentation":"

    The value of the Feedback-Type field from the feedback report received from the ISP.

    " + } + }, + "documentation":"

    Information about a Complaint event.

    " + }, + "ComplaintFeedbackType":{"type":"string"}, + "ComplaintSubType":{"type":"string"}, "ConcurrentModificationException":{ "type":"structure", "members":{ @@ -2133,6 +2270,34 @@ }, "documentation":"

    If the action is successful, the service sends back an HTTP 200 response with an empty HTTP body.

    " }, + "CreateExportJobRequest":{ + "type":"structure", + "required":[ + "ExportDataSource", + "ExportDestination" + ], + "members":{ + "ExportDataSource":{ + "shape":"ExportDataSource", + "documentation":"

    The data source for the export job.

    " + }, + "ExportDestination":{ + "shape":"ExportDestination", + "documentation":"

    The destination for the export job.

    " + } + }, + "documentation":"

    Represents a request to create an export job from a data source to a data destination.

    " + }, + "CreateExportJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    A string that represents the export job ID.

    " + } + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, "CreateImportJobRequest":{ "type":"structure", "required":[ @@ -2240,7 +2405,7 @@ }, "DataFormat":{ "type":"string", - "documentation":"

    The data format of the import job's data source.

    ", + "documentation":"

    The data format of a file. It can be one of the following:

    • CSV – A comma-separated values file.

    • JSON – A JSON file.

    ", "enum":[ "CSV", "JSON" @@ -2568,6 +2733,18 @@ "type":"string", "documentation":"

    The subject line for an email that you submitted in a predictive inbox placement test.

    " }, + "DeliveryEventType":{ + "type":"string", + "documentation":"

    The type of delivery events:

    • SEND - The send request was successful and SES will attempt to deliver the message to the recipient’s mail server. (If account-level or global suppression is being used, SES will still count it as a send, but delivery is suppressed.)

    • DELIVERY - SES successfully delivered the email to the recipient's mail server. Excludes deliveries to the mailbox simulator and emails addressed to more than one recipient.

    • TRANSIENT_BOUNCE - Feedback received for delivery failures excluding issues with non-existent mailboxes. Excludes bounces from the mailbox simulator, and those from emails addressed to more than one recipient.

    • PERMANENT_BOUNCE - Feedback received for emails sent to non-existent mailboxes. Excludes bounces from the mailbox simulator, those originating from your account-level suppression list (if enabled), and those from emails addressed to more than one recipient.

    • UNDETERMINED_BOUNCE - SES was unable to determine the bounce reason.

    • COMPLAINT - Complaint received for the email. This excludes complaints from the mailbox simulator, those originating from your account-level suppression list (if enabled), and those from emails addressed to more than one recipient.

    ", + "enum":[ + "SEND", + "DELIVERY", + "TRANSIENT_BOUNCE", + "PERMANENT_BOUNCE", + "UNDETERMINED_BOUNCE", + "COMPLAINT" + ] + }, "DeliveryOptions":{ "type":"structure", "members":{ @@ -2601,6 +2778,7 @@ }, "documentation":"

    An object that describes the recipients for an email.

    Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531. For this reason, the local part of a destination email address (the part of the email address that precedes the @ sign) may only contain 7-bit ASCII characters. If the domain part of an address (the part after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as described in RFC3492.

    " }, + "DiagnosticCode":{"type":"string"}, "DimensionName":{ "type":"string", "documentation":"

    The name of an Amazon CloudWatch dimension associated with an email sending metric. The name has to meet the following criteria:

    • It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).

    • It can contain no more than 256 characters.

    " @@ -2826,6 +3004,11 @@ "member":{"shape":"DomainIspPlacement"} }, "EmailAddress":{"type":"string"}, + "EmailAddressFilterList":{ + "type":"list", + "member":{"shape":"InsightsEmailAddress"}, + "max":5 + }, "EmailAddressList":{ "type":"list", "member":{"shape":"EmailAddress"} @@ -2848,6 +3031,39 @@ }, "documentation":"

    An object that defines the entire content of the email, including the message headers and the body content. You can create a simple email message, in which you specify the subject and the text and HTML versions of the message body. You can also create raw messages, in which you specify a complete MIME-formatted message. Raw messages can include attachments and custom headers.

    " }, + "EmailInsights":{ + "type":"structure", + "members":{ + "Destination":{ + "shape":"InsightsEmailAddress", + "documentation":"

    The recipient of the email.

    " + }, + "Isp":{ + "shape":"Isp", + "documentation":"

    The recipient's ISP (e.g., Gmail, Yahoo, etc.).

    " + }, + "Events":{ + "shape":"InsightsEvents", + "documentation":"

    A list of events associated with the sent email.

    " + } + }, + "documentation":"

    An email's insights contain metadata and delivery information about a specific email.

    " + }, + "EmailInsightsList":{ + "type":"list", + "member":{"shape":"EmailInsights"} + }, + "EmailSubject":{ + "type":"string", + "max":998, + "min":1, + "sensitive":true + }, + "EmailSubjectFilterList":{ + "type":"list", + "member":{"shape":"EmailSubject"}, + "max":1 + }, "EmailTemplateContent":{ "type":"structure", "members":{ @@ -2909,6 +3125,14 @@ }, "Enabled":{"type":"boolean"}, "EnabledWrapper":{"type":"boolean"}, + "EngagementEventType":{ + "type":"string", + "documentation":"

    The type of engagement events:

    • OPEN - Open event for emails including open trackers. Excludes opens for emails addressed to more than one recipient.

    • CLICK - Click event for emails including wrapped links. Excludes clicks for emails addressed to more than one recipient.

    ", + "enum":[ + "OPEN", + "CLICK" + ] + }, "ErrorMessage":{"type":"string"}, "Esp":{"type":"string"}, "Esps":{ @@ -2991,6 +3215,20 @@ "type":"list", "member":{"shape":"EventDestination"} }, + "EventDetails":{ + "type":"structure", + "members":{ + "Bounce":{ + "shape":"Bounce", + "documentation":"

    Information about a Bounce event.

    " + }, + "Complaint":{ + "shape":"Complaint", + "documentation":"

    Information about a Complaint event.

    " + } + }, + "documentation":"

    Contains a Bounce object if the event type is BOUNCE. Contains a Complaint object if the event type is COMPLAINT.

    " + }, "EventType":{ "type":"string", "documentation":"

    An email sending event type. For example, email sends, opens, and bounces are all email events.

    ", @@ -3011,6 +3249,110 @@ "type":"list", "member":{"shape":"EventType"} }, + "ExportDataSource":{ + "type":"structure", + "members":{ + "MetricsDataSource":{"shape":"MetricsDataSource"}, + "MessageInsightsDataSource":{"shape":"MessageInsightsDataSource"} + }, + "documentation":"

    An object that contains details about the data source of the export job. It can contain only one of the MetricsDataSource or MessageInsightsDataSource objects.

    " + }, + "ExportDestination":{ + "type":"structure", + "required":["DataFormat"], + "members":{ + "DataFormat":{ + "shape":"DataFormat", + "documentation":"

    The data format of the final export job file. It can be one of the following:

    • CSV - A comma-separated values file.

    • JSON - A JSON file.

    " + }, + "S3Url":{ + "shape":"S3Url", + "documentation":"

    An Amazon S3 pre-signed URL that points to the generated export file.

    " + } + }, + "documentation":"

    An object that contains details about the destination of the export job.

    " + }, + "ExportDimensionValue":{ + "type":"list", + "member":{"shape":"MetricDimensionValue"}, + "max":10, + "min":1 + }, + "ExportDimensions":{ + "type":"map", + "key":{"shape":"MetricDimensionName"}, + "value":{"shape":"ExportDimensionValue"}, + "max":3, + "min":1 + }, + "ExportJobSummary":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    The export job ID.

    " + }, + "ExportSourceType":{ + "shape":"ExportSourceType", + "documentation":"

    The source type of the export job.

    " + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

    The status of the export job.

    " + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The timestamp of when the export job was created.

    " + }, + "CompletedTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The timestamp of when the export job was completed.

    " + } + }, + "documentation":"

    A summary of the export job.

    " + }, + "ExportJobSummaryList":{ + "type":"list", + "member":{"shape":"ExportJobSummary"}, + "documentation":"

    A list of the export job summaries.

    " + }, + "ExportMetric":{ + "type":"structure", + "members":{ + "Name":{"shape":"Metric"}, + "Aggregation":{"shape":"MetricAggregation"} + }, + "documentation":"

    An object that contains a mapping between a Metric and MetricAggregation.

    " + }, + "ExportMetrics":{ + "type":"list", + "member":{"shape":"ExportMetric"}, + "max":10, + "min":1 + }, + "ExportSourceType":{ + "type":"string", + "documentation":"

    The type of data source of an export. It can be one of the following:

    • METRICS_DATA - The metrics export.

    • MESSAGE_INSIGHTS - The Message Insights export.

    ", + "enum":[ + "METRICS_DATA", + "MESSAGE_INSIGHTS" + ] + }, + "ExportStatistics":{ + "type":"structure", + "members":{ + "ProcessedRecordsCount":{ + "shape":"ProcessedRecordsCount", + "documentation":"

    The number of records that were processed to generate the final export file.

    " + }, + "ExportedRecordsCount":{ + "shape":"ExportedRecordsCount", + "documentation":"

    The number of records that were exported to the final export file.

    This value might not be available for all export source types.

    " + } + }, + "documentation":"

    Statistics about the execution of an export job.

    " + }, + "ExportedRecordsCount":{"type":"integer"}, "FailedRecordsCount":{"type":"integer"}, "FailedRecordsS3Url":{"type":"string"}, "FailureInfo":{ @@ -3018,14 +3360,14 @@ "members":{ "FailedRecordsS3Url":{ "shape":"FailedRecordsS3Url", - "documentation":"

    An Amazon S3 presigned URL that contains all the failed records and related information.

    " + "documentation":"

    An Amazon S3 pre-signed URL that contains all the failed records and related information.

    " }, "ErrorMessage":{ "shape":"ErrorMessage", - "documentation":"

    A message about why the import job failed.

    " + "documentation":"

    A message about why the job failed.

    " } }, - "documentation":"

    An object that contains the failure details about an import job.

    " + "documentation":"

    An object that contains the failure details about a job.

    " }, "FailureRedirectionURL":{ "type":"string", @@ -3665,6 +4007,61 @@ }, "documentation":"

    The following element is returned by the service.

    " }, + "GetExportJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    The export job ID.

    ", + "location":"uri", + "locationName":"JobId" + } + }, + "documentation":"

    Represents a request to retrieve information about an export job using the export job ID.

    " + }, + "GetExportJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    The export job ID.

    " + }, + "ExportSourceType":{ + "shape":"ExportSourceType", + "documentation":"

    The type of source of the export job.

    " + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

    The status of the export job.

    " + }, + "ExportDestination":{ + "shape":"ExportDestination", + "documentation":"

    The destination of the export job.

    " + }, + "ExportDataSource":{ + "shape":"ExportDataSource", + "documentation":"

    The data source of the export job.

    " + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The timestamp of when the export job was created.

    " + }, + "CompletedTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The timestamp of when the export job was completed.

    " + }, + "FailureInfo":{ + "shape":"FailureInfo", + "documentation":"

    The failure details about an export job.

    " + }, + "Statistics":{ + "shape":"ExportStatistics", + "documentation":"

    The statistics about the export job.

    " + } + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, "GetImportJobRequest":{ "type":"structure", "required":["JobId"], @@ -3720,6 +4117,45 @@ }, "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " }, + "GetMessageInsightsRequest":{ + "type":"structure", + "required":["MessageId"], + "members":{ + "MessageId":{ + "shape":"OutboundMessageId", + "documentation":"

    A MessageId is a unique identifier for a message, and is returned when sending emails through Amazon SES.

    ", + "location":"uri", + "locationName":"MessageId" + } + }, + "documentation":"

    A request to return information about a message.

    " + }, + "GetMessageInsightsResponse":{ + "type":"structure", + "members":{ + "MessageId":{ + "shape":"OutboundMessageId", + "documentation":"

    A unique identifier for the message.

    " + }, + "FromEmailAddress":{ + "shape":"InsightsEmailAddress", + "documentation":"

    The from address used to send the message.

    " + }, + "Subject":{ + "shape":"EmailSubject", + "documentation":"

    The subject line of the message.

    " + }, + "EmailTags":{ + "shape":"MessageTagList", + "documentation":"

    A list of tags, in the form of name/value pairs, that were applied to the email you sent, along with Amazon SES Auto-Tags.

    " + }, + "Insights":{ + "shape":"EmailInsightsList", + "documentation":"

    A set of insights associated with the message.

    " + } + }, + "documentation":"

    Information about a message.

    " + }, "GetSuppressedDestinationRequest":{ "type":"structure", "required":["EmailAddress"], @@ -3883,6 +4319,34 @@ }, "documentation":"

    An object that contains information about the inbox placement data settings for a verified domain that’s associated with your Amazon Web Services account. This data is available only if you enabled the Deliverability dashboard for the domain.

    " }, + "InsightsEmailAddress":{ + "type":"string", + "max":320, + "min":1, + "sensitive":true + }, + "InsightsEvent":{ + "type":"structure", + "members":{ + "Timestamp":{ + "shape":"Timestamp", + "documentation":"

    The timestamp of the event.

    " + }, + "Type":{ + "shape":"EventType", + "documentation":"

    The type of event:

    • SEND - The send request was successful and SES will attempt to deliver the message to the recipient’s mail server. (If account-level or global suppression is being used, SES will still count it as a send, but delivery is suppressed.)

    • DELIVERY - SES successfully delivered the email to the recipient's mail server. Excludes deliveries to the mailbox simulator, and those from emails addressed to more than one recipient.

    • BOUNCE - Feedback received for delivery failures. Additional details about the bounce are provided in the Details object. Excludes bounces from the mailbox simulator, and those from emails addressed to more than one recipient.

    • COMPLAINT - Complaint received for the email. Additional details about the complaint are provided in the Details object. This excludes complaints from the mailbox simulator, those originating from your account-level suppression list (if enabled), and those from emails addressed to more than one recipient.

    • OPEN - Open event for emails including open trackers. Excludes opens for emails addressed to more than one recipient.

    • CLICK - Click event for emails including wrapped links. Excludes clicks for emails addressed to more than one recipient.

    " + }, + "Details":{ + "shape":"EventDetails", + "documentation":"

    Details about bounce or complaint events.

    " + } + }, + "documentation":"

    An object containing details about a specific event.

    " + }, + "InsightsEvents":{ + "type":"list", + "member":{"shape":"InsightsEvent"} + }, "InternalServiceErrorException":{ "type":"structure", "members":{ @@ -3908,6 +4372,12 @@ "type":"list", "member":{"shape":"Ip"} }, + "Isp":{"type":"string"}, + "IspFilterList":{ + "type":"list", + "member":{"shape":"Isp"}, + "max":5 + }, "IspName":{ "type":"string", "documentation":"

    The name of an email provider.

    " @@ -3936,17 +4406,18 @@ }, "JobId":{ "type":"string", - "documentation":"

    A string that represents the import job ID.

    ", + "documentation":"

    A string that represents a job ID.

    ", "min":1 }, "JobStatus":{ "type":"string", - "documentation":"

    The status of the import job.

    ", + "documentation":"

    The status of a job.

    • CREATED – Job has just been created.

    • PROCESSING – Job is processing.

    • FAILED – An error occurred during processing.

    • COMPLETED – Job has completed processing successfully.

    • CANCELLED – Job was cancelled and did not complete processing.

    ", "enum":[ "CREATED", "PROCESSING", "COMPLETED", - "FAILED" + "FAILED", + "CANCELLED" ] }, "KinesisFirehoseDestination":{ @@ -3967,6 +4438,16 @@ }, "documentation":"

    An object that defines an Amazon Kinesis Data Firehose destination for email events. You can use Amazon Kinesis Data Firehose to stream data to other services, such as Amazon S3 and Amazon Redshift.

    " }, + "LastDeliveryEventList":{ + "type":"list", + "member":{"shape":"DeliveryEventType"}, + "max":5 + }, + "LastEngagementEventList":{ + "type":"list", + "member":{"shape":"EngagementEventType"}, + "max":2 + }, "LastFreshStart":{ "type":"timestamp", "documentation":"

    The date and time (in Unix time) when the reputation metrics were last given a fresh start. When your account is given a fresh start, your reputation metrics are calculated starting from the date of the fresh start.

    " @@ -4313,6 +4794,42 @@ }, "documentation":"

    The following elements are returned by the service.

    " }, + "ListExportJobsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The pagination token returned from a previous call to ListExportJobs to indicate the position in the list of export jobs.

    " + }, + "PageSize":{ + "shape":"MaxItems", + "documentation":"

    Maximum number of export jobs to return at once. Use this parameter to paginate results. If additional export jobs exist beyond the specified limit, the NextToken element is sent in the response. Use the NextToken value in subsequent calls to ListExportJobs to retrieve additional export jobs.

    " + }, + "ExportSourceType":{ + "shape":"ExportSourceType", + "documentation":"

    A value used to list export jobs that have a certain ExportSourceType.

    " + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

    A value used to list export jobs that have a certain JobStatus.

    " + } + }, + "documentation":"

    Represents a request to list all export jobs with filters.

    " + }, + "ListExportJobsResponse":{ + "type":"structure", + "members":{ + "ExportJobs":{ + "shape":"ExportJobSummaryList", + "documentation":"

    A list of the export job summaries.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A string token indicating that there might be additional export jobs available to be listed. Use this token in a subsequent call to ListExportJobs with the same parameters to retrieve the next page of export jobs.

    " + } + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, "ListImportJobsRequest":{ "type":"structure", "members":{ @@ -4582,6 +5099,71 @@ "documentation":"

    The body of an email message.

    " }, "MessageData":{"type":"string"}, + "MessageInsightsDataSource":{ + "type":"structure", + "required":[ + "StartDate", + "EndDate" + ], + "members":{ + "StartDate":{ + "shape":"Timestamp", + "documentation":"

    Represents the start date for the export interval as a timestamp. The start date is inclusive.

    " + }, + "EndDate":{ + "shape":"Timestamp", + "documentation":"

    Represents the end date for the export interval as a timestamp. The end date is inclusive.

    " + }, + "Include":{ + "shape":"MessageInsightsFilters", + "documentation":"

    Filters for results to be included in the export file.

    " + }, + "Exclude":{ + "shape":"MessageInsightsFilters", + "documentation":"

    Filters for results to be excluded from the export file.

    " + }, + "MaxResults":{ + "shape":"MessageInsightsExportMaxResults", + "documentation":"

    The maximum number of results.

    " + } + }, + "documentation":"

    An object that contains filters applied when performing the Message Insights export.

    " + }, + "MessageInsightsExportMaxResults":{ + "type":"integer", + "max":10000, + "min":1 + }, + "MessageInsightsFilters":{ + "type":"structure", + "members":{ + "FromEmailAddress":{ + "shape":"EmailAddressFilterList", + "documentation":"

    The from address used to send the message.

    " + }, + "Destination":{ + "shape":"EmailAddressFilterList", + "documentation":"

    The recipient's email address.

    " + }, + "Subject":{ + "shape":"EmailSubjectFilterList", + "documentation":"

    The subject line of the message.

    " + }, + "Isp":{ + "shape":"IspFilterList", + "documentation":"

    The recipient's ISP (e.g., Gmail, Yahoo, etc.).

    " + }, + "LastDeliveryEvent":{ + "shape":"LastDeliveryEventList", + "documentation":"

    The last delivery-related event for the email, where the ordering is as follows: SEND < BOUNCE < DELIVERY < COMPLAINT.

    " + }, + "LastEngagementEvent":{ + "shape":"LastEngagementEventList", + "documentation":"

    The last engagement-related event for the email, where the ordering is as follows: OPEN < CLICK.

    Engagement events are only available if Engagement tracking is enabled.

    " + } + }, + "documentation":"

    An object containing Message Insights filters.

    If you specify multiple filters, the filters are joined by AND.

    If you specify multiple values for a filter, the values are joined by OR. Filter values are case-sensitive.

    FromEmailAddress, Destination, and Subject filters support partial match. A partial match is performed by using the * wildcard character placed at the beginning (suffix match), the end (prefix match) or both ends of the string (contains match). In order to match the literal characters * or \\, they must be escaped using the \\ character. If no wildcard character is present, an exact match is performed.

    " + }, "MessageRejected":{ "type":"structure", "members":{ @@ -4623,6 +5205,7 @@ }, "Metric":{ "type":"string", + "documentation":"

    The metric to export, can be one of the following:

    • SEND - Emails sent eligible for tracking in the VDM dashboard. This excludes emails sent to the mailbox simulator and emails addressed to more than one recipient.

    • COMPLAINT - Complaints received for your account. This excludes complaints from the mailbox simulator, those originating from your account-level suppression list (if enabled), and those for emails addressed to more than one recipient.

    • PERMANENT_BOUNCE - Permanent bounces - i.e., feedback received for emails sent to non-existent mailboxes. Excludes bounces from the mailbox simulator, those originating from your account-level suppression list (if enabled), and those for emails addressed to more than one recipient.

    • TRANSIENT_BOUNCE - Transient bounces - i.e., feedback received for delivery failures excluding issues with non-existent mailboxes. Excludes bounces from the mailbox simulator, and those for emails addressed to more than one recipient.

    • OPEN - Unique open events for emails including open trackers. Excludes opens for emails addressed to more than one recipient.

    • CLICK - Unique click events for emails including wrapped links. Excludes clicks for emails addressed to more than one recipient.

    • DELIVERY - Successful deliveries for email sending attempts. Excludes deliveries to the mailbox simulator and for emails addressed to more than one recipient.

    • DELIVERY_OPEN - Successful deliveries for email sending attempts. Excludes deliveries to the mailbox simulator, for emails addressed to more than one recipient, and emails without open trackers.

    • DELIVERY_CLICK - Successful deliveries for email sending attempts. Excludes deliveries to the mailbox simulator, for emails addressed to more than one recipient, and emails without click trackers.

    • DELIVERY_COMPLAINT - Successful deliveries for email sending attempts. Excludes deliveries to the mailbox simulator, for emails addressed to more than one recipient, and emails addressed to recipients hosted by ISPs with which Amazon SES does not have a feedback loop agreement.

    ", "enum":[ "SEND", "COMPLAINT", @@ -4636,6 +5219,14 @@ "DELIVERY_COMPLAINT" ] }, + "MetricAggregation":{ + "type":"string", + "documentation":"

    The aggregation to apply to a metric. It can be one of the following:

    • VOLUME - The volume of events for this metric.

    • RATE - The rate for this metric relative to the SEND metric volume.

    ", + "enum":[ + "RATE", + "VOLUME" + ] + }, "MetricDataError":{ "type":"structure", "members":{ @@ -4689,7 +5280,10 @@ "ISP" ] }, - "MetricDimensionValue":{"type":"string"}, + "MetricDimensionValue":{ + "type":"string", + "documentation":"

    A list of values associated with the MetricDimensionName to filter metrics by. Can either be * as a wildcard for all values or a list of up to 10 specific values. If one Dimension has the * value, other dimensions can only contain one value.

    " + }, "MetricNamespace":{ "type":"string", "enum":["VDM"] @@ -4698,6 +5292,39 @@ "type":"list", "member":{"shape":"Counter"} }, + "MetricsDataSource":{ + "type":"structure", + "required":[ + "Dimensions", + "Namespace", + "Metrics", + "StartDate", + "EndDate" + ], + "members":{ + "Dimensions":{ + "shape":"ExportDimensions", + "documentation":"

    An object that contains a mapping between a MetricDimensionName and MetricDimensionValue to filter metrics by. Must contain at least 1 dimension but no more than 3 unique ones.

    " + }, + "Namespace":{ + "shape":"MetricNamespace", + "documentation":"

    The metrics namespace - e.g., VDM.

    " + }, + "Metrics":{ + "shape":"ExportMetrics", + "documentation":"

    A list of ExportMetric objects to export.

    " + }, + "StartDate":{ + "shape":"Timestamp", + "documentation":"

    Represents the start date for the export interval as a timestamp.

    " + }, + "EndDate":{ + "shape":"Timestamp", + "documentation":"

    Represents the end date for the export interval as a timestamp.

    " + } + }, + "documentation":"

    An object that contains details about the data source for the metrics export.

    " + }, "NextToken":{"type":"string"}, "NotFoundException":{ "type":"structure", @@ -5465,7 +6092,7 @@ }, "S3Url":{ "type":"string", - "documentation":"

    An Amazon S3 URL in the format s3://<bucket_name>/<object>.

    ", + "documentation":"

    An Amazon S3 URL in the format s3://<bucket_name>/<object> or a pre-signed URL.

    ", "pattern":"^s3:\\/\\/([^\\/]+)\\/(.*?([^\\/]+)\\/?)$" }, "ScalingMode":{ diff --git a/services/sfn/pom.xml b/services/sfn/pom.xml index 9d33659b91d5..cefcce261088 100644 --- a/services/sfn/pom.xml +++ b/services/sfn/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT sfn AWS Java SDK :: Services :: AWS Step Functions diff --git a/services/shield/pom.xml b/services/shield/pom.xml index b7d677ac95a1..3b5f19c795fe 100644 --- a/services/shield/pom.xml +++ b/services/shield/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT shield AWS Java SDK :: Services :: AWS Shield diff --git a/services/shield/src/main/resources/codegen-resources/customization.config b/services/shield/src/main/resources/codegen-resources/customization.config index 3beb25f98466..541a64eac33c 100644 --- a/services/shield/src/main/resources/codegen-resources/customization.config +++ b/services/shield/src/main/resources/codegen-resources/customization.config @@ -8,7 +8,7 @@ "listAttacks", "getSubscriptionState" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "updateEmergencyContactSettings", "updateSubscription", "describeDRTAccess", diff --git a/services/signer/pom.xml b/services/signer/pom.xml index 430b649e3d49..550905ee6404 100644 --- a/services/signer/pom.xml +++ b/services/signer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT signer AWS Java SDK :: Services :: Signer diff --git a/services/simspaceweaver/pom.xml b/services/simspaceweaver/pom.xml index 9c1f25d696d9..b3a1d2330882 100644 --- a/services/simspaceweaver/pom.xml +++ b/services/simspaceweaver/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT simspaceweaver AWS Java SDK :: Services :: Sim Space Weaver diff --git a/services/simspaceweaver/src/main/resources/codegen-resources/endpoint-tests.json b/services/simspaceweaver/src/main/resources/codegen-resources/endpoint-tests.json index 38383d6e5e0c..b2b7e4f0a830 100644 --- a/services/simspaceweaver/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/simspaceweaver/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,55 +1,55 @@ { "testCases": [ { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://simspaceweaver-fips.us-gov-east-1.api.aws" + "url": "https://simspaceweaver-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://simspaceweaver-fips.us-gov-east-1.amazonaws.com" + "url": "https://simspaceweaver-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://simspaceweaver.us-gov-east-1.api.aws" + "url": "https://simspaceweaver.us-east-1.api.aws" } }, "params": { - 
"UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://simspaceweaver.us-gov-east-1.amazonaws.com" + "url": "https://simspaceweaver.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -99,109 +99,109 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://simspaceweaver-fips.us-gov-east-1.api.aws" + } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://simspaceweaver-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://simspaceweaver-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://simspaceweaver.us-gov-east-1.api.aws" + } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://simspaceweaver.us-iso-east-1.c2s.ic.gov" + "url": "https://simspaceweaver.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://simspaceweaver-fips.us-east-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": 
true, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://simspaceweaver-fips.us-east-1.amazonaws.com" + "url": "https://simspaceweaver-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://simspaceweaver.us-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://simspaceweaver.us-east-1.amazonaws.com" + "url": "https://simspaceweaver.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -210,9 +210,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -223,9 +223,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -234,9 +234,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -247,22 +247,35 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { "UseFIPS": false, - "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -272,9 +285,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -284,11 +297,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { 
+ "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/simspaceweaver/src/main/resources/codegen-resources/service-2.json b/services/simspaceweaver/src/main/resources/codegen-resources/service-2.json index 7f94c800ddf9..c6ab3962a562 100644 --- a/services/simspaceweaver/src/main/resources/codegen-resources/service-2.json +++ b/services/simspaceweaver/src/main/resources/codegen-resources/service-2.json @@ -810,6 +810,7 @@ }, "S3Destination":{ "type":"structure", + "required":["BucketName"], "members":{ "BucketName":{ "shape":"BucketName", @@ -824,6 +825,10 @@ }, "S3Location":{ "type":"structure", + "required":[ + "BucketName", + "ObjectKey" + ], "members":{ "BucketName":{ "shape":"BucketName", diff --git a/services/sms/pom.xml b/services/sms/pom.xml index 5ad2bc6b5679..cdc21426e0c9 100644 --- a/services/sms/pom.xml +++ b/services/sms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT sms AWS Java SDK :: Services :: AWS Server Migration diff --git a/services/snowball/pom.xml b/services/snowball/pom.xml index e0d471a344f3..e11e3339db2e 100644 --- a/services/snowball/pom.xml +++ b/services/snowball/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT snowball AWS Java SDK :: Services :: Amazon Snowball diff --git a/services/snowball/src/main/resources/codegen-resources/customization.config b/services/snowball/src/main/resources/codegen-resources/customization.config index d6daf2b5dc08..50142bb8bc88 100644 --- a/services/snowball/src/main/resources/codegen-resources/customization.config +++ b/services/snowball/src/main/resources/codegen-resources/customization.config @@ -17,7 +17,7 @@ ] } }, - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "createJob" ] } diff --git a/services/snowdevicemanagement/pom.xml b/services/snowdevicemanagement/pom.xml index ddb11fe120df..de9584d29e40 100644 --- a/services/snowdevicemanagement/pom.xml +++ b/services/snowdevicemanagement/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT snowdevicemanagement AWS Java SDK :: Services :: Snow Device Management diff --git a/services/sns/pom.xml b/services/sns/pom.xml index 747ff7e72f95..0c88d475df91 100644 --- a/services/sns/pom.xml +++ b/services/sns/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT sns AWS Java SDK :: Services :: Amazon SNS diff --git a/services/sqs/pom.xml b/services/sqs/pom.xml index 421c160708dd..4b8997d57eba 100644 --- a/services/sqs/pom.xml +++ b/services/sqs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT sqs AWS Java SDK :: Services :: Amazon SQS diff --git a/services/ssm/pom.xml b/services/ssm/pom.xml index 7fdc973c0987..0c69c023b5ed 100644 --- a/services/ssm/pom.xml +++ b/services/ssm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ssm AWS Java SDK :: Services :: AWS Simple Systems Management (SSM) diff --git a/services/ssm/src/main/resources/codegen-resources/customization.config b/services/ssm/src/main/resources/codegen-resources/customization.config index 6d66b0458a9a..0f8e54aef312 100644 --- a/services/ssm/src/main/resources/codegen-resources/customization.config +++ b/services/ssm/src/main/resources/codegen-resources/customization.config @@ -20,7 +20,7 @@ "listResourceComplianceSummaries", 
"listResourceDataSync" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "deleteAssociation", "describeAssociation", "listComplianceItems", diff --git a/services/ssmcontacts/pom.xml b/services/ssmcontacts/pom.xml index 08b556710092..afd0da61372e 100644 --- a/services/ssmcontacts/pom.xml +++ b/services/ssmcontacts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ssmcontacts AWS Java SDK :: Services :: SSM Contacts diff --git a/services/ssmincidents/pom.xml b/services/ssmincidents/pom.xml index c882863dda2b..e8d692dd84ab 100644 --- a/services/ssmincidents/pom.xml +++ b/services/ssmincidents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ssmincidents AWS Java SDK :: Services :: SSM Incidents diff --git a/services/ssmsap/pom.xml b/services/ssmsap/pom.xml index 217ba5160c88..f62840816940 100644 --- a/services/ssmsap/pom.xml +++ b/services/ssmsap/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ssmsap AWS Java SDK :: Services :: Ssm Sap diff --git a/services/sso/pom.xml b/services/sso/pom.xml index 2d17058e814e..0291dee6a4b5 100644 --- a/services/sso/pom.xml +++ b/services/sso/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT sso AWS Java SDK :: Services :: SSO diff --git a/services/ssoadmin/pom.xml b/services/ssoadmin/pom.xml index a105038abf5d..9f8b865a3f01 100644 --- a/services/ssoadmin/pom.xml +++ b/services/ssoadmin/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ssoadmin AWS Java SDK :: Services :: SSO Admin diff --git a/services/ssooidc/pom.xml b/services/ssooidc/pom.xml index 8b0e2a424e47..f555f37e8837 100644 --- a/services/ssooidc/pom.xml +++ b/services/ssooidc/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ssooidc AWS Java SDK :: Services :: SSO OIDC diff --git a/services/storagegateway/pom.xml b/services/storagegateway/pom.xml index 1637090db503..6e1507580c4e 100644 --- a/services/storagegateway/pom.xml +++ b/services/storagegateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT storagegateway AWS Java SDK :: Services :: AWS Storage Gateway diff --git a/services/sts/pom.xml b/services/sts/pom.xml index d3033d5f4efb..ce2aba1f1b67 100644 --- a/services/sts/pom.xml +++ b/services/sts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT sts AWS Java SDK :: Services :: AWS STS diff --git a/services/support/pom.xml b/services/support/pom.xml index cc15ff17b2a5..f82deabc9241 100644 --- a/services/support/pom.xml +++ b/services/support/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT support AWS Java SDK :: Services :: AWS Support diff --git a/services/support/src/main/resources/codegen-resources/customization.config b/services/support/src/main/resources/codegen-resources/customization.config index 6c7177e16e02..62198e1a488e 100644 --- a/services/support/src/main/resources/codegen-resources/customization.config +++ b/services/support/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "resolveCase", "describeSeverityLevels", "describeCases", diff --git a/services/supportapp/pom.xml b/services/supportapp/pom.xml index b44ea2751b93..2ddd2abf55f2 100644 --- 
a/services/supportapp/pom.xml +++ b/services/supportapp/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT supportapp AWS Java SDK :: Services :: Support App diff --git a/services/swf/pom.xml b/services/swf/pom.xml index 638d405a7113..67be7ae90b4a 100644 --- a/services/swf/pom.xml +++ b/services/swf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT swf AWS Java SDK :: Services :: Amazon SWF diff --git a/services/synthetics/pom.xml b/services/synthetics/pom.xml index e9655bd603dd..506f485f348c 100644 --- a/services/synthetics/pom.xml +++ b/services/synthetics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT synthetics AWS Java SDK :: Services :: Synthetics diff --git a/services/textract/pom.xml b/services/textract/pom.xml index 9b344e3ead81..e659a817ce20 100644 --- a/services/textract/pom.xml +++ b/services/textract/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT textract AWS Java SDK :: Services :: Textract diff --git a/services/timestreamquery/pom.xml b/services/timestreamquery/pom.xml index 4f5393b47273..c514e8a9ee92 100644 --- a/services/timestreamquery/pom.xml +++ b/services/timestreamquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT timestreamquery AWS Java SDK :: Services :: Timestream Query diff --git a/services/timestreamwrite/pom.xml b/services/timestreamwrite/pom.xml index 0f2f8db29e93..150d9d1b4c98 100644 --- a/services/timestreamwrite/pom.xml +++ b/services/timestreamwrite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT timestreamwrite AWS Java SDK :: Services :: Timestream Write diff --git a/services/tnb/pom.xml b/services/tnb/pom.xml index f46cae20bdd8..1574128b71d0 100644 --- a/services/tnb/pom.xml +++ b/services/tnb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT tnb AWS Java SDK :: Services :: Tnb diff --git a/services/transcribe/pom.xml b/services/transcribe/pom.xml index 0386e1badf56..5e4bfa8a7d54 100644 --- a/services/transcribe/pom.xml +++ b/services/transcribe/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT transcribe AWS Java SDK :: Services :: Transcribe diff --git a/services/transcribestreaming/pom.xml b/services/transcribestreaming/pom.xml index c2713f0a019e..0fadfbf3b8b6 100644 --- a/services/transcribestreaming/pom.xml +++ b/services/transcribestreaming/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT transcribestreaming AWS Java SDK :: Services :: AWS Transcribe Streaming diff --git a/services/transfer/pom.xml b/services/transfer/pom.xml index f3e40f88268a..4a8cb5ed4843 100644 --- a/services/transfer/pom.xml +++ b/services/transfer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT transfer AWS Java SDK :: Services :: Transfer diff --git a/services/transfer/src/main/resources/codegen-resources/service-2.json b/services/transfer/src/main/resources/codegen-resources/service-2.json index 673f1450cbd5..9deccf29a34e 100644 --- a/services/transfer/src/main/resources/codegen-resources/service-2.json +++ b/services/transfer/src/main/resources/codegen-resources/service-2.json @@ -4177,7 +4177,7 @@ "members":{ "UserSecretId":{ "shape":"SecretId", - "documentation":"

    The identifiers for the secrets (in Amazon Web Services Secrets Manager) that contain the SFTP user's private keys or passwords.

    " + "documentation":"

    The identifier for the secret (in Amazon Web Services Secrets Manager) that contains the SFTP user's private key, password, or both. The identifier can be either the Amazon Resource Name (ARN) or the name of the secret.

    " }, "TrustedHostKeys":{ "shape":"SftpConnectorTrustedHostKeyList", @@ -4266,7 +4266,7 @@ }, "SendFilePaths":{ "shape":"FilePaths", - "documentation":"

    One or more source paths for the Transfer Family server. Each string represents a source file path for one outbound file transfer. For example, DOC-EXAMPLE-BUCKET/myfile.txt .

    Replace DOC-EXAMPLE-BUCKET with one of your actual buckets.

    " + "documentation":"

    One or more source paths for the Amazon S3 storage. Each string represents a source file path for one outbound file transfer. For example, DOC-EXAMPLE-BUCKET/myfile.txt.

    Replace DOC-EXAMPLE-BUCKET with one of your actual buckets.

    " }, "RetrieveFilePaths":{ "shape":"FilePaths", diff --git a/services/translate/pom.xml b/services/translate/pom.xml index 60377c51d990..b05af6ec7a13 100644 --- a/services/translate/pom.xml +++ b/services/translate/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 translate diff --git a/services/verifiedpermissions/pom.xml b/services/verifiedpermissions/pom.xml index 77f4b11c713d..e1ee1c46881e 100644 --- a/services/verifiedpermissions/pom.xml +++ b/services/verifiedpermissions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT verifiedpermissions AWS Java SDK :: Services :: Verified Permissions diff --git a/services/verifiedpermissions/src/main/resources/codegen-resources/endpoint-tests.json b/services/verifiedpermissions/src/main/resources/codegen-resources/endpoint-tests.json index 1ee1c6c83984..3a5b73a9e4e2 100644 --- a/services/verifiedpermissions/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/verifiedpermissions/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,55 +1,55 @@ { "testCases": [ { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://verifiedpermissions-fips.us-gov-east-1.api.aws" + "url": "https://verifiedpermissions-fips.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://verifiedpermissions-fips.us-gov-east-1.amazonaws.com" + "url": "https://verifiedpermissions-fips.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://verifiedpermissions.us-gov-east-1.api.aws" + "url": "https://verifiedpermissions.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://verifiedpermissions.us-gov-east-1.amazonaws.com" + "url": "https://verifiedpermissions.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -73,9 +73,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -99,109 +99,109 @@ } }, "params": { + 
"Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://verifiedpermissions-fips.us-gov-east-1.api.aws" + } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://verifiedpermissions-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://verifiedpermissions-fips.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://verifiedpermissions.us-gov-east-1.api.aws" + } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://verifiedpermissions.us-iso-east-1.c2s.ic.gov" + "url": "https://verifiedpermissions.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://verifiedpermissions-fips.us-east-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://verifiedpermissions-fips.us-east-1.amazonaws.com" + "url": "https://verifiedpermissions-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://verifiedpermissions.us-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { - 
"documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://verifiedpermissions.us-east-1.amazonaws.com" + "url": "https://verifiedpermissions.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -210,9 +210,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -223,9 +223,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -234,9 +234,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -247,13 +247,27 @@ } }, "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false, - "Region": "us-isob-east-1" + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -262,7 +276,6 @@ "params": { "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -272,9 +285,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -284,11 +297,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/verifiedpermissions/src/main/resources/codegen-resources/service-2.json b/services/verifiedpermissions/src/main/resources/codegen-resources/service-2.json index 04d8ee188f8f..d0e3f80e7eb1 100644 --- a/services/verifiedpermissions/src/main/resources/codegen-resources/service-2.json +++ b/services/verifiedpermissions/src/main/resources/codegen-resources/service-2.json @@ -29,7 +29,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Creates a reference to an Amazon Cognito user pool as an external identity provider (IdP).

    After you create an identity source, you can use the identities provided by the IdP as proxies for the principal in authorization queries that use the IsAuthorizedWithToken operation. These identities take the form of tokens that contain claims about the user, such as IDs, attributes and group memberships. Amazon Cognito provides both identity tokens and access tokens, and Verified Permissions can use either or both. Any combination of identity and access tokens results in the same Cedar principal. Verified Permissions automatically translates the information about the identities into the standard Cedar attributes that can be evaluated by your policies. Because the Amazon Cognito identity and access tokens can contain different information, the tokens you choose to use determine which principal attributes are available to access when evaluating Cedar policies.

    If you delete an Amazon Cognito user pool or user, tokens from that deleted pool or that deleted user continue to be usable until they expire.

    To reference a user from this identity source in your Cedar policies, use the following syntax.

    IdentityType::\"<CognitoUserPoolIdentifier>|<CognitoClientId>

    Where IdentityType is the string that you provide to the PrincipalEntityType parameter for this operation. The CognitoUserPoolId and CognitoClientId are defined by the Amazon Cognito user pool.

    ", + "documentation":"

    Creates a reference to an Amazon Cognito user pool as an external identity provider (IdP).

    After you create an identity source, you can use the identities provided by the IdP as proxies for the principal in authorization queries that use the IsAuthorizedWithToken operation. These identities take the form of tokens that contain claims about the user, such as IDs, attributes and group memberships. Amazon Cognito provides both identity tokens and access tokens, and Verified Permissions can use either or both. Any combination of identity and access tokens results in the same Cedar principal. Verified Permissions automatically translates the information about the identities into the standard Cedar attributes that can be evaluated by your policies. Because the Amazon Cognito identity and access tokens can contain different information, the tokens you choose to use determine which principal attributes are available to access when evaluating Cedar policies.

    If you delete an Amazon Cognito user pool or user, tokens from that deleted pool or that deleted user continue to be usable until they expire.

    To reference a user from this identity source in your Cedar policies, use the following syntax.

    IdentityType::\"<CognitoUserPoolIdentifier>|<CognitoClientId>

    Where IdentityType is the string that you provide to the PrincipalEntityType parameter for this operation. The CognitoUserPoolId and CognitoClientId are defined by the Amazon Cognito user pool.

    Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and become visible in the results of other Verified Permissions operations.
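
    As a rough, unverified sketch of how this operation surfaces in the generated Java client from this SDK: the class and builder names below are inferred from the CreateIdentitySource shapes in this model, and the policy store ID, user pool ARN, and client ID are placeholders.

        // Hedged sketch only: the generated client and builder names are assumptions based on
        // the verifiedpermissions model in this patch; the IDs and ARN are placeholders.
        import software.amazon.awssdk.services.verifiedpermissions.VerifiedPermissionsClient;
        import software.amazon.awssdk.services.verifiedpermissions.model.CreateIdentitySourceRequest;
        import software.amazon.awssdk.services.verifiedpermissions.model.CreateIdentitySourceResponse;

        public class CreateIdentitySourceSketch {
            public static void main(String[] args) {
                try (VerifiedPermissionsClient client = VerifiedPermissionsClient.create()) {
                    CreateIdentitySourceResponse response = client.createIdentitySource(
                        CreateIdentitySourceRequest.builder()
                            .policyStoreId("PSEXAMPLEabcdefg111111")               // placeholder policy store ID
                            .principalEntityType("PhotoApp::User")                 // becomes IdentityType in Cedar policies
                            .configuration(c -> c.cognitoUserPoolConfiguration(p -> p
                                .userPoolArn("arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_EXAMPLE")
                                .clientIds("1example23456789")))
                            .build());
                    System.out.println("Created identity source " + response.identitySourceId());
                }
            }
        }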

    ", "idempotent":true }, "CreatePolicy":{ @@ -48,7 +48,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Creates a Cedar policy and saves it in the specified policy store. You can create either a static policy or a policy linked to a policy template.

    • To create a static policy, provide the Cedar policy text in the StaticPolicy section of the PolicyDefinition.

    • To create a policy that is dynamically linked to a policy template, specify the policy template ID and the principal and resource to associate with this policy in the templateLinked section of the PolicyDefinition. If the policy template is ever updated, any policies linked to the policy template automatically use the updated template.

    Creating a policy causes it to be validated against the schema in the policy store. If the policy doesn't pass validation, the operation fails and the policy isn't stored.

    ", + "documentation":"

    Creates a Cedar policy and saves it in the specified policy store. You can create either a static policy or a policy linked to a policy template.

    • To create a static policy, provide the Cedar policy text in the StaticPolicy section of the PolicyDefinition.

    • To create a policy that is dynamically linked to a policy template, specify the policy template ID and the principal and resource to associate with this policy in the templateLinked section of the PolicyDefinition. If the policy template is ever updated, any policies linked to the policy template automatically use the updated template.

    Creating a policy causes it to be validated against the schema in the policy store. If the policy doesn't pass validation, the operation fails and the policy isn't stored.

    Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and become visible in the results of other Verified Permissions operations.
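
    To make the template-linked case above concrete, here is a minimal, unverified sketch against the generated Java client; the builder method names (definition, templateLinked, principal, resource) are assumptions taken from the PolicyDefinition and TemplateLinkedPolicyDefinition shapes in this model, and all identifiers are placeholders.

        // Hedged sketch: creates a policy that is dynamically linked to an existing policy
        // template. Names are inferred from the model in this patch, not verified.
        import software.amazon.awssdk.services.verifiedpermissions.VerifiedPermissionsClient;
        import software.amazon.awssdk.services.verifiedpermissions.model.CreatePolicyRequest;
        import software.amazon.awssdk.services.verifiedpermissions.model.CreatePolicyResponse;

        public class CreateTemplateLinkedPolicySketch {
            public static void main(String[] args) {
                try (VerifiedPermissionsClient client = VerifiedPermissionsClient.create()) {
                    CreatePolicyResponse response = client.createPolicy(CreatePolicyRequest.builder()
                        .policyStoreId("PSEXAMPLEabcdefg111111")                       // placeholder
                        .definition(d -> d.templateLinked(t -> t
                            .policyTemplateId("PTEXAMPLEabcdefg111111")                // placeholder template ID
                            .principal(p -> p.entityType("PhotoApp::User").entityId("alice"))
                            .resource(r -> r.entityType("PhotoApp::Photo").entityId("VacationPhoto94.jpg"))))
                        .build());
                    System.out.println("Created policy " + response.policyId());
                }
            }
        }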

    ", "idempotent":true }, "CreatePolicyStore":{ @@ -66,7 +66,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Creates a policy store. A policy store is a container for policy resources.

    Although Cedar supports multiple namespaces, Verified Permissions currently supports only one namespace per policy store.

    ", + "documentation":"

    Creates a policy store. A policy store is a container for policy resources.

    Although Cedar supports multiple namespaces, Verified Permissions currently supports only one namespace per policy store.

    Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and become visible in the results of other Verified Permissions operations.

    ", "idempotent":true }, "CreatePolicyTemplate":{ @@ -85,7 +85,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Creates a policy template. A template can use placeholders for the principal and resource. A template must be instantiated into a policy by associating it with specific principals and resources to use for the placeholders. That instantiated policy can then be considered in authorization decisions. The instantiated policy works identically to any other policy, except that it is dynamically linked to the template. If the template changes, then any policies that are linked to that template are immediately updated as well.

    ", + "documentation":"

    Creates a policy template. A template can use placeholders for the principal and resource. A template must be instantiated into a policy by associating it with specific principals and resources to use for the placeholders. That instantiated policy can then be considered in authorization decisions. The instantiated policy works identically to any other policy, except that it is dynamically linked to the template. If the template changes, then any policies that are linked to that template are immediately updated as well.

    Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and become visible in the results of other Verified Permissions operations.

    ", "idempotent":true }, "DeleteIdentitySource":{ @@ -279,7 +279,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Makes an authorization decision about a service request described in the parameters. The principal in this request comes from an external identity source. The information in the parameters can also define additional context that Verified Permissions can include in the evaluation. The request is evaluated against all matching policies in the specified policy store. The result of the decision is either Allow or Deny, along with a list of the policies that resulted in the decision.

    If you delete an Amazon Cognito user pool or user, tokens from that deleted pool or that deleted user continue to be usable until they expire.

    " + "documentation":"

    Makes an authorization decision about a service request described in the parameters. The principal in this request comes from an external identity source in the form of an identity token formatted as a JSON web token (JWT). The information in the parameters can also define additional context that Verified Permissions can include in the evaluation. The request is evaluated against all matching policies in the specified policy store. The result of the decision is either Allow or Deny, along with a list of the policies that resulted in the decision.

    If you specify the identityToken parameter, then this operation derives the principal from that token. You must not also include that principal in the entities parameter or the operation fails and reports a conflict between the two entity sources.

    If you provide only an accessToken, then you can include the entity as part of the entities parameter to provide additional attributes.

    At this time, Verified Permissions accepts tokens from only Amazon Cognito.

    Verified Permissions validates each token that is specified in a request by checking its expiration date and its signature.

    If you delete an Amazon Cognito user pool or user, tokens from that deleted pool or that deleted user continue to be usable until they expire.
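
    A minimal, unverified sketch of a token-based request with the generated Java client follows; the builder names come from the IsAuthorizedWithToken shapes in this model, and the JWT, IDs, and entity names are placeholders.

        // Hedged sketch: the identity token supplies the principal, so only the action and
        // resource are passed explicitly. Names are assumptions based on this model.
        import software.amazon.awssdk.services.verifiedpermissions.VerifiedPermissionsClient;
        import software.amazon.awssdk.services.verifiedpermissions.model.IsAuthorizedWithTokenRequest;
        import software.amazon.awssdk.services.verifiedpermissions.model.IsAuthorizedWithTokenResponse;

        public class IsAuthorizedWithTokenSketch {
            public static void main(String[] args) {
                String identityToken = "eyJraWQiEXAMPLE...";                           // placeholder Cognito JWT
                try (VerifiedPermissionsClient client = VerifiedPermissionsClient.create()) {
                    IsAuthorizedWithTokenResponse decision = client.isAuthorizedWithToken(
                        IsAuthorizedWithTokenRequest.builder()
                            .policyStoreId("PSEXAMPLEabcdefg111111")                   // placeholder
                            .identityToken(identityToken)
                            .action(a -> a.actionType("PhotoApp::Action").actionId("view"))
                            .resource(r -> r.entityType("PhotoApp::Photo").entityId("VacationPhoto94.jpg"))
                            .build());
                    System.out.println("Decision: " + decision.decisionAsString());
                }
            }
        }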

    " }, "ListIdentitySources":{ "name":"ListIdentitySources", @@ -365,7 +365,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Creates or updates the policy schema in the specified policy store. The schema is used to validate any Cedar policies and policy templates submitted to the policy store. Any changes to the schema validate only policies and templates submitted after the schema change. Existing policies and templates are not re-evaluated against the changed schema. If you later update a policy, then it is evaluated against the new schema at that time.

    ", + "documentation":"

    Creates or updates the policy schema in the specified policy store. The schema is used to validate any Cedar policies and policy templates submitted to the policy store. Any changes to the schema validate only policies and templates submitted after the schema change. Existing policies and templates are not re-evaluated against the changed schema. If you later update a policy, then it is evaluated against the new schema at that time.

    Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and become visible in the results of other Verified Permissions operations.

    ", "idempotent":true }, "UpdateIdentitySource":{ @@ -384,7 +384,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Updates the specified identity source to use a new identity provider (IdP) source, or to change the mapping of identities from the IdP to a different principal entity type.

    ", + "documentation":"

    Updates the specified identity source to use a new identity provider (IdP) source, or to change the mapping of identities from the IdP to a different principal entity type.

    Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and become visible in the results of other Verified Permissions operations.

    ", "idempotent":true }, "UpdatePolicy":{ @@ -404,7 +404,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Modifies a Cedar static policy in the specified policy store. You can change only certain elements of the UpdatePolicyDefinition parameter. You can directly update only static policies. To change a template-linked policy, you must update the template instead, using UpdatePolicyTemplate.

    If policy validation is enabled in the policy store, then updating a static policy causes Verified Permissions to validate the policy against the schema in the policy store. If the updated static policy doesn't pass validation, the operation fails and the update isn't stored.

    ", + "documentation":"

    Modifies a Cedar static policy in the specified policy store. You can change only certain elements of the UpdatePolicyDefinition parameter. You can directly update only static policies. To change a template-linked policy, you must update the template instead, using UpdatePolicyTemplate.

    • If policy validation is enabled in the policy store, then updating a static policy causes Verified Permissions to validate the policy against the schema in the policy store. If the updated static policy doesn't pass validation, the operation fails and the update isn't stored.

      • When you edit a static policy, you can change only certain elements:

      • The action referenced by the policy.

      • A condition clause, such as when and unless.

      You can't change these elements of a static policy:

      • The policy type: a static policy can't be changed into a template-linked policy.

      • The effect of the policy, permit or forbid.

      • The principal referenced by a static policy.

      • The resource referenced by a static policy.

    • To update a template-linked policy, you must update the template instead.

    Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and become visible in the results of other Verified Permissions operations.

    ", "idempotent":true }, "UpdatePolicyStore":{ @@ -423,7 +423,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Modifies the validation setting for a policy store.

    ", + "documentation":"

    Modifies the validation setting for a policy store.

    Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and become visible in the results of other Verified Permissions operations.

    ", "idempotent":true }, "UpdatePolicyTemplate":{ @@ -442,7 +442,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Updates the specified policy template. You can update only the description and some elements of the policyBody.

    Changes you make to the policy template content are immediately reflected in authorization decisions that involve all template-linked policies instantiated from this template.

    ", + "documentation":"

    Updates the specified policy template. You can update only the description and some elements of the policyBody.

    Changes you make to the policy template content are immediately reflected in authorization decisions that involve all template-linked policies instantiated from this template.

    Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and become visible in the results of other Verified Permissions operations.

    ", "idempotent":true } }, @@ -460,7 +460,8 @@ "type":"string", "max":200, "min":1, - "pattern":".*" + "pattern":".*", + "sensitive":true }, "ActionIdentifier":{ "type":"structure", @@ -484,7 +485,8 @@ "type":"string", "max":200, "min":1, - "pattern":"Action$|^.+::Action" + "pattern":"Action$|^.+::Action", + "sensitive":true }, "AttributeValue":{ "type":"structure", @@ -523,13 +525,15 @@ }, "BooleanAttribute":{ "type":"boolean", - "box":true + "box":true, + "sensitive":true }, "ClientId":{ "type":"string", "max":255, "min":1, - "pattern":".*" + "pattern":".*", + "sensitive":true }, "ClientIds":{ "type":"list", @@ -593,8 +597,7 @@ "ContextMap":{ "type":"map", "key":{"shape":"String"}, - "value":{"shape":"AttributeValue"}, - "min":0 + "value":{"shape":"AttributeValue"} }, "CreateIdentitySourceInput":{ "type":"structure", @@ -929,14 +932,14 @@ "EntityAttributes":{ "type":"map", "key":{"shape":"String"}, - "value":{"shape":"AttributeValue"}, - "min":0 + "value":{"shape":"AttributeValue"} }, "EntityId":{ "type":"string", "max":200, "min":1, - "pattern":".*" + "pattern":".*", + "sensitive":true }, "EntityIdentifier":{ "type":"structure", @@ -973,12 +976,11 @@ "documentation":"

    The parents in the hierarchy that contains the entity.

    " } }, - "documentation":"

    Contains information about an entity that can be referenced in a Cedar policy.

    This data type is used as one of the fields in the EntitiesDefinition structure.

    { \"id\": { \"entityType\": \"Photo\", \"entityId\": \"VacationPhoto94.jpg\" }, \"Attributes\": {}, \"Parents\": [ { \"entityType\": \"Album\", \"entityId\": \"alice_folder\" } ] }

    " + "documentation":"

    Contains information about an entity that can be referenced in a Cedar policy.

    This data type is used as one of the fields in the EntitiesDefinition structure.

    { \"identifier\": { \"entityType\": \"Photo\", \"entityId\": \"VacationPhoto94.jpg\" }, \"attributes\": {}, \"parents\": [ { \"entityType\": \"Album\", \"entityId\": \"alice_folder\" } ] }

    " }, "EntityList":{ "type":"list", - "member":{"shape":"EntityItem"}, - "min":0 + "member":{"shape":"EntityItem"} }, "EntityReference":{ "type":"structure", @@ -999,7 +1001,8 @@ "type":"string", "max":200, "min":1, - "pattern":".*" + "pattern":".*", + "sensitive":true }, "EvaluationErrorItem":{ "type":"structure", @@ -1010,7 +1013,8 @@ "documentation":"

    The error description.

    " } }, - "documentation":"

    Contains a description of an evaluation error.

    This data type is used as a request parameter in the IsAuthorized and IsAuthorizedWithToken operations.

    " + "documentation":"

    Contains a description of an evaluation error.

    This data type is returned as part of the response to the IsAuthorized and IsAuthorizedWithToken operations.

    ", + "sensitive":true }, "EvaluationErrorList":{ "type":"list", @@ -1451,11 +1455,11 @@ }, "identityToken":{ "shape":"Token", - "documentation":"

    Specifies an identity token for the principal to be authorized. This token is provided to you by the identity provider (IdP) associated with the specified identity source. You must specify either an AccessToken or an IdentityToken, but not both.

    " + "documentation":"

    Specifies an identity token for the principal to be authorized. This token is provided to you by the identity provider (IdP) associated with the specified identity source. You must specify either an AccessToken or an IdentityToken, or both.

    " }, "accessToken":{ "shape":"Token", - "documentation":"

    Specifies an access token for the principal to be authorized. This token is provided to you by the identity provider (IdP) associated with the specified identity source. You must specify either an AccessToken or an IdentityToken, but not both.

    " + "documentation":"

    Specifies an access token for the principal to be authorized. This token is provided to you by the identity provider (IdP) associated with the specified identity source. You must specify either an AccessToken or an IdentityToken, or both.

    " }, "action":{ "shape":"ActionIdentifier", @@ -1471,7 +1475,7 @@ }, "entities":{ "shape":"EntitiesDefinition", - "documentation":"

    Specifies the list of resources and principals and their associated attributes that Verified Permissions can examine when evaluating the policies.

    You can include only principal and resource entities in this parameter; you can't include actions. You must specify actions in the schema.

    " + "documentation":"

    Specifies the list of resources and their associated attributes that Verified Permissions can examine when evaluating the policies.

    You can include only resource and action entities in this parameter; you can't include principals.

    • The IsAuthorizedWithToken operation takes principal attributes from only the identityToken or accessToken passed to the operation.

    • For action entities, you can include only their Identifier and EntityType.

    " } } }, @@ -1511,7 +1515,7 @@ }, "maxResults":{ "shape":"ListIdentitySourcesMaxResults", - "documentation":"

    Specifies the total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " + "documentation":"

    Specifies the total number of results that you want included in each response. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next set of results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    If you do not specify this parameter, the operation defaults to 10 identity sources per response. You can specify a maximum of 200 identity sources per response.
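
    The NextToken contract described here follows the usual SDK pagination pattern; as an unverified sketch (member names such as identitySources and nextToken are assumed from this model), a caller would loop until the token comes back null.

        // Hedged sketch of manual NextToken pagination; request/response member names are
        // assumptions based on the ListIdentitySources shapes in this patch.
        import software.amazon.awssdk.services.verifiedpermissions.VerifiedPermissionsClient;
        import software.amazon.awssdk.services.verifiedpermissions.model.ListIdentitySourcesRequest;
        import software.amazon.awssdk.services.verifiedpermissions.model.ListIdentitySourcesResponse;

        public class ListAllIdentitySourcesSketch {
            public static void main(String[] args) {
                try (VerifiedPermissionsClient client = VerifiedPermissionsClient.create()) {
                    String nextToken = null;
                    do {
                        ListIdentitySourcesResponse page = client.listIdentitySources(
                            ListIdentitySourcesRequest.builder()
                                .policyStoreId("PSEXAMPLEabcdefg111111")    // placeholder
                                .maxResults(50)                             // up to 200 per the documentation above
                                .nextToken(nextToken)                       // null on the first call
                                .build());
                        page.identitySources().forEach(item -> System.out.println(item.identitySourceId()));
                        nextToken = page.nextToken();                       // null when no more results remain
                    } while (nextToken != null);
                }
            }
        }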

    " }, "filters":{ "shape":"IdentitySourceFilters", @@ -1553,7 +1557,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

    Specifies the total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " + "documentation":"

    Specifies the total number of results that you want included in each response. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next set of results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    If you do not specify this parameter, the operation defaults to 10 policies per response. You can specify a maximum of 50 policies per response.

    " }, "filter":{ "shape":"PolicyFilter", @@ -1584,7 +1588,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

    Specifies the total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " + "documentation":"

    Specifies the total number of results that you want included in each response. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next set of results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    If you do not specify this parameter, the operation defaults to 10 policy stores per response. You can specify a maximum of 50 policy stores per response.

    " } } }, @@ -1616,7 +1620,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

    Specifies the total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " + "documentation":"

    Specifies the total number of results that you want included in each response. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next set of results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    If you do not specify this parameter, the operation defaults to 10 policy templates per response. You can specify a maximum of 50 policy templates per response.

    " } } }, @@ -1636,19 +1640,21 @@ }, "LongAttribute":{ "type":"long", - "box":true + "box":true, + "sensitive":true }, "MaxResults":{ "type":"integer", "box":true, - "max":20, + "max":50, "min":1 }, "Namespace":{ "type":"string", "max":100, "min":1, - "pattern":".*" + "pattern":".*", + "sensitive":true }, "NamespaceList":{ "type":"list", @@ -1796,7 +1802,8 @@ "PolicyStatement":{ "type":"string", "max":10000, - "min":1 + "min":1, + "sensitive":true }, "PolicyStoreId":{ "type":"string", @@ -1834,7 +1841,8 @@ "PolicyTemplateDescription":{ "type":"string", "max":150, - "min":0 + "min":0, + "sensitive":true }, "PolicyTemplateId":{ "type":"string", @@ -1889,7 +1897,8 @@ "type":"string", "max":200, "min":1, - "pattern":".*" + "pattern":".*", + "sensitive":true }, "PutSchemaInput":{ "type":"structure", @@ -2013,7 +2022,8 @@ "SchemaJson":{ "type":"string", "max":10000, - "min":1 + "min":1, + "sensitive":true }, "ServiceQuotaExceededException":{ "type":"structure", @@ -2090,10 +2100,14 @@ "StaticPolicyDescription":{ "type":"string", "max":150, - "min":0 + "min":0, + "sensitive":true }, "String":{"type":"string"}, - "StringAttribute":{"type":"string"}, + "StringAttribute":{ + "type":"string", + "sensitive":true + }, "TemplateLinkedPolicyDefinition":{ "type":"structure", "required":["policyTemplateId"], @@ -2177,7 +2191,8 @@ "type":"string", "max":131072, "min":1, - "pattern":"[A-Za-z0-9-_=]+.[A-Za-z0-9-_=]+.[A-Za-z0-9-_=]+" + "pattern":"[A-Za-z0-9-_=]+.[A-Za-z0-9-_=]+.[A-Za-z0-9-_=]+", + "sensitive":true }, "UpdateCognitoUserPoolConfiguration":{ "type":"structure", diff --git a/services/voiceid/pom.xml b/services/voiceid/pom.xml index 630ab16447f0..7c64da1e4aff 100644 --- a/services/voiceid/pom.xml +++ b/services/voiceid/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT voiceid AWS Java SDK :: Services :: Voice ID diff --git a/services/vpclattice/pom.xml b/services/vpclattice/pom.xml index 83cae67e2855..15ef6b3975ff 100644 --- a/services/vpclattice/pom.xml +++ b/services/vpclattice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT vpclattice AWS Java SDK :: Services :: VPC Lattice diff --git a/services/vpclattice/src/main/resources/codegen-resources/endpoint-tests.json b/services/vpclattice/src/main/resources/codegen-resources/endpoint-tests.json index 79667cf0d608..62c8ea06bb88 100644 --- a/services/vpclattice/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/vpclattice/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,55 +1,55 @@ { "testCases": [ { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://vpc-lattice-fips.us-gov-east-1.api.aws" + "url": "https://vpc-lattice-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://vpc-lattice-fips.us-gov-east-1.amazonaws.com" + "url": "https://vpc-lattice-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + 
"UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://vpc-lattice.us-gov-east-1.api.aws" + "url": "https://vpc-lattice.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://vpc-lattice.us-gov-east-1.amazonaws.com" + "url": "https://vpc-lattice.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseDualStack": true, "Region": "cn-north-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseDualStack": false, "Region": "cn-north-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseDualStack": true, "Region": "cn-north-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -99,109 +99,109 @@ } }, "params": { - "UseDualStack": false, "Region": "cn-north-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://vpc-lattice-fips.us-gov-east-1.api.aws" + } }, "params": { - "UseDualStack": true, - "Region": "us-iso-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://vpc-lattice-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://vpc-lattice-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-iso-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://vpc-lattice.us-gov-east-1.api.aws" + } }, "params": { - "UseDualStack": true, - "Region": "us-iso-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://vpc-lattice.us-iso-east-1.c2s.ic.gov" + "url": "https://vpc-lattice.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-iso-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + 
"UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://vpc-lattice-fips.us-east-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://vpc-lattice-fips.us-east-1.amazonaws.com" + "url": "https://vpc-lattice-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://vpc-lattice.us-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://vpc-lattice.us-east-1.amazonaws.com" + "url": "https://vpc-lattice.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -210,9 +210,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, "Region": "us-isob-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -223,9 +223,9 @@ } }, "params": { - "UseDualStack": false, "Region": "us-isob-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -234,9 +234,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, "Region": "us-isob-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -247,22 +247,35 @@ } }, "params": { - "UseDualStack": false, "Region": "us-isob-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -272,9 +285,9 @@ "error": "Invalid Configuration: FIPS and custom 
endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -284,11 +297,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/vpclattice/src/main/resources/codegen-resources/service-2.json b/services/vpclattice/src/main/resources/codegen-resources/service-2.json index ebe3eab26a38..a3de35352cc9 100644 --- a/services/vpclattice/src/main/resources/codegen-resources/service-2.json +++ b/services/vpclattice/src/main/resources/codegen-resources/service-2.json @@ -147,6 +147,7 @@ "output":{"shape":"CreateServiceNetworkServiceAssociationResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"}, @@ -174,7 +175,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Associates a VPC with a service network. When you associate a VPC with the service network, it enables all the resources within that VPC to be clients and communicate with other services in the service network. For more information, see Manage VPC associations in the Amazon VPC Lattice User Guide.

    You can't use this operation if there is a disassociation in progress. If the association fails, retry by deleting the association and recreating it.

    As a result of this operation, the association gets created in the service network account and the VPC owner account.

    If you add a security group to the service network and VPC association, the association must continue to always have at least one security group. You can add or edit security groups at any time. However, to remove all security groups, you must first delete the association and recreate it without security groups.

    ", + "documentation":"

    Associates a VPC with a service network. When you associate a VPC with the service network, it enables all the resources within that VPC to be clients and communicate with other services in the service network. For more information, see Manage VPC associations in the Amazon VPC Lattice User Guide.

    You can't use this operation if there is a disassociation in progress. If the association fails, retry by deleting the association and recreating it.

    As a result of this operation, the association gets created in the service network account and the VPC owner account.

    Once a security group is added to the VPC association, it cannot be removed. You can add or update the security groups being used for the VPC association after a security group is attached. To remove all security groups, you must reassociate the VPC.
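
    As an unverified sketch of this association call in the generated Java client (class and member names are assumed from the VPC Lattice model in this patch; identifiers are placeholders), the security groups are supplied at association time because they cannot be removed later:

        // Hedged sketch: associates a VPC with a service network and attaches a security group.
        // Names are assumptions based on the model; identifiers are placeholders.
        import software.amazon.awssdk.services.vpclattice.VpcLatticeClient;
        import software.amazon.awssdk.services.vpclattice.model.CreateServiceNetworkVpcAssociationRequest;
        import software.amazon.awssdk.services.vpclattice.model.CreateServiceNetworkVpcAssociationResponse;

        public class AssociateVpcSketch {
            public static void main(String[] args) {
                try (VpcLatticeClient client = VpcLatticeClient.create()) {
                    CreateServiceNetworkVpcAssociationResponse response = client.createServiceNetworkVpcAssociation(
                        CreateServiceNetworkVpcAssociationRequest.builder()
                            .serviceNetworkIdentifier("sn-0123456789abcdef0")    // placeholder service network ID
                            .vpcIdentifier("vpc-0123456789abcdef0")              // placeholder VPC ID
                            .securityGroupIds("sg-0123456789abcdef0")            // at least one, per the note above
                            .build());
                    System.out.println("Association ID: " + response.id());
                }
            }
        }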

    ", "idempotent":true }, "CreateTargetGroup":{ @@ -233,7 +234,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Deletes the specified auth policy. If an auth is set to AWS_IAM and the auth policy is deleted, all requests will be denied by default. If you are trying to remove the auth policy completely, you must set the auth_type to NONE. If auth is enabled on the resource, but no auth policy is set, all requests will be denied.

    ", + "documentation":"

    Deletes the specified auth policy. If the auth type is set to AWS_IAM and the auth policy is deleted, all requests will be denied by default. If you are trying to remove the auth policy completely, you must set the auth_type to NONE. If auth is enabled on the resource, but no auth policy is set, all requests will be denied.

    ", "idempotent":true }, "DeleteListener":{ @@ -484,7 +485,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Retrieves information about the resource policy. The resource policy is an IAM policy created on behalf of the resource owner when they share a resource.

    " + "documentation":"

    Retrieves information about the resource policy. The resource policy is an IAM policy created by AWS RAM on behalf of the resource owner when they share a resource.

    " }, "GetRule":{ "name":"GetRule", @@ -587,6 +588,7 @@ "output":{"shape":"GetTargetGroupResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} @@ -604,6 +606,7 @@ "output":{"shape":"ListAccessLogSubscriptionsResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], @@ -724,6 +727,7 @@ "output":{"shape":"ListTagsForResourceResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], @@ -780,7 +784,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Creates or updates the auth policy. The policy string in JSON must not contain newlines or blank lines.

    " + "documentation":"

    Creates or updates the auth policy.

    " }, "PutResourcePolicy":{ "name":"PutResourcePolicy", @@ -850,6 +854,7 @@ "output":{"shape":"UntagResourceResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], @@ -972,7 +977,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Updates the service network and VPC association. If you add a security group to the service network and VPC association, the association must continue to always have at least one security group. You can add or edit security groups at any time. However, to remove all security groups, you must first delete the association and recreate it without security groups.

    ", + "documentation":"

    Updates the service network and VPC association. Once you add a security group, it cannot be removed.
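
    A hedged sketch of that update follows; the member names (serviceNetworkVpcAssociationIdentifier, securityGroupIds) appear in this model, while the client class, identifiers, and response accessor are assumptions.

        // Hedged sketch: replaces the security groups on an existing association. You can add
        // or change groups here, but per the documentation above you cannot remove them all.
        import software.amazon.awssdk.services.vpclattice.VpcLatticeClient;
        import software.amazon.awssdk.services.vpclattice.model.UpdateServiceNetworkVpcAssociationRequest;
        import software.amazon.awssdk.services.vpclattice.model.UpdateServiceNetworkVpcAssociationResponse;

        public class UpdateVpcAssociationSketch {
            public static void main(String[] args) {
                try (VpcLatticeClient client = VpcLatticeClient.create()) {
                    UpdateServiceNetworkVpcAssociationResponse response = client.updateServiceNetworkVpcAssociation(
                        UpdateServiceNetworkVpcAssociationRequest.builder()
                            .serviceNetworkVpcAssociationIdentifier("snva-0123456789abcdef0") // placeholder
                            .securityGroupIds("sg-0123456789abcdef0", "sg-0fedcba9876543210") // new group set
                            .build());
                    System.out.println("Security groups now: " + response.securityGroupIds());
                }
            }
        }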

    ", "idempotent":true }, "UpdateTargetGroup":{ @@ -986,6 +991,7 @@ "output":{"shape":"UpdateTargetGroupResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"}, @@ -1102,8 +1108,7 @@ "AuthPolicyString":{ "type":"string", "max":10000, - "min":0, - "pattern":"^.*\\S.*$" + "min":0 }, "AuthType":{ "type":"string", @@ -2101,7 +2106,7 @@ }, "state":{ "shape":"AuthPolicyState", - "documentation":"

    The state of the auth policy. The auth policy is only active when the auth type is set to AWS_IAM. If you provide a policy, then authentication and authorization decisions are made based on this policy and the client's IAM policy. If the auth type is NONE, then any auth policy you provide will remain inactive. For more information, see Create a service network in the Amazon VPC Lattice User Guide.

    " + "documentation":"

    The state of the auth policy. The auth policy is only active when the auth type is set to AWS_IAM. If you provide a policy, then authentication and authorization decisions are made based on this policy and the client's IAM policy. If the auth type is NONE, then any auth policy you provide will remain inactive. For more information, see Create a service network in the Amazon VPC Lattice User Guide.

    " } } }, @@ -2177,7 +2182,7 @@ "members":{ "resourceArn":{ "shape":"ResourceArn", - "documentation":"

    The Amazon Resource Name (ARN) of the service network or service.

    ", + "documentation":"

    An IAM policy.

    ", "location":"uri", "locationName":"resourceArn" } @@ -2188,7 +2193,7 @@ "members":{ "policy":{ "shape":"PolicyString", - "documentation":"

    An IAM policy.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the service network or service.

    " } } }, @@ -2796,6 +2801,13 @@ "IPV6" ] }, + "LambdaEventStructureVersion":{ + "type":"string", + "enum":[ + "V1", + "V2" + ] + }, "ListAccessLogSubscriptionsRequest":{ "type":"structure", "required":["resourceIdentifier"], @@ -3335,7 +3347,7 @@ "members":{ "policy":{ "shape":"AuthPolicyString", - "documentation":"

    The auth policy. The policy string in JSON must not contain newlines or blank lines.

    " + "documentation":"

    The auth policy.

    " }, "resourceIdentifier":{ "shape":"ResourceIdentifier", @@ -3350,11 +3362,11 @@ "members":{ "policy":{ "shape":"AuthPolicyString", - "documentation":"

    The auth policy. The policy string in JSON must not contain newlines or blank lines.

    " + "documentation":"

    The auth policy.

    " }, "state":{ "shape":"AuthPolicyState", - "documentation":"

    The state of the auth policy. The auth policy is only active when the auth type is set to AWS_IAM. If you provide a policy, then authentication and authorization decisions are made based on this policy and the client's IAM policy. If the Auth type is NONE, then, any auth policy you provide will remain inactive. For more information, see Create a service network in the Amazon VPC Lattice User Guide.

    " + "documentation":"

    The state of the auth policy. The auth policy is only active when the auth type is set to AWS_IAM. If you provide a policy, then authentication and authorization decisions are made based on this policy and the client's IAM policy. If the auth type is NONE, then any auth policy you provide will remain inactive. For more information, see Create a service network in the Amazon VPC Lattice User Guide.

    " } } }, @@ -3367,7 +3379,7 @@ "members":{ "policy":{ "shape":"PolicyString", - "documentation":"

    An IAM policy. The policy string in JSON must not contain newlines or blank lines.

    " + "documentation":"

    An IAM policy.

    " }, "resourceArn":{ "shape":"ResourceArn", @@ -3703,7 +3715,7 @@ "type":"string", "max":32, "min":32, - "pattern":"^servicenetwork-[0-9a-z]{17}$" + "pattern":"^sn-[0-9a-z]{17}$" }, "ServiceNetworkIdentifier":{ "type":"string", @@ -3719,7 +3731,7 @@ "type":"string", "max":63, "min":3, - "pattern":"^(?!servicenetwork-)(?![-])(?!.*[-]$)(?!.*[-]{2})[a-z0-9-]+$" + "pattern":"^(?![-])(?!.*[-]$)(?!.*[-]{2})[a-z0-9-]+$" }, "ServiceNetworkServiceAssociationArn":{ "type":"string", @@ -4104,11 +4116,6 @@ }, "TargetGroupConfig":{ "type":"structure", - "required":[ - "port", - "protocol", - "vpcIdentifier" - ], "members":{ "healthCheck":{ "shape":"HealthCheckConfig", @@ -4118,6 +4125,10 @@ "shape":"IpAddressType", "documentation":"

    The type of IP address used for the target group. The possible values are ipv4 and ipv6. This is an optional parameter. If not specified, the IP address type defaults to ipv4.

    " }, + "lambdaEventStructureVersion":{ + "shape":"LambdaEventStructureVersion", + "documentation":"

    The version of the event structure that your Lambda function receives. The supported values are V1 and V2.

    " + }, "port":{ "shape":"Port", "documentation":"

    The port on which the targets are listening. For HTTP, the default is 80. For HTTPS, the default is 443.
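
    These config members tie together in this patch: port, protocol, and vpcIdentifier are no longer required on TargetGroupConfig, which makes room for Lambda target groups that only set the new lambdaEventStructureVersion. A hedged sketch (builder, enum, and response accessor names assumed from the model) follows:

        // Hedged sketch: a Lambda target group needs no port, protocol, or VPC, only the event
        // structure version. All names are assumptions based on the model in this patch.
        import software.amazon.awssdk.services.vpclattice.VpcLatticeClient;
        import software.amazon.awssdk.services.vpclattice.model.CreateTargetGroupRequest;
        import software.amazon.awssdk.services.vpclattice.model.CreateTargetGroupResponse;

        public class CreateLambdaTargetGroupSketch {
            public static void main(String[] args) {
                try (VpcLatticeClient client = VpcLatticeClient.create()) {
                    CreateTargetGroupResponse response = client.createTargetGroup(
                        CreateTargetGroupRequest.builder()
                            .name("my-lambda-targets")                        // placeholder name
                            .type("LAMBDA")                                   // target group type, assumed enum value
                            .config(c -> c.lambdaEventStructureVersion("V2"))
                            .build());
                    System.out.println("Target group ARN: " + response.arn());
                }
            }
        }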

    " @@ -4203,6 +4214,10 @@ "shape":"IpAddressType", "documentation":"

    The type of IP address used for the target group. The possible values are ipv4 and ipv6. This is an optional parameter. If not specified, the IP address type defaults to ipv4.

    " }, + "lambdaEventStructureVersion":{ + "shape":"LambdaEventStructureVersion", + "documentation":"

    The version of the event structure that your Lambda function receives. The supported values are V1 and V2.

    " + }, "lastUpdatedAt":{ "shape":"Timestamp", "documentation":"

    The date and time that the target group was last updated, specified in ISO-8601 format.

    " @@ -4599,7 +4614,7 @@ "members":{ "securityGroupIds":{ "shape":"UpdateServiceNetworkVpcAssociationRequestSecurityGroupIdsList", - "documentation":"

    The IDs of the security groups.

    " + "documentation":"

    The IDs of the security groups. Once you add a security group, it cannot be removed.

    " }, "serviceNetworkVpcAssociationIdentifier":{ "shape":"ServiceNetworkVpcAssociationIdentifier", diff --git a/services/waf/pom.xml b/services/waf/pom.xml index f3c754d97cfc..bc372f041716 100644 --- a/services/waf/pom.xml +++ b/services/waf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT waf AWS Java SDK :: Services :: AWS WAF diff --git a/services/waf/src/main/resources/codegen-resources/waf/customization.config b/services/waf/src/main/resources/codegen-resources/waf/customization.config index 8da5cd6c947f..23503733d437 100644 --- a/services/waf/src/main/resources/codegen-resources/waf/customization.config +++ b/services/waf/src/main/resources/codegen-resources/waf/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "listActivatedRulesInRuleGroup", "listLoggingConfigurations" ], diff --git a/services/waf/src/main/resources/codegen-resources/wafregional/customization.config b/services/waf/src/main/resources/codegen-resources/wafregional/customization.config index c2cd16ad4d6e..e8d7cb991668 100644 --- a/services/waf/src/main/resources/codegen-resources/wafregional/customization.config +++ b/services/waf/src/main/resources/codegen-resources/wafregional/customization.config @@ -6,7 +6,7 @@ }, "sdkRequestBaseClassName": "WafRequest", "sdkResponseBaseClassName": "WafResponse", - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "listActivatedRulesInRuleGroup", "listLoggingConfigurations" ], diff --git a/services/wafv2/pom.xml b/services/wafv2/pom.xml index 6c5e75e39cc5..c8c0353c08e0 100644 --- a/services/wafv2/pom.xml +++ b/services/wafv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT wafv2 AWS Java SDK :: Services :: WAFV2 diff --git a/services/wafv2/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/wafv2/src/main/resources/codegen-resources/endpoint-rule-set.json index 0adc23f0eae5..ab79a70ec309 100644 --- a/services/wafv2/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/wafv2/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true 
+ ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://wafv2-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://wafv2-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://wafv2-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://wafv2-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://wafv2.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] 
- }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://wafv2.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://wafv2.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://wafv2.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/wafv2/src/main/resources/codegen-resources/service-2.json b/services/wafv2/src/main/resources/codegen-resources/service-2.json index 164678a61d07..f3f27e807800 100644 --- a/services/wafv2/src/main/resources/codegen-resources/service-2.json +++ b/services/wafv2/src/main/resources/codegen-resources/service-2.json @@ -1037,6 +1037,10 @@ "InspectionLevel":{ "shape":"InspectionLevel", "documentation":"

    The inspection level to use for the Bot Control rule group. The common level is the least expensive. The targeted level includes all common level rules and adds rules with more advanced inspection criteria. For details, see WAF Bot Control rule group in the WAF Developer Guide.

    " + }, + "EnableMachineLearning":{ + "shape":"Boolean", + "documentation":"

    Applies only to the targeted inspection level.

    Determines whether to use machine learning (ML) to analyze your web traffic for bot-related activity. Machine learning is required for the Bot Control rules TGT_ML_CoordinatedActivityLow and TGT_ML_CoordinatedActivityMedium, which inspect for anomalous behavior that might indicate distributed, coordinated bot activity.

    For more information about this choice, see the listing for these rules in the table at Bot Control rules listing in the WAF Developer Guide.

    Default: TRUE

    " } }, "documentation":"

    Details for your use of the Bot Control managed rule group, AWSManagedRulesBotControlRuleSet. This configuration is used in ManagedRuleGroupConfig.

    " @@ -1143,10 +1147,10 @@ "members":{ "RequestBody":{ "shape":"RequestBody", - "documentation":"

    Customizes the maximum size of the request body that your protected CloudFront distributions forward to WAF for inspection. The default size is 16 KB (16,384 kilobytes).

    You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see WAF Pricing.

    " + "documentation":"

    Customizes the maximum size of the request body that your protected CloudFront distributions forward to WAF for inspection. The default size is 16 KB (16,384 bytes).

    You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see WAF Pricing.

    " } }, - "documentation":"

    Specifies custom configurations for the associations between the web ACL and protected resources.

    Use this to customize the maximum size of the request body that your protected CloudFront distributions forward to WAF for inspection. The default is 16 KB (16,384 kilobytes).

    You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see WAF Pricing.

    " + "documentation":"

    Specifies custom configurations for the associations between the web ACL and protected resources.

    Use this to customize the maximum size of the request body that your protected CloudFront distributions forward to WAF for inspection. The default is 16 KB (16,384 bytes).

    You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see WAF Pricing.

    " }, "BlockAction":{ "type":"structure", @@ -1163,7 +1167,7 @@ "members":{ "OversizeHandling":{ "shape":"OversizeHandling", - "documentation":"

    What WAF should do if the body is larger than WAF can inspect. WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to WAF for inspection.

    The default limit is 8 KB (8,192 kilobytes) for regional resources and 16 KB (16,384 kilobytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL AssociationConfig, for additional processing fees.

    The options for oversize handling are the following:

    • CONTINUE - Inspect the available body contents normally, according to the rule inspection criteria.

    • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

    • NO_MATCH - Treat the web request as not matching the rule statement.

    You can combine the MATCH or NO_MATCH settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.

    Default: CONTINUE

    " + "documentation":"

    What WAF should do if the body is larger than WAF can inspect. WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to WAF for inspection.

    The default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL AssociationConfig, for additional processing fees.

    The options for oversize handling are the following:

    • CONTINUE - Inspect the available body contents normally, according to the rule inspection criteria.

    • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

    • NO_MATCH - Treat the web request as not matching the rule statement.

    You can combine the MATCH or NO_MATCH settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.

    Default: CONTINUE

    " } }, "documentation":"

    Inspect the body of the web request. The body immediately follows the request headers.

    This is used to indicate the web request component to inspect, in the FieldToMatch specification.

    " @@ -1879,7 +1883,7 @@ }, "AssociationConfig":{ "shape":"AssociationConfig", - "documentation":"

    Specifies custom configurations for the associations between the web ACL and protected resources.

    Use this to customize the maximum size of the request body that your protected CloudFront distributions forward to WAF for inspection. The default is 16 KB (16,384 kilobytes).

    You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see WAF Pricing.

    " + "documentation":"

    Specifies custom configurations for the associations between the web ACL and protected resources.

    Use this to customize the maximum size of the request body that your protected CloudFront distributions forward to WAF for inspection. The default is 16 KB (16,384 bytes).

    You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see WAF Pricing.

    " } } }, @@ -2410,7 +2414,7 @@ }, "Body":{ "shape":"Body", - "documentation":"

    Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.

    A limited amount of the request body is forwarded to WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 kilobytes) and for CloudFront distributions, the limit is 16 KB (16,384 kilobytes). For CloudFront distributions, you can increase the limit in the web ACL's AssociationConfig, for additional processing fees.

    For information about how to handle oversized request bodies, see the Body object configuration.

    " + "documentation":"

    Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.

    A limited amount of the request body is forwarded to WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's AssociationConfig, for additional processing fees.

    For information about how to handle oversized request bodies, see the Body object configuration.

    " }, "Method":{ "shape":"Method", @@ -2418,7 +2422,7 @@ }, "JsonBody":{ "shape":"JsonBody", - "documentation":"

    Inspect the request body as JSON. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.

    A limited amount of the request body is forwarded to WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 kilobytes) and for CloudFront distributions, the limit is 16 KB (16,384 kilobytes). For CloudFront distributions, you can increase the limit in the web ACL's AssociationConfig, for additional processing fees.

    For information about how to handle oversized request bodies, see the JsonBody object configuration.

    " + "documentation":"

    Inspect the request body as JSON. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.

    A limited amount of the request body is forwarded to WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's AssociationConfig, for additional processing fees.

    For information about how to handle oversized request bodies, see the JsonBody object configuration.

    " }, "Headers":{ "shape":"Headers", @@ -3262,7 +3266,7 @@ }, "OversizeHandling":{ "shape":"OversizeHandling", - "documentation":"

    What WAF should do if the body is larger than WAF can inspect. WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to WAF for inspection.

    The default limit is 8 KB (8,192 kilobytes) for regional resources and 16 KB (16,384 kilobytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL AssociationConfig, for additional processing fees.

    The options for oversize handling are the following:

    • CONTINUE - Inspect the available body contents normally, according to the rule inspection criteria.

    • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

    • NO_MATCH - Treat the web request as not matching the rule statement.

    You can combine the MATCH or NO_MATCH settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.

    Default: CONTINUE

    " + "documentation":"

    What WAF should do if the body is larger than WAF can inspect. WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to WAF for inspection.

    The default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL AssociationConfig, for additional processing fees.

    The options for oversize handling are the following:

    • CONTINUE - Inspect the available body contents normally, according to the rule inspection criteria.

    • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

    • NO_MATCH - Treat the web request as not matching the rule statement.

    You can combine the MATCH or NO_MATCH settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.

    Default: CONTINUE

    " } }, "documentation":"

    Inspect the body of the web request as JSON. The body immediately follows the request headers.

    This is used to indicate the web request component to inspect, in the FieldToMatch specification.

    Use the specifications in this object to indicate which parts of the JSON body to inspect using the rule's inspection criteria. WAF inspects only the parts of the JSON that result from the matches that you indicate.

    Example JSON: \"JsonBody\": { \"MatchPattern\": { \"All\": {} }, \"MatchScope\": \"ALL\" }

    " @@ -4851,10 +4855,10 @@ "members":{ "DefaultSizeInspectionLimit":{ "shape":"SizeInspectionLimit", - "documentation":"

    Specifies the maximum size of the web request body component that an associated CloudFront distribution should send to WAF for inspection. This applies to statements in the web ACL that inspect the body or JSON body.

    Default: 16 KB (16,384 kilobytes)

    " + "documentation":"

    Specifies the maximum size of the web request body component that an associated CloudFront distribution should send to WAF for inspection. This applies to statements in the web ACL that inspect the body or JSON body.

    Default: 16 KB (16,384 bytes)

    " } }, - "documentation":"

    Customizes the maximum size of the request body that your protected CloudFront distributions forward to WAF for inspection. The default size is 16 KB (16,384 kilobytes).

    You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see WAF Pricing.

    This is used in the AssociationConfig of the web ACL.

    " + "documentation":"

    Customizes the maximum size of the request body that your protected CloudFront distributions forward to WAF for inspection. The default size is 16 KB (16,384 bytes).

    You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see WAF Pricing.

    This is used in the AssociationConfig of the web ACL.

    " }, "RequestInspection":{ "type":"structure", @@ -5120,7 +5124,7 @@ "members":{ "Name":{ "shape":"EntityName", - "documentation":"

    The name of the rule. You can't change the name of a Rule after you create it.

    " + "documentation":"

    The name of the rule.

    If you change the name of a Rule after you create it and you want the rule's metric name to reflect the change, update the metric name in the rule's VisibilityConfig settings. WAF doesn't automatically update the metric name when you update the rule name.

    " }, "Priority":{ "shape":"RulePriority", @@ -5144,7 +5148,7 @@ }, "VisibilityConfig":{ "shape":"VisibilityConfig", - "documentation":"

    Defines and enables Amazon CloudWatch metrics and web request sample collection.

    " + "documentation":"

    Defines and enables Amazon CloudWatch metrics and web request sample collection.

    If you change the name of a Rule after you create it and you want the rule's metric name to reflect the change, update the metric name as well. WAF doesn't automatically update the metric name.

    " }, "CaptchaConfig":{ "shape":"CaptchaConfig", @@ -5475,7 +5479,7 @@ "documentation":"

    Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.

    " } }, - "documentation":"

    A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.

    If you configure WAF to inspect the request body, WAF inspects only the number of bytes of the body up to the limit for the web ACL. By default, for regional web ACLs, this limit is 8 KB (8,192 kilobytes) and for CloudFront web ACLs, this limit is 16 KB (16,384 kilobytes). For CloudFront web ACLs, you can increase the limit in the web ACL AssociationConfig, for additional fees. If you know that the request body for your web requests should never exceed the inspection limit, you could use a size constraint statement to block requests that have a larger request body size.

    If you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI /logo.jpg is nine characters long.

    " + "documentation":"

    A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.

    If you configure WAF to inspect the request body, WAF inspects only the number of bytes of the body up to the limit for the web ACL. By default, for regional web ACLs, this limit is 8 KB (8,192 bytes) and for CloudFront web ACLs, this limit is 16 KB (16,384 bytes). For CloudFront web ACLs, you can increase the limit in the web ACL AssociationConfig, for additional fees. If you know that the request body for your web requests should never exceed the inspection limit, you could use a size constraint statement to block requests that have a larger request body size.

    If you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI /logo.jpg is nine characters long.

    " }, "SizeInspectionLimit":{ "type":"string", @@ -5526,7 +5530,7 @@ }, "SizeConstraintStatement":{ "shape":"SizeConstraintStatement", - "documentation":"

    A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.

    If you configure WAF to inspect the request body, WAF inspects only the number of bytes of the body up to the limit for the web ACL. By default, for regional web ACLs, this limit is 8 KB (8,192 kilobytes) and for CloudFront web ACLs, this limit is 16 KB (16,384 kilobytes). For CloudFront web ACLs, you can increase the limit in the web ACL AssociationConfig, for additional fees. If you know that the request body for your web requests should never exceed the inspection limit, you could use a size constraint statement to block requests that have a larger request body size.

    If you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI /logo.jpg is nine characters long.

    " + "documentation":"

    A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.

    If you configure WAF to inspect the request body, WAF inspects only the number of bytes of the body up to the limit for the web ACL. By default, for regional web ACLs, this limit is 8 KB (8,192 bytes) and for CloudFront web ACLs, this limit is 16 KB (16,384 bytes). For CloudFront web ACLs, you can increase the limit in the web ACL AssociationConfig, for additional fees. If you know that the request body for your web requests should never exceed the inspection limit, you could use a size constraint statement to block requests that have a larger request body size.

    If you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI /logo.jpg is nine characters long.

    " }, "GeoMatchStatement":{ "shape":"GeoMatchStatement", @@ -6039,7 +6043,7 @@ }, "AssociationConfig":{ "shape":"AssociationConfig", - "documentation":"

    Specifies custom configurations for the associations between the web ACL and protected resources.

    Use this to customize the maximum size of the request body that your protected CloudFront distributions forward to WAF for inspection. The default is 16 KB (16,384 kilobytes).

    You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see WAF Pricing.

    " + "documentation":"

    Specifies custom configurations for the associations between the web ACL and protected resources.

    Use this to customize the maximum size of the request body that your protected CloudFront distributions forward to WAF for inspection. The default is 16 KB (16,384 bytes).

    You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see WAF Pricing.

    " } } }, @@ -6365,7 +6369,7 @@ }, "AssociationConfig":{ "shape":"AssociationConfig", - "documentation":"

    Specifies custom configurations for the associations between the web ACL and protected resources.

    Use this to customize the maximum size of the request body that your protected CloudFront distributions forward to WAF for inspection. The default is 16 KB (16,384 kilobytes).

    You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see WAF Pricing.

    " + "documentation":"

    Specifies custom configurations for the associations between the web ACL and protected resources.

    Use this to customize the maximum size of the request body that your protected CloudFront distributions forward to WAF for inspection. The default is 16 KB (16,384 bytes).

    You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see WAF Pricing.

    " } }, "documentation":"

    A web ACL defines a collection of rules to use to inspect and control web requests. Each rule has an action defined (allow, block, or count) for requests that match the statement of the rule. In the web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a web ACL with one or more Amazon Web Services resources to protect. The resources can be an Amazon CloudFront distribution, an Amazon API Gateway REST API, an Application Load Balancer, an AppSync GraphQL API, an Amazon Cognito user pool, an App Runner service, or an Amazon Web Services Verified Access instance.

    " diff --git a/services/wellarchitected/pom.xml b/services/wellarchitected/pom.xml index b09e5fe991dd..802cd7afbdcc 100644 --- a/services/wellarchitected/pom.xml +++ b/services/wellarchitected/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT wellarchitected AWS Java SDK :: Services :: Well Architected diff --git a/services/wisdom/pom.xml b/services/wisdom/pom.xml index efc100fde133..c4be3283adab 100644 --- a/services/wisdom/pom.xml +++ b/services/wisdom/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT wisdom AWS Java SDK :: Services :: Wisdom diff --git a/services/workdocs/pom.xml b/services/workdocs/pom.xml index 62e4c30b2622..60527d5b2dcb 100644 --- a/services/workdocs/pom.xml +++ b/services/workdocs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT workdocs AWS Java SDK :: Services :: Amazon WorkDocs diff --git a/services/workdocs/src/main/resources/codegen-resources/customization.config b/services/workdocs/src/main/resources/codegen-resources/customization.config index 59ace42147ed..304bdf6f931b 100644 --- a/services/workdocs/src/main/resources/codegen-resources/customization.config +++ b/services/workdocs/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "describeUsers", "describeActivities", "getResources" diff --git a/services/worklink/pom.xml b/services/worklink/pom.xml index f319f11cca78..a878b6d88e0a 100644 --- a/services/worklink/pom.xml +++ b/services/worklink/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT worklink AWS Java SDK :: Services :: WorkLink diff --git a/services/workmail/pom.xml b/services/workmail/pom.xml index fc5461e39b7f..4e13e1092b2c 100644 --- a/services/workmail/pom.xml +++ b/services/workmail/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 workmail diff --git a/services/workmailmessageflow/pom.xml b/services/workmailmessageflow/pom.xml index 0f70544170dc..ff64205d226e 100644 --- a/services/workmailmessageflow/pom.xml +++ b/services/workmailmessageflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT workmailmessageflow AWS Java SDK :: Services :: WorkMailMessageFlow diff --git a/services/workspaces/pom.xml b/services/workspaces/pom.xml index 8e75ee17f631..4c977118b7e9 100644 --- a/services/workspaces/pom.xml +++ b/services/workspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT workspaces AWS Java SDK :: Services :: Amazon WorkSpaces diff --git a/services/workspaces/src/main/resources/codegen-resources/customization.config b/services/workspaces/src/main/resources/codegen-resources/customization.config index 8ed4e567f577..ab4975037913 100644 --- a/services/workspaces/src/main/resources/codegen-resources/customization.config +++ b/services/workspaces/src/main/resources/codegen-resources/customization.config @@ -7,7 +7,7 @@ "describeWorkspaces", "describeWorkspacesConnectionStatus" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "describeAccountModifications", "describeAccount" ] diff --git a/services/workspacesweb/pom.xml b/services/workspacesweb/pom.xml index 3f2c3fa83c7b..9ae85d1893c0 100644 --- a/services/workspacesweb/pom.xml +++ b/services/workspacesweb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk 
services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT workspacesweb AWS Java SDK :: Services :: Work Spaces Web diff --git a/services/workspacesweb/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/workspacesweb/src/main/resources/codegen-resources/endpoint-rule-set.json index 1552c84bcb89..7124ded3a94b 100644 --- a/services/workspacesweb/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/workspacesweb/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://workspaces-web-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://workspaces-web-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + 
"rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://workspaces-web-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://workspaces-web-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://workspaces-web.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://workspaces-web.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://workspaces-web.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://workspaces-web.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/workspacesweb/src/main/resources/codegen-resources/service-2.json b/services/workspacesweb/src/main/resources/codegen-resources/service-2.json index 1efd59c263c9..3c4785fbd2e6 100644 --- a/services/workspacesweb/src/main/resources/codegen-resources/service-2.json +++ b/services/workspacesweb/src/main/resources/codegen-resources/service-2.json @@ -1380,6 +1380,7 @@ }, "BrowserSettingsSummary":{ "type":"structure", + "required":["browserSettingsArn"], "members":{ "browserSettingsArn":{ "shape":"ARN", @@ -1498,6 +1499,64 @@ }, "exception":true }, + "CookieDomain":{ + "type":"string", + "max":253, + 
"min":0, + "pattern":"^(\\.?)(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\\.)*[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$" + }, + "CookieName":{ + "type":"string", + "max":4096, + "min":0 + }, + "CookiePath":{ + "type":"string", + "max":2000, + "min":0, + "pattern":"^/(\\S)*$" + }, + "CookieSpecification":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"CookieDomain", + "documentation":"

    The domain of the cookie.

    " + }, + "name":{ + "shape":"CookieName", + "documentation":"

    The name of the cookie.

    " + }, + "path":{ + "shape":"CookiePath", + "documentation":"

    The path of the cookie.

    " + } + }, + "documentation":"

    Specifies a single cookie or set of cookies in an end user's browser.

    " + }, + "CookieSpecifications":{ + "type":"list", + "member":{"shape":"CookieSpecification"}, + "max":10, + "min":0 + }, + "CookieSynchronizationConfiguration":{ + "type":"structure", + "required":["allowlist"], + "members":{ + "allowlist":{ + "shape":"CookieSpecifications", + "documentation":"

    The list of cookie specifications that are allowed to be synchronized to the remote browser.

    " + }, + "blocklist":{ + "shape":"CookieSpecifications", + "documentation":"

    The list of cookie specifications that are blocked from being synchronized to the remote browser.

    " + } + }, + "documentation":"

    The configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser.

    ", + "sensitive":true + }, "CreateBrowserSettingsRequest":{ "type":"structure", "required":["browserPolicy"], @@ -1572,7 +1631,7 @@ "required":["identityProviderArn"], "members":{ "identityProviderArn":{ - "shape":"ARN", + "shape":"SubresourceARN", "documentation":"

    The ARN of the identity provider.

    " } } @@ -1778,15 +1837,27 @@ "uploadAllowed" ], "members":{ + "additionalEncryptionContext":{ + "shape":"EncryptionContextMap", + "documentation":"

    The additional encryption context of the user settings.

    " + }, "clientToken":{ "shape":"ClientToken", "documentation":"

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.

    If you do not specify a client token, one is automatically generated by the AWS SDK.

    ", "idempotencyToken":true }, + "cookieSynchronizationConfiguration":{ + "shape":"CookieSynchronizationConfiguration", + "documentation":"

    The configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser.

    " + }, "copyAllowed":{ "shape":"EnabledType", "documentation":"

    Specifies whether the user can copy text from the streaming session to the local device.

    " }, + "customerManagedKey":{ + "shape":"keyArn", + "documentation":"

    The customer managed key used to encrypt sensitive information in the user settings.

    " + }, "disconnectTimeoutInMinutes":{ "shape":"DisconnectTimeoutInMinutes", "documentation":"

    The amount of time that a streaming session remains active after users disconnect.

    " @@ -1849,7 +1920,7 @@ "required":["identityProviderArn"], "members":{ "identityProviderArn":{ - "shape":"ARN", + "shape":"SubresourceARN", "documentation":"

    The ARN of the identity provider.

    ", "location":"uri", "locationName":"identityProviderArn" @@ -2125,7 +2196,7 @@ "required":["identityProviderArn"], "members":{ "identityProviderArn":{ - "shape":"ARN", + "shape":"SubresourceARN", "documentation":"

    The ARN of the identity provider.

    ", "location":"uri", "locationName":"identityProviderArn" @@ -2253,6 +2324,7 @@ }, "GetTrustStoreCertificateResponse":{ "type":"structure", + "required":["trustStoreArn"], "members":{ "certificate":{ "shape":"Certificate", @@ -2332,7 +2404,7 @@ "required":["identityProviderArn"], "members":{ "identityProviderArn":{ - "shape":"ARN", + "shape":"SubresourceARN", "documentation":"

    The ARN of the identity provider.

    " }, "identityProviderDetails":{ @@ -2369,9 +2441,10 @@ }, "IdentityProviderSummary":{ "type":"structure", + "required":["identityProviderArn"], "members":{ "identityProviderArn":{ - "shape":"ARN", + "shape":"SubresourceARN", "documentation":"

    The ARN of the identity provider.

    " }, "identityProviderName":{ @@ -2455,6 +2528,7 @@ }, "IpAccessSettingsSummary":{ "type":"structure", + "required":["ipAccessSettingsArn"], "members":{ "creationDate":{ "shape":"Timestamp", @@ -2714,6 +2788,7 @@ }, "ListTrustStoreCertificatesResponse":{ "type":"structure", + "required":["trustStoreArn"], "members":{ "certificateList":{ "shape":"CertificateSummaryList", @@ -2857,6 +2932,7 @@ }, "NetworkSettingsSummary":{ "type":"structure", + "required":["networkSettingsArn"], "members":{ "networkSettingsArn":{ "shape":"ARN", @@ -2877,6 +2953,7 @@ }, "Portal":{ "type":"structure", + "required":["portalArn"], "members":{ "authenticationType":{ "shape":"AuthenticationType", @@ -2961,6 +3038,7 @@ }, "PortalSummary":{ "type":"structure", + "required":["portalArn"], "members":{ "authenticationType":{ "shape":"AuthenticationType", @@ -3120,6 +3198,12 @@ "max":3, "min":2 }, + "SubresourceARN":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:[\\w+=\\/,.@-]+:[a-zA-Z0-9\\-]+:[a-zA-Z0-9\\-]*:[a-zA-Z0-9]{1,12}:[a-zA-Z]+(\\/[a-fA-F0-9\\-]{36}){2,}$" + }, "Tag":{ "type":"structure", "required":[ @@ -3240,6 +3324,7 @@ }, "TrustStore":{ "type":"structure", + "required":["trustStoreArn"], "members":{ "associatedPortalArns":{ "shape":"ArnList", @@ -3333,7 +3418,7 @@ "idempotencyToken":true }, "identityProviderArn":{ - "shape":"ARN", + "shape":"SubresourceARN", "documentation":"

    The ARN of the identity provider.

    ", "location":"uri", "locationName":"identityProviderArn" @@ -3544,6 +3629,10 @@ "documentation":"

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.

    If you do not specify a client token, one is automatically generated by the AWS SDK.

    ", "idempotencyToken":true }, + "cookieSynchronizationConfiguration":{ + "shape":"CookieSynchronizationConfiguration", + "documentation":"

    The configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser.

    If the allowlist and blocklist are empty, the configuration becomes null.

    " + }, "copyAllowed":{ "shape":"EnabledType", "documentation":"

    Specifies whether the user can copy text from the streaming session to the local device.

    " @@ -3615,6 +3704,7 @@ }, "UserAccessLoggingSettingsSummary":{ "type":"structure", + "required":["userAccessLoggingSettingsArn"], "members":{ "kinesisStreamArn":{ "shape":"KinesisStreamArn", @@ -3635,6 +3725,10 @@ "shape":"ArnList", "documentation":"

    A list of web portal ARNs that this user settings resource is associated with.

    " }, + "cookieSynchronizationConfiguration":{ + "shape":"CookieSynchronizationConfiguration", + "documentation":"

    The configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser.

    " + }, "copyAllowed":{ "shape":"EnabledType", "documentation":"

    Specifies whether the user can copy text from the streaming session to the local device.

    " @@ -3676,7 +3770,12 @@ }, "UserSettingsSummary":{ "type":"structure", + "required":["userSettingsArn"], "members":{ + "cookieSynchronizationConfiguration":{ + "shape":"CookieSynchronizationConfiguration", + "documentation":"

    The configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser.

    " + }, "copyAllowed":{ "shape":"EnabledType", "documentation":"

    Specifies whether the user can copy text from the streaming session to the local device.

    " diff --git a/services/xray/pom.xml b/services/xray/pom.xml index ae9252a6bab6..a1652644da8f 100644 --- a/services/xray/pom.xml +++ b/services/xray/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT xray AWS Java SDK :: Services :: AWS X-Ray diff --git a/services/xray/src/main/resources/codegen-resources/customization.config b/services/xray/src/main/resources/codegen-resources/customization.config index 0d2f3c581410..323772188ef2 100644 --- a/services/xray/src/main/resources/codegen-resources/customization.config +++ b/services/xray/src/main/resources/codegen-resources/customization.config @@ -5,7 +5,7 @@ "getSamplingRules", "getSamplingStatisticSummaries" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "deleteSamplingRule", "getGroup" ], diff --git a/test/auth-tests/pom.xml b/test/auth-tests/pom.xml index 68e47c72ae93..6581f30690fb 100644 --- a/test/auth-tests/pom.xml +++ b/test/auth-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/codegen-generated-classes-test/pom.xml b/test/codegen-generated-classes-test/pom.xml index 12b9240b3cd4..635c75e77900 100644 --- a/test/codegen-generated-classes-test/pom.xml +++ b/test/codegen-generated-classes-test/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ../../pom.xml diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json index 6b1cb368d486..8cdb71614e38 100644 --- a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json @@ -277,6 +277,31 @@ "requestAlgorithmMember": "ChecksumAlgorithm" } }, + "PutOperationWithRequestCompression":{ + "name":"PutOperationWithRequestCompression", + "http":{ + "method":"PUT", + "requestUri":"/" + }, + "input":{"shape":"RequestCompressionStructure"}, + "output":{"shape":"RequestCompressionStructure"}, + "requestCompression": { + "encodings": ["gzip"] + } + }, + "PutOperationWithStreamingRequestCompression":{ + "name":"PutOperationWithStreamingRequestCompression", + "http":{ + "method":"PUT", + "requestUri":"/" + }, + "input":{"shape":"RequestCompressionStructureWithStreaming"}, + "output":{"shape":"RequestCompressionStructureWithStreaming"}, + "requestCompression": { + "encodings": ["gzip"] + }, + "authtype":"v4-unsigned-body" + }, "GetOperationWithChecksum":{ "name":"GetOperationWithChecksum", "http":{ @@ -1007,6 +1032,28 @@ } }, "payload":"NestedQueryParameterOperation" + }, + "RequestCompressionStructure":{ + "type":"structure", + "members":{ + "Body":{ + "shape":"Body", + "documentation":"

    Object data.

    ", + "streaming":false + } + }, + "payload":"Body" + }, + "RequestCompressionStructureWithStreaming":{ + "type":"structure", + "members":{ + "Body":{ + "shape":"Body", + "documentation":"

    Object data.

    ", + "streaming":true + } + }, + "payload":"Body" } } } diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/AsyncRequestCompressionTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/AsyncRequestCompressionTest.java new file mode 100644 index 000000000000..bad3735d509d --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/AsyncRequestCompressionTest.java @@ -0,0 +1,205 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.reactivex.Flowable; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.Optional; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.compression.GzipCompressor; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; +import software.amazon.awssdk.services.protocolrestjson.model.PutOperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.protocolrestjson.model.PutOperationWithStreamingRequestCompressionRequest; +import software.amazon.awssdk.testutils.service.http.MockAsyncHttpClient; + +public class AsyncRequestCompressionTest { + private static final String UNCOMPRESSED_BODY = + "RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest"; + private String compressedBody; + private int compressedLen; + private MockAsyncHttpClient mockAsyncHttpClient; + private ProtocolRestJsonAsyncClient asyncClient; + private Compressor compressor; + + @BeforeEach + public void setUp() { + mockAsyncHttpClient = new MockAsyncHttpClient(); + asyncClient = ProtocolRestJsonAsyncClient.builder() + .credentialsProvider(AnonymousCredentialsProvider.create()) + .region(Region.US_EAST_1) + .httpClient(mockAsyncHttpClient) + .build(); + compressor = new GzipCompressor(); + byte[] compressedBodyBytes = compressor.compress(UNCOMPRESSED_BODY.getBytes()); + compressedBody = new String(compressedBodyBytes); + compressedLen = compressedBodyBytes.length; + } + + @AfterEach + public void reset() { + mockAsyncHttpClient.reset(); + } + + @Test + public void 
asyncNonStreamingOperation_compressionEnabledThresholdOverridden_compressesCorrectly() { + mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithRequestCompressionRequest request = + PutOperationWithRequestCompressionRequest.builder() + .body(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)) + .overrideConfiguration(o -> o.compressionConfiguration( + c -> c.minimumCompressionThresholdInBytes(1))) + .build(); + + asyncClient.putOperationWithRequestCompression(request); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockAsyncHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + int loggedSize = Integer.valueOf(loggedRequest.firstMatchingHeader("Content-Length").get()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedSize).isEqualTo(compressedLen); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + } + + @Test + public void asyncNonStreamingOperation_payloadSizeLessThanCompressionThreshold_doesNotCompress() { + mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithRequestCompressionRequest request = + PutOperationWithRequestCompressionRequest.builder() + .body(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)) + .build(); + + asyncClient.putOperationWithRequestCompression(request); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockAsyncHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + int loggedSize = Integer.valueOf(loggedRequest.firstMatchingHeader("Content-Length").get()); + + assertThat(loggedBody).isEqualTo(UNCOMPRESSED_BODY); + assertThat(loggedSize).isEqualTo(UNCOMPRESSED_BODY.length()); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding")).isEmpty(); + } + + @Test + public void asyncStreamingOperation_compressionEnabled_compressesCorrectly() { + mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + mockAsyncHttpClient.setAsyncRequestBodyLength(compressedBody.length()); + PutOperationWithStreamingRequestCompressionRequest request = + PutOperationWithStreamingRequestCompressionRequest.builder().build(); + asyncClient.putOperationWithStreamingRequestCompression(request, customAsyncRequestBodyWithoutContentLength(), + AsyncResponseTransformer.toBytes()).join(); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockAsyncHttpClient.getLastRequest(); + String loggedBody = new String(mockAsyncHttpClient.getStreamingPayload().get()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + assertThat(loggedRequest.matchingHeaders("Content-Length")).isEmpty(); + assertThat(loggedRequest.firstMatchingHeader("Transfer-Encoding").get()).isEqualTo("chunked"); + } + + @Test + public void asyncNonStreamingOperation_compressionEnabledThresholdOverriddenWithRetry_compressesCorrectly() { + mockAsyncHttpClient.stubNextResponse(mockErrorResponse(), Duration.ofMillis(500)); + mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithRequestCompressionRequest request = + PutOperationWithRequestCompressionRequest.builder() + 
.body(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)) + .overrideConfiguration(o -> o.compressionConfiguration( + c -> c.minimumCompressionThresholdInBytes(1))) + .build(); + + asyncClient.putOperationWithRequestCompression(request); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockAsyncHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + int loggedSize = Integer.valueOf(loggedRequest.firstMatchingHeader("Content-Length").get()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedSize).isEqualTo(compressedLen); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + } + + @Test + public void asyncStreamingOperation_compressionEnabledWithRetry_compressesCorrectly() { + mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + mockAsyncHttpClient.setAsyncRequestBodyLength(compressedBody.length()); + PutOperationWithStreamingRequestCompressionRequest request = + PutOperationWithStreamingRequestCompressionRequest.builder().build(); + asyncClient.putOperationWithStreamingRequestCompression(request, customAsyncRequestBodyWithoutContentLength(), + AsyncResponseTransformer.toBytes()).join(); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockAsyncHttpClient.getLastRequest(); + String loggedBody = new String(mockAsyncHttpClient.getStreamingPayload().get()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + assertThat(loggedRequest.matchingHeaders("Content-Length")).isEmpty(); + assertThat(loggedRequest.firstMatchingHeader("Transfer-Encoding").get()).isEqualTo("chunked"); + } + + private HttpExecuteResponse mockResponse() { + return HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder().statusCode(200).build()) + .build(); + } + + private HttpExecuteResponse mockErrorResponse() { + return HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder().statusCode(500).build()) + .build(); + } + + protected AsyncRequestBody customAsyncRequestBodyWithoutContentLength() { + return new AsyncRequestBody() { + @Override + public Optional contentLength() { + return Optional.empty(); + } + + @Override + public void subscribe(Subscriber s) { + Flowable.fromPublisher(AsyncRequestBody.fromBytes(UNCOMPRESSED_BODY.getBytes())) + .subscribe(s); + } + }; + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java new file mode 100644 index 000000000000..a4f85125e9c6 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java @@ -0,0 +1,231 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; + +import java.io.ByteArrayInputStream; +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.AfterEach; +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.compression.GzipCompressor; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.core.sync.ResponseTransformer; +import software.amazon.awssdk.http.ContentStreamProvider; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; +import software.amazon.awssdk.services.protocolrestjson.model.PutOperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.protocolrestjson.model.PutOperationWithStreamingRequestCompressionRequest; +import software.amazon.awssdk.testutils.service.http.MockSyncHttpClient; + +public class RequestCompressionTest { + private static final String UNCOMPRESSED_BODY = + "RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest"; + private String compressedBody; + private int compressedLen; + private MockSyncHttpClient mockHttpClient; + private ProtocolRestJsonClient syncClient; + private Compressor compressor; + private RequestBody requestBody; + + @BeforeEach + public void setUp() { + mockHttpClient = new MockSyncHttpClient(); + syncClient = ProtocolRestJsonClient.builder() + .credentialsProvider(AnonymousCredentialsProvider.create()) + .region(Region.US_EAST_1) + .httpClient(mockHttpClient) + .build(); + compressor = new GzipCompressor(); + byte[] compressedBodyBytes = compressor.compress(UNCOMPRESSED_BODY.getBytes()); + compressedLen = compressedBodyBytes.length; + compressedBody = new String(compressedBodyBytes); + TestContentProvider provider = new TestContentProvider(UNCOMPRESSED_BODY.getBytes()); + requestBody = RequestBody.fromContentProvider(provider, "binary/octet-stream"); + } + + @AfterEach + public void reset() { + mockHttpClient.reset(); + } + + @Test + public void syncNonStreamingOperation_compressionEnabledThresholdOverridden_compressesCorrectly() { + mockHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithRequestCompressionRequest request = + PutOperationWithRequestCompressionRequest.builder() + .body(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)) + .overrideConfiguration(o -> o.compressionConfiguration( + c -> c.minimumCompressionThresholdInBytes(1))) + .build(); + syncClient.putOperationWithRequestCompression(request); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new 
String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + int loggedSize = Integer.valueOf(loggedRequest.firstMatchingHeader("Content-Length").get()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedSize).isEqualTo(compressedLen); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + } + + @Test + public void syncNonStreamingOperation_payloadSizeLessThanCompressionThreshold_doesNotCompress() { + mockHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithRequestCompressionRequest request = + PutOperationWithRequestCompressionRequest.builder() + .body(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)) + .build(); + syncClient.putOperationWithRequestCompression(request); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + + assertThat(loggedBody).isEqualTo(UNCOMPRESSED_BODY); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding")).isEmpty(); + } + + @Test + public void syncStreamingOperation_compressionEnabled_compressesCorrectly() { + mockHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithStreamingRequestCompressionRequest request = + PutOperationWithStreamingRequestCompressionRequest.builder().build(); + syncClient.putOperationWithStreamingRequestCompression(request, requestBody, ResponseTransformer.toBytes()); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + assertThat(loggedRequest.matchingHeaders("Content-Length")).isEmpty(); + assertThat(loggedRequest.firstMatchingHeader("Transfer-Encoding").get()).isEqualTo("chunked"); + } + + @Test + public void syncNonStreamingOperation_compressionEnabledThresholdOverriddenWithRetry_compressesCorrectly() { + mockHttpClient.stubNextResponse(mockErrorResponse(), Duration.ofMillis(500)); + mockHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithRequestCompressionRequest request = + PutOperationWithRequestCompressionRequest.builder() + .body(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)) + .overrideConfiguration(o -> o.compressionConfiguration( + c -> c.minimumCompressionThresholdInBytes(1))) + .build(); + syncClient.putOperationWithRequestCompression(request); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + int loggedSize = Integer.valueOf(loggedRequest.firstMatchingHeader("Content-Length").get()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedSize).isEqualTo(compressedLen); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + } + + @Test + public void syncStreamingOperation_compressionEnabledWithRetry_compressesCorrectly() { + mockHttpClient.stubNextResponse(mockErrorResponse(), Duration.ofMillis(500)); + 
mockHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithStreamingRequestCompressionRequest request = + PutOperationWithStreamingRequestCompressionRequest.builder().build(); + syncClient.putOperationWithStreamingRequestCompression(request, requestBody, ResponseTransformer.toBytes()); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + assertThat(loggedRequest.matchingHeaders("Content-Length")).isEmpty(); + assertThat(loggedRequest.firstMatchingHeader("Transfer-Encoding").get()).isEqualTo("chunked"); + } + + private HttpExecuteResponse mockResponse() { + return HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder().statusCode(200).build()) + .build(); + } + + private HttpExecuteResponse mockErrorResponse() { + return HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder().statusCode(500).build()) + .build(); + } + + private static final class TestContentProvider implements ContentStreamProvider { + private final byte[] content; + private final List createdStreams = new ArrayList<>(); + private CloseTrackingInputStream currentStream; + + private TestContentProvider(byte[] content) { + this.content = content; + } + + @Override + public InputStream newStream() { + if (currentStream != null) { + invokeSafely(currentStream::close); + } + currentStream = new CloseTrackingInputStream(new ByteArrayInputStream(content)); + createdStreams.add(currentStream); + return currentStream; + } + + List getCreatedStreams() { + return createdStreams; + } + } + + private static class CloseTrackingInputStream extends FilterInputStream { + private boolean isClosed = false; + + CloseTrackingInputStream(InputStream in) { + super(in); + } + + @Override + public void close() throws IOException { + super.close(); + isClosed = true; + } + + boolean isClosed() { + return isClosed; + } + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java index 879900b70dd0..e9a6f9b440e5 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java @@ -24,8 +24,10 @@ import java.io.ByteArrayInputStream; import java.io.IOException; +import java.net.URI; import java.time.Duration; import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.stream.Collectors; import org.junit.After; import org.junit.Before; @@ -41,6 +43,7 @@ import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.core.internal.metrics.SdkErrorType; import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.endpoints.Endpoint; import software.amazon.awssdk.http.AbortableInputStream; import software.amazon.awssdk.http.ExecutableHttpRequest; import software.amazon.awssdk.http.HttpExecuteRequest; @@ -52,6 +55,8 @@ import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.regions.Region; 
import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; +import software.amazon.awssdk.services.protocolrestjson.endpoints.ProtocolRestJsonEndpointParams; +import software.amazon.awssdk.services.protocolrestjson.endpoints.ProtocolRestJsonEndpointProvider; import software.amazon.awssdk.services.protocolrestjson.model.EmptyModeledException; import software.amazon.awssdk.services.protocolrestjson.model.SimpleStruct; import software.amazon.awssdk.services.protocolrestjson.paginators.PaginatedOperationWithResultKeyIterable; @@ -79,14 +84,17 @@ public class CoreMetricsTest { @Mock private MetricPublisher mockPublisher; + @Mock + private ProtocolRestJsonEndpointProvider mockEndpointProvider; + @Before public void setup() throws IOException { client = ProtocolRestJsonClient.builder() .httpClient(mockHttpClient) .region(Region.US_WEST_2) .credentialsProvider(mockCredentialsProvider) - .overrideConfiguration(c -> c.addMetricPublisher(mockPublisher) - .retryStrategy(b -> b.maxAttempts(MAX_ATTEMPTS))) + .overrideConfiguration(c -> c.addMetricPublisher(mockPublisher).retryPolicy(b -> b.numRetries(MAX_RETRIES))) + .endpointProvider(mockEndpointProvider) .build(); AbortableInputStream content = contentStream("{}"); SdkHttpFullResponse httpResponse = SdkHttpFullResponse.builder() @@ -119,6 +127,11 @@ public void setup() throws IOException { } return AwsBasicCredentials.create("foo", "bar"); }); + + when(mockEndpointProvider.resolveEndpoint(any(ProtocolRestJsonEndpointParams.class))).thenReturn( + CompletableFuture.completedFuture(Endpoint.builder() + .url(URI.create("https://protocolrestjson.amazonaws.com")) + .build())); } @After @@ -186,6 +199,8 @@ public void testApiCall_operationSuccessful_addsMetrics() { assertThat(capturedCollection.metricValues(CoreMetric.MARSHALLING_DURATION).get(0)) .isGreaterThanOrEqualTo(Duration.ZERO); assertThat(capturedCollection.metricValues(CoreMetric.RETRY_COUNT)).containsExactly(0); + assertThat(capturedCollection.metricValues(CoreMetric.SERVICE_ENDPOINT).get(0)).isEqualTo(URI.create( + "https://protocolrestjson.amazonaws.com")); assertThat(capturedCollection.children()).hasSize(1); MetricCollection attemptCollection = capturedCollection.children().get(0); @@ -283,6 +298,25 @@ public void testApiCall_httpClientThrowsNetworkError_errorTypeIncludedInMetrics( } } + @Test + public void testApiCall_endpointProviderAddsPathQueryFragment_notReportedInServiceEndpointMetric() { + when(mockEndpointProvider.resolveEndpoint(any(ProtocolRestJsonEndpointParams.class))) + .thenReturn(CompletableFuture.completedFuture(Endpoint.builder() + .url(URI.create("https://protocolrestjson.amazonaws.com:8080/foo?bar#baz")) + .build())); + + client.allTypes(); + + ArgumentCaptor collectionCaptor = ArgumentCaptor.forClass(MetricCollection.class); + verify(mockPublisher).publish(collectionCaptor.capture()); + + MetricCollection capturedCollection = collectionCaptor.getValue(); + + URI expectedServiceEndpoint = URI.create("https://protocolrestjson.amazonaws.com:8080"); + assertThat(capturedCollection.metricValues(CoreMetric.SERVICE_ENDPOINT)).containsExactly(expectedServiceEndpoint); + } + + private static HttpExecuteResponse mockExecuteResponse(SdkHttpFullResponse httpResponse) { HttpExecuteResponse mockResponse = mock(HttpExecuteResponse.class); when(mockResponse.httpResponse()).thenReturn(httpResponse); diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java 
b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java index 9edb16d2e6b1..9923116651d5 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java @@ -212,6 +212,8 @@ private void verifyApiCallCollection(MetricCollection capturedCollection) { .isGreaterThanOrEqualTo(Duration.ZERO); assertThat(capturedCollection.metricValues(CoreMetric.API_CALL_DURATION).get(0)) .isGreaterThan(FIXED_DELAY); + assertThat(capturedCollection.metricValues(CoreMetric.SERVICE_ENDPOINT).get(0)).toString() + .startsWith("http://localhost"); } void stubSuccessfulResponse() { diff --git a/test/http-client-tests/pom.xml b/test/http-client-tests/pom.xml index abfc2e70578b..f6fbbee96344 100644 --- a/test/http-client-tests/pom.xml +++ b/test/http-client-tests/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ../../pom.xml http-client-tests diff --git a/test/module-path-tests/pom.xml b/test/module-path-tests/pom.xml index 2418d4052229..59f7c50b5511 100644 --- a/test/module-path-tests/pom.xml +++ b/test/module-path-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests-core/pom.xml b/test/protocol-tests-core/pom.xml index 9a2606614d55..efb68a642145 100644 --- a/test/protocol-tests-core/pom.xml +++ b/test/protocol-tests-core/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests/pom.xml b/test/protocol-tests/pom.xml index 2be7cbef469c..04ea50be091d 100644 --- a/test/protocol-tests/pom.xml +++ b/test/protocol-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/AsyncFaultTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/AsyncFaultTest.java index b76bfd1bb0cb..d5dd94eaac4b 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/AsyncFaultTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/AsyncFaultTest.java @@ -22,12 +22,17 @@ import static org.assertj.core.api.Assertions.assertThatThrownBy; import com.github.tomakehurst.wiremock.junit.WireMockRule; +import com.github.tomakehurst.wiremock.junit5.WireMockRuntimeInfo; +import com.github.tomakehurst.wiremock.junit5.WireMockTest; +import java.io.IOException; import java.net.URI; import java.nio.ByteBuffer; import java.util.concurrent.CompletableFuture; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; @@ -39,19 +44,17 @@ import software.amazon.awssdk.services.protocolrestjson.model.StreamingOutputOperationResponse; import software.amazon.awssdk.utils.builder.SdkBuilder; - +@WireMockTest 
public class AsyncFaultTest { - @Rule - public WireMockRule wireMock = new WireMockRule(0); private ProtocolRestJsonAsyncClient client; - @Before - public void setup() { + @BeforeEach + public void setup(WireMockRuntimeInfo wiremock) { client = ProtocolRestJsonAsyncClient.builder() .region(Region.US_WEST_1) - .endpointOverride(URI.create("http://localhost:" + wireMock.port())) + .endpointOverride(URI.create("http://localhost:" + wiremock.getHttpPort())) .credentialsProvider(() -> AwsBasicCredentials.create("akid", "skid")) .build(); @@ -66,6 +69,21 @@ public void subscriberCancel_correctExceptionThrown() { .hasRootCauseExactlyInstanceOf(SelfCancelException.class); } + @ParameterizedTest + @ValueSource(ints = {500, 200}) + @Timeout(value = 2) + public void requestContentLengthNotMatch_shouldThrowException(int statusCode) { + stubFor(post(anyUrl()) + .willReturn(aResponse() + .withBody("hello world") + .withHeader("content-length", String.valueOf(100)) + .withStatus(statusCode))); + assertThatThrownBy(() -> client.allTypes().join()) + .hasRootCauseExactlyInstanceOf(IOException.class) + .hasMessageContaining("Response had content-length of 100 bytes, but only received 11 bytes before the connection " + + "was closed"); + } + private static class CancelSubscriptionTransformer implements AsyncResponseTransformer> { diff --git a/test/region-testing/pom.xml b/test/region-testing/pom.xml index 89182d40cbc3..53a282ef7376 100644 --- a/test/region-testing/pom.xml +++ b/test/region-testing/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/ruleset-testing-core/pom.xml b/test/ruleset-testing-core/pom.xml index f3716ce9e821..cc1a58ef0829 100644 --- a/test/ruleset-testing-core/pom.xml +++ b/test/ruleset-testing-core/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/s3-benchmarks/.scripts/benchmark b/test/s3-benchmarks/.scripts/benchmark index 4fb56f0d4435..5eaa2182d035 100755 --- a/test/s3-benchmarks/.scripts/benchmark +++ b/test/s3-benchmarks/.scripts/benchmark @@ -64,7 +64,7 @@ if [ ! 
-d result ]; then
fi
sizes_str="1B 8MB+1 8MB-1 128MB 4GB 30GB"
-versions_str="v1 v2 CRT"
+versions_str="v1 v2 CRT java"
sizes=( $sizes_str )
versions=( $versions_str )
diff --git a/test/s3-benchmarks/.scripts/create_benchmark_files b/test/s3-benchmarks/.scripts/create_benchmark_files
index a43725314cf2..20f196a6bd82 100755
--- a/test/s3-benchmarks/.scripts/create_benchmark_files
+++ b/test/s3-benchmarks/.scripts/create_benchmark_files
@@ -1,14 +1,13 @@
-head -c 1B /dev/shm/1B
-head -c 8388607B /dev/shm/8MB-1
-head -c 8388609B /dev/shm/8MB+1
-head -c 128M /dev/shm/128MB
-head -c 4B /dev/shm/4GB
-head -c 30GB /dev/shm/30GB
-
-head -c 1B /1B
-head -c 8388607B /8MB-1
-head -c 8388609B /8MB+1
-head -c 128M /128MB
-head -c 4B /4GB
-head -c 30GB /30GB
+head -c 1 /dev/shm/1B
+head -c $((8*1024*1024-1)) /dev/shm/8MB-1
+head -c $((8*1024*1024+1)) /dev/shm/8MB+1
+head -c $((128*1024*1024)) /dev/shm/128MB
+head -c $((4*1024*1024*1024)) /dev/shm/4GB
+head -c $((30*1024*1024*1024)) /dev/shm/30GB
+head -c 1 /1B
+head -c $((8*1024*1024-1)) /8MB-1
+head -c $((8*1024*1024+1)) /8MB+1
+head -c $((128*1024*1024)) /128MB
+head -c $((4*1024*1024*1024)) /4GB
+head -c $((30*1024*1024*1024)) /30GB
diff --git a/test/s3-benchmarks/README.md b/test/s3-benchmarks/README.md
index 74a7436ba92b..5f0dc4bc6443 100755
--- a/test/s3-benchmarks/README.md
+++ b/test/s3-benchmarks/README.md
@@ -1,7 +1,6 @@
# S3 Benchmark Harness
-
-This module contains performance tests for `S3AsyncClient` and
+This module contains performance tests for `S3AsyncClient` and
`S3TransferManager`
## How to run
@@ -17,6 +16,31 @@
java -jar s3-benchmarks.jar --bucket=bucket --key=key -file=/path/to/destinationfile
java -jar s3-benchmarks.jar --bucket=bucket --key=key -file=/path/to/sourcefile/ --operation=upload --partSizeInMB=20 --maxThroughput=100.0
```
+## Command line arguments
+
+### Benchmark version
+
+The `--version` command line option is used to determine which component is under test:
+
+- `--version=crt`: run the benchmark against the CRT's S3Client
+- `--version=java`: run the benchmark against the Java-based S3 async client (`MultipartS3AsyncClient` class)
+- `--version=v2`: SDK v2 transfer manager (using `S3CrtAsyncClient` to delegate requests)
+- `--version=v1`: SDK v1 transfer manager (using `AmazonS3Client` to delegate requests)
+
+### Operation
+
+The `--operation` command line argument determines which transfer operation is used:
+
+|operation|supported versions|
+|---|-------|
+|download | v1 v2 java crt |
+|upload | v1 v2 java crt |
+|download_directory | v1 v2 |
+|upload_directory | v1 v2 |
+|copy | v1 v2 java |
+
+> All command line arguments can be found in the `BenchmarkRunner` class.
+
# Benchmark scripts Automation
From the `.scripts` folder, use one of the `benchmark` scripts to run a test suite.
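For reference, the `--version=java` mode described in the README above is selected in `BenchmarkRunner`, which this patch also extends with `--crtHttp`, `--maxConcurrency`, and `--connAcqTimeoutInSec` options (note that `--crtHttp` and `--connAcqTimeoutInSec` are mutually exclusive, per the `Validate.mutuallyExclusive` check in `BaseJavaS3ClientBenchmark`). A possible upload-benchmark invocation against the Java-based multipart client is sketched below; the bucket name, key, and file path are illustrative placeholders, not values taken from the patch:

```
# Upload benchmark using the Java-based multipart S3AsyncClient (--version=java),
# forcing the CRT HTTP client and capping concurrency at 100 connections.
# The bucket, key, and file path are placeholders for this sketch.
java -jar s3-benchmarks.jar \
    --bucket=my-benchmark-bucket \
    --key=128MB \
    -file=/dev/shm/128MB \
    --operation=upload \
    --version=java \
    --partSizeInMB=20 \
    --maxConcurrency=100 \
    --crtHttp=true
```

With `--crtHttp=true`, `BaseJavaS3ClientBenchmark` builds the client on `AwsCrtAsyncHttpClient`; without it, the Netty client is used and `--connAcqTimeoutInSec` may be supplied instead.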
diff --git a/test/s3-benchmarks/pom.xml b/test/s3-benchmarks/pom.xml index 366bfa81ff6a..df57a560143c 100644 --- a/test/s3-benchmarks/pom.xml +++ b/test/s3-benchmarks/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ../../pom.xml 4.0.0 @@ -87,6 +87,16 @@ log4j-slf4j-impl compile + + software.amazon.awssdk + netty-nio-client + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-crt-client + ${awsjavasdk.version} + diff --git a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BaseJavaS3ClientBenchmark.java b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BaseJavaS3ClientBenchmark.java new file mode 100644 index 000000000000..cf8e71246be8 --- /dev/null +++ b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BaseJavaS3ClientBenchmark.java @@ -0,0 +1,130 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.s3benchmarks; + +import static software.amazon.awssdk.s3benchmarks.BenchmarkUtils.BENCHMARK_ITERATIONS; +import static software.amazon.awssdk.s3benchmarks.BenchmarkUtils.DEFAULT_TIMEOUT; +import static software.amazon.awssdk.s3benchmarks.BenchmarkUtils.printOutResult; +import static software.amazon.awssdk.transfer.s3.SizeConstant.MB; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.crt.AwsCrtAsyncHttpClient; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.ChecksumAlgorithm; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; + +public abstract class BaseJavaS3ClientBenchmark implements TransferManagerBenchmark { + private static final Logger logger = Logger.loggerFor(BaseJavaS3ClientBenchmark.class); + + protected final S3Client s3Client; + + protected final S3AsyncClient s3AsyncClient; + protected final String bucket; + protected final String key; + protected final Duration timeout; + private final ChecksumAlgorithm checksumAlgorithm; + private final int iteration; + + protected BaseJavaS3ClientBenchmark(TransferManagerBenchmarkConfig config) { + this.bucket = Validate.paramNotNull(config.bucket(), "bucket"); + this.key = Validate.paramNotNull(config.key(), "key"); + this.timeout = Validate.getOrDefault(config.timeout(), () -> DEFAULT_TIMEOUT); + this.iteration = Validate.getOrDefault(config.iteration(), () -> BENCHMARK_ITERATIONS); + this.checksumAlgorithm = config.checksumAlgorithm(); + + this.s3Client = S3Client.create(); + + long partSizeInMb = Validate.paramNotNull(config.partSizeInMb(), "partSize"); + long readBufferInMb = Validate.paramNotNull(config.readBufferSizeInMb(), "readBufferSizeInMb"); + Validate.mutuallyExclusive("cannot use forceCrtHttpClient 
and connectionAcquisitionTimeoutInSec", + config.forceCrtHttpClient(), config.connectionAcquisitionTimeoutInSec()); + this.s3AsyncClient = S3AsyncClient.builder() + .multipartEnabled(true) + .multipartConfiguration(c -> c.minimumPartSizeInBytes(partSizeInMb * MB) + .thresholdInBytes(partSizeInMb * 2 * MB) + .apiCallBufferSizeInBytes(readBufferInMb * MB)) + .httpClientBuilder(httpClient(config)) + .build(); + } + + private SdkAsyncHttpClient.Builder httpClient(TransferManagerBenchmarkConfig config) { + if (config.forceCrtHttpClient()) { + logger.info(() -> "Using CRT HTTP client"); + AwsCrtAsyncHttpClient.Builder builder = AwsCrtAsyncHttpClient.builder(); + if (config.readBufferSizeInMb() != null) { + builder.readBufferSizeInBytes(config.readBufferSizeInMb() * MB); + } + if (config.maxConcurrency() != null) { + builder.maxConcurrency(config.maxConcurrency()); + } + return builder; + } + NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder(); + if (config.connectionAcquisitionTimeoutInSec() != null) { + Duration connAcqTimeout = Duration.ofSeconds(config.connectionAcquisitionTimeoutInSec()); + builder.connectionAcquisitionTimeout(connAcqTimeout); + } + if (config.maxConcurrency() != null) { + builder.maxConcurrency(config.maxConcurrency()); + } + return builder; + } + + protected abstract void sendOneRequest(List latencies) throws Exception; + + protected abstract long contentLength() throws Exception; + + @Override + public void run() { + try { + warmUp(); + doRunBenchmark(); + } catch (Exception e) { + logger.error(() -> "Exception occurred", e); + } finally { + cleanup(); + } + } + + private void cleanup() { + s3Client.close(); + s3AsyncClient.close(); + } + + private void warmUp() throws Exception { + logger.info(() -> "Starting to warm up"); + for (int i = 0; i < 3; i++) { + sendOneRequest(new ArrayList<>()); + Thread.sleep(500); + } + logger.info(() -> "Ending warm up"); + } + + private void doRunBenchmark() throws Exception { + List metrics = new ArrayList<>(); + for (int i = 0; i < iteration; i++) { + sendOneRequest(metrics); + } + printOutResult(metrics, "S3 Async client", contentLength()); + } + +} diff --git a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BaseTransferManagerBenchmark.java b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BaseTransferManagerBenchmark.java index c4370033c678..d40680bca028 100644 --- a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BaseTransferManagerBenchmark.java +++ b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BaseTransferManagerBenchmark.java @@ -34,6 +34,7 @@ import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.S3CrtAsyncClientBuilder; import software.amazon.awssdk.services.s3.internal.crt.S3CrtAsyncClient; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectRequest; @@ -61,15 +62,18 @@ public abstract class BaseTransferManagerBenchmark implements TransferManagerBen logger.info(() -> "Benchmark config: " + config); Long partSizeInMb = config.partSizeInMb() == null ? null : config.partSizeInMb() * MB; Long readBufferSizeInMb = config.readBufferSizeInMb() == null ? 
null : config.readBufferSizeInMb() * MB; - s3 = S3CrtAsyncClient.builder() - .targetThroughputInGbps(config.targetThroughput()) - .minimumPartSizeInBytes(partSizeInMb) - .initialReadBufferSizeInBytes(readBufferSizeInMb) - .targetThroughputInGbps(config.targetThroughput() == null ? - Double.valueOf(100.0) : config.targetThroughput()) - .build(); - s3Sync = S3Client.builder() - .build(); + S3CrtAsyncClientBuilder builder = S3CrtAsyncClient.builder() + .targetThroughputInGbps(config.targetThroughput()) + .minimumPartSizeInBytes(partSizeInMb) + .initialReadBufferSizeInBytes(readBufferSizeInMb) + .targetThroughputInGbps(config.targetThroughput() == null ? + Double.valueOf(100.0) : + config.targetThroughput()); + if (config.maxConcurrency() != null) { + builder.maxConcurrency(config.maxConcurrency()); + } + s3 = builder.build(); + s3Sync = S3Client.builder().build(); transferManager = S3TransferManager.builder() .s3Client(s3) .build(); diff --git a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BenchmarkRunner.java b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BenchmarkRunner.java index 6f1dfaecc707..d83fc87026ae 100644 --- a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BenchmarkRunner.java +++ b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BenchmarkRunner.java @@ -45,6 +45,11 @@ public final class BenchmarkRunner { private static final String TIMEOUT = "timeoutInMin"; + private static final String CONN_ACQ_TIMEOUT_IN_SEC = "connAcqTimeoutInSec"; + + private static final String FORCE_CRT_HTTP_CLIENT = "crtHttp"; + private static final String MAX_CONCURRENCY = "maxConcurrency"; + private static final Map> OPERATION_TO_BENCHMARK_V1 = new EnumMap<>(TransferManagerOperation.class); private static final Map> @@ -83,8 +88,8 @@ public static void main(String... args) throws org.apache.commons.cli.ParseExcep options.addOption(null, CHECKSUM_ALGORITHM, true, "The checksum algorithm to use"); options.addOption(null, ITERATION, true, "The number of iterations"); options.addOption(null, READ_BUFFER_IN_MB, true, "Read buffer size in MB"); - options.addOption(null, VERSION, true, "The major version of the transfer manager to run test: v1 | v2 | crt, default: " - + "v2"); + options.addOption(null, VERSION, true, "The major version of the transfer manager to run test: " + + "v1 | v2 | crt | java, default: v2"); options.addOption(null, PREFIX, true, "S3 Prefix used in downloadDirectory and uploadDirectory"); options.addOption(null, CONTENT_LENGTH, true, "Content length to upload from memory. Used only in the " @@ -93,6 +98,12 @@ public static void main(String... args) throws org.apache.commons.cli.ParseExcep options.addOption(null, TIMEOUT, true, "Amount of minute to wait before a single operation " + "times out and is cancelled. Optional, defaults to 10 minutes if no specified"); + options.addOption(null, CONN_ACQ_TIMEOUT_IN_SEC, true, "Timeout for acquiring an already-established" + + " connection from a connection pool to a remote service."); + options.addOption(null, FORCE_CRT_HTTP_CLIENT, true, + "Force the CRT http client to be used in JavaBased benchmarks"); + options.addOption(null, MAX_CONCURRENCY, true, + "The Maximum number of allowed concurrent requests. For HTTP/1.1 this is the same as max connections."); CommandLine cmd = parser.parse(options, args); TransferManagerBenchmarkConfig config = parseConfig(cmd); @@ -114,11 +125,22 @@ public static void main(String... 
args) throws org.apache.commons.cli.ParseExcep if (operation == TransferManagerOperation.DOWNLOAD) { benchmark = new CrtS3ClientDownloadBenchmark(config); break; - } else if (operation == TransferManagerOperation.UPLOAD) { + } + if (operation == TransferManagerOperation.UPLOAD) { benchmark = new CrtS3ClientUploadBenchmark(config); break; } throw new UnsupportedOperationException(); + case JAVA: + if (operation == TransferManagerOperation.UPLOAD) { + benchmark = new JavaS3ClientUploadBenchmark(config); + break; + } + if (operation == TransferManagerOperation.COPY) { + benchmark = new JavaS3ClientCopyBenchmark(config); + break; + } + throw new UnsupportedOperationException("Java based s3 client benchmark only support upload and copy"); default: throw new UnsupportedOperationException(); } @@ -158,6 +180,15 @@ private static TransferManagerBenchmarkConfig parseConfig(CommandLine cmd) { Duration timeout = cmd.getOptionValue(TIMEOUT) == null ? null : Duration.ofMinutes(Long.parseLong(cmd.getOptionValue(TIMEOUT))); + Long connAcqTimeoutInSec = cmd.getOptionValue(CONN_ACQ_TIMEOUT_IN_SEC) == null ? null : + Long.parseLong(cmd.getOptionValue(CONN_ACQ_TIMEOUT_IN_SEC)); + + Boolean forceCrtHttpClient = cmd.getOptionValue(FORCE_CRT_HTTP_CLIENT) != null + && Boolean.parseBoolean(cmd.getOptionValue(FORCE_CRT_HTTP_CLIENT)); + + Integer maxConcurrency = cmd.getOptionValue(MAX_CONCURRENCY) == null ? null : + Integer.parseInt(cmd.getOptionValue(MAX_CONCURRENCY)); + return TransferManagerBenchmarkConfig.builder() .key(key) .bucket(bucket) @@ -171,6 +202,9 @@ private static TransferManagerBenchmarkConfig parseConfig(CommandLine cmd) { .prefix(prefix) .contentLengthInMb(contentLengthInMb) .timeout(timeout) + .connectionAcquisitionTimeoutInSec(connAcqTimeoutInSec) + .forceCrtHttpClient(forceCrtHttpClient) + .maxConcurrency(maxConcurrency) .build(); } @@ -185,6 +219,7 @@ public enum TransferManagerOperation { private enum SdkVersion { V1, V2, - CRT + CRT, + JAVA } } diff --git a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/JavaS3ClientCopyBenchmark.java b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/JavaS3ClientCopyBenchmark.java new file mode 100644 index 000000000000..a2798a0e9cfd --- /dev/null +++ b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/JavaS3ClientCopyBenchmark.java @@ -0,0 +1,44 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.s3benchmarks; + +import static software.amazon.awssdk.s3benchmarks.BenchmarkUtils.COPY_SUFFIX; + +import java.util.List; +import software.amazon.awssdk.utils.Logger; + +public class JavaS3ClientCopyBenchmark extends BaseJavaS3ClientBenchmark { + private static final Logger log = Logger.loggerFor(JavaS3ClientCopyBenchmark.class); + + public JavaS3ClientCopyBenchmark(TransferManagerBenchmarkConfig config) { + super(config); + } + + @Override + protected void sendOneRequest(List latencies) throws Exception { + log.info(() -> "Starting copy"); + Double latency = runWithTime(s3AsyncClient.copyObject( + req -> req.sourceKey(key).sourceBucket(bucket) + .destinationBucket(bucket).destinationKey(key + COPY_SUFFIX) + )::join).latency(); + latencies.add(latency); + } + + @Override + protected long contentLength() throws Exception { + return s3Client.headObject(b -> b.bucket(bucket).key(key)).contentLength(); + } +} diff --git a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/JavaS3ClientUploadBenchmark.java b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/JavaS3ClientUploadBenchmark.java new file mode 100644 index 000000000000..07aec5448a46 --- /dev/null +++ b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/JavaS3ClientUploadBenchmark.java @@ -0,0 +1,95 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.s3benchmarks; + +import static software.amazon.awssdk.transfer.s3.SizeConstant.MB; + +import java.nio.ByteBuffer; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.services.s3.model.ChecksumAlgorithm; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.utils.async.SimplePublisher; + +public class JavaS3ClientUploadBenchmark extends BaseJavaS3ClientBenchmark { + + private final String filePath; + private final Long contentLengthInMb; + private final Long partSizeInMb; + private final ChecksumAlgorithm checksumAlgorithm; + + public JavaS3ClientUploadBenchmark(TransferManagerBenchmarkConfig config) { + super(config); + this.filePath = config.filePath(); + this.contentLengthInMb = config.contentLengthInMb(); + this.partSizeInMb = config.partSizeInMb(); + this.checksumAlgorithm = config.checksumAlgorithm(); + } + + @Override + protected void sendOneRequest(List latencies) throws Exception { + if (filePath == null) { + double latency = uploadFromMemory(); + latencies.add(latency); + return; + } + Double latency = runWithTime( + s3AsyncClient.putObject(req -> req.key(key).bucket(bucket).checksumAlgorithm(checksumAlgorithm), + Paths.get(filePath))::join).latency(); + latencies.add(latency); + } + + private double uploadFromMemory() throws Exception { + if (contentLengthInMb == null) { + throw new UnsupportedOperationException("Java upload benchmark - contentLengthInMb required for upload from memory"); + } + long partSizeInBytes = partSizeInMb * MB; + // upload using known content length + SimplePublisher publisher = new SimplePublisher<>(); + byte[] bytes = new byte[(int) partSizeInBytes]; + Thread uploadThread = Executors.defaultThreadFactory().newThread(() -> { + long remaining = contentLengthInMb * MB; + while (remaining > 0) { + publisher.send(ByteBuffer.wrap(bytes)); + remaining -= partSizeInBytes; + } + publisher.complete(); + }); + CompletableFuture responseFuture = + s3AsyncClient.putObject(r -> r.bucket(bucket) + .key(key) + .contentLength(contentLengthInMb * MB) + .checksumAlgorithm(checksumAlgorithm), + AsyncRequestBody.fromPublisher(publisher)); + uploadThread.start(); + long start = System.currentTimeMillis(); + responseFuture.get(timeout.getSeconds(), TimeUnit.SECONDS); + long end = System.currentTimeMillis(); + return (end - start) / 1000.0; + } + + @Override + protected long contentLength() throws Exception { + return filePath != null + ? 
Files.size(Paths.get(filePath)) + : contentLengthInMb * MB; + } +} diff --git a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/TransferManagerBenchmark.java b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/TransferManagerBenchmark.java index 89f3362bc658..c182934f4e3f 100644 --- a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/TransferManagerBenchmark.java +++ b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/TransferManagerBenchmark.java @@ -15,6 +15,8 @@ package software.amazon.awssdk.s3benchmarks; +import java.util.function.Supplier; + /** * Factory to create the benchmark */ @@ -66,4 +68,29 @@ static TransferManagerBenchmark v1Copy(TransferManagerBenchmarkConfig config) { return new V1TransferManagerCopyBenchmark(config); } + default TimedResult runWithTime(Supplier toRun) { + long start = System.currentTimeMillis(); + T result = toRun.get(); + long end = System.currentTimeMillis(); + return new TimedResult<>(result, (end - start) / 1000.0); + } + + final class TimedResult { + private final Double latency; + private final T result; + + public TimedResult(T result, Double latency) { + this.result = result; + this.latency = latency; + } + + public Double latency() { + return latency; + } + + public T result() { + return result; + } + + } } diff --git a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/TransferManagerBenchmarkConfig.java b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/TransferManagerBenchmarkConfig.java index b0a6f85a38c2..a3750f472498 100644 --- a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/TransferManagerBenchmarkConfig.java +++ b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/TransferManagerBenchmarkConfig.java @@ -17,6 +17,7 @@ import java.time.Duration; import software.amazon.awssdk.services.s3.model.ChecksumAlgorithm; +import software.amazon.awssdk.utils.ToString; public final class TransferManagerBenchmarkConfig { private final String filePath; @@ -28,6 +29,10 @@ public final class TransferManagerBenchmarkConfig { private final Integer iteration; private final Long contentLengthInMb; private final Duration timeout; + private final Long memoryUsageInMb; + private final Long connectionAcquisitionTimeoutInSec; + private final Boolean forceCrtHttpClient; + private final Integer maxConcurrency; private final Long readBufferSizeInMb; private final BenchmarkRunner.TransferManagerOperation operation; @@ -46,6 +51,10 @@ private TransferManagerBenchmarkConfig(Builder builder) { this.prefix = builder.prefix; this.contentLengthInMb = builder.contentLengthInMb; this.timeout = builder.timeout; + this.memoryUsageInMb = builder.memoryUsage; + this.connectionAcquisitionTimeoutInSec = builder.connectionAcquisitionTimeoutInSec; + this.forceCrtHttpClient = builder.forceCrtHttpClient; + this.maxConcurrency = builder.maxConcurrency; } public String filePath() { @@ -96,25 +105,46 @@ public Duration timeout() { return this.timeout; } + public Long memoryUsageInMb() { + return this.memoryUsageInMb; + } + + public Long connectionAcquisitionTimeoutInSec() { + return this.connectionAcquisitionTimeoutInSec; + } + + public boolean forceCrtHttpClient() { + return this.forceCrtHttpClient; + } + + public Integer maxConcurrency() { + return this.maxConcurrency; + } + public static Builder builder() { return new Builder(); } @Override public String toString() { - return "{" + - "filePath: '" + filePath + '\'' + - ", bucket: '" 
+ bucket + '\'' + - ", key: '" + key + '\'' + - ", targetThroughput: " + targetThroughput + - ", partSizeInMb: " + partSizeInMb + - ", checksumAlgorithm: " + checksumAlgorithm + - ", iteration: " + iteration + - ", readBufferSizeInMb: " + readBufferSizeInMb + - ", operation: " + operation + - ", contentLengthInMb: " + contentLengthInMb + - ", timeout:" + timeout + - '}'; + return ToString.builder("TransferManagerBenchmarkConfig") + .add("filePath", filePath) + .add("bucket", bucket) + .add("key", key) + .add("targetThroughput", targetThroughput) + .add("partSizeInMb", partSizeInMb) + .add("checksumAlgorithm", checksumAlgorithm) + .add("iteration", iteration) + .add("contentLengthInMb", contentLengthInMb) + .add("timeout", timeout) + .add("memoryUsageInMb", memoryUsageInMb) + .add("connectionAcquisitionTimeoutInSec", connectionAcquisitionTimeoutInSec) + .add("forceCrtHttpClient", forceCrtHttpClient) + .add("maxConcurrency", maxConcurrency) + .add("readBufferSizeInMb", readBufferSizeInMb) + .add("operation", operation) + .add("prefix", prefix) + .build(); } static final class Builder { @@ -126,6 +156,10 @@ static final class Builder { private Double targetThroughput; private Long partSizeInMb; private Long contentLengthInMb; + private Long memoryUsage; + private Long connectionAcquisitionTimeoutInSec; + private Boolean forceCrtHttpClient; + private Integer maxConcurrency; private Integer iteration; private BenchmarkRunner.TransferManagerOperation operation; @@ -193,6 +227,26 @@ public Builder timeout(Duration timeout) { return this; } + public Builder memoryUsageInMb(Long memoryUsage) { + this.memoryUsage = memoryUsage; + return this; + } + + public Builder connectionAcquisitionTimeoutInSec(Long connectionAcquisitionTimeoutInSec) { + this.connectionAcquisitionTimeoutInSec = connectionAcquisitionTimeoutInSec; + return this; + } + + public Builder forceCrtHttpClient(Boolean forceCrtHttpClient) { + this.forceCrtHttpClient = forceCrtHttpClient; + return this; + } + + public Builder maxConcurrency(Integer maxConcurrency) { + this.maxConcurrency = maxConcurrency; + return this; + } + public TransferManagerBenchmarkConfig build() { return new TransferManagerBenchmarkConfig(this); } diff --git a/test/s3-benchmarks/src/main/resources/log4j2.properties b/test/s3-benchmarks/src/main/resources/log4j2.properties index 58a399c44f10..e4d18ecc6eac 100644 --- a/test/s3-benchmarks/src/main/resources/log4j2.properties +++ b/test/s3-benchmarks/src/main/resources/log4j2.properties @@ -43,3 +43,6 @@ rootLogger.appenderRef.file.ref = FileAppender # #logger.netty.name = io.netty.handler.logging #logger.netty.level = debug + +#logger.s3mpu.name = software.amazon.awssdk.services.s3.internal.multipart +#logger.s3mpu.level = debug \ No newline at end of file diff --git a/test/sdk-benchmarks/pom.xml b/test/sdk-benchmarks/pom.xml index c2ad4ff9dc71..b14f75b46119 100644 --- a/test/sdk-benchmarks/pom.xml +++ b/test/sdk-benchmarks/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ../../pom.xml @@ -205,6 +205,11 @@ ${awsjavasdk.version} compile + + commons-cli + commons-cli + compile + @@ -368,6 +373,8 @@ -classpath software.amazon.awssdk.benchmark.BenchmarkRunner + + -c diff --git a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/BenchmarkResultProcessor.java b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/BenchmarkResultProcessor.java index 580471fa1a3b..938aa0de3c08 100644 --- 
a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/BenchmarkResultProcessor.java +++ b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/BenchmarkResultProcessor.java @@ -24,6 +24,7 @@ import java.net.URL; import java.util.ArrayList; import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -34,6 +35,7 @@ import software.amazon.awssdk.benchmark.stats.SdkBenchmarkParams; import software.amazon.awssdk.benchmark.stats.SdkBenchmarkResult; import software.amazon.awssdk.benchmark.stats.SdkBenchmarkStatistics; +import software.amazon.awssdk.benchmark.utils.BenchmarkProcessorOutput; import software.amazon.awssdk.utils.Logger; @@ -66,15 +68,18 @@ class BenchmarkResultProcessor { * Process benchmark results * * @param results the results of the benchmark - * @return the benchmark Id that failed the regression + * @return the benchmark results */ - List processBenchmarkResult(Collection results) { - List currentData = new ArrayList<>(); + BenchmarkProcessorOutput processBenchmarkResult(Collection results) { + Map benchmarkResults = new HashMap<>(); + for (RunResult result : results) { String benchmarkId = getBenchmarkId(result.getParams()); + SdkBenchmarkResult sdkBenchmarkData = constructSdkBenchmarkResult(result); + + benchmarkResults.put(benchmarkId, sdkBenchmarkData); SdkBenchmarkResult baselineResult = baseline.get(benchmarkId); - SdkBenchmarkResult sdkBenchmarkData = constructSdkBenchmarkResult(result); if (baselineResult == null) { log.warn(() -> { @@ -90,15 +95,14 @@ List processBenchmarkResult(Collection results) { continue; } - currentData.add(sdkBenchmarkData); - if (!validateBenchmarkResult(sdkBenchmarkData, baselineResult)) { failedBenchmarkIds.add(benchmarkId); } } - log.info(() -> "Current result: " + serializeResult(currentData)); - return failedBenchmarkIds; + BenchmarkProcessorOutput output = new BenchmarkProcessorOutput(benchmarkResults, failedBenchmarkIds); + log.info(() -> "Current result: " + serializeResult(output)); + return output; } private SdkBenchmarkResult constructSdkBenchmarkResult(RunResult runResult) { @@ -169,9 +173,9 @@ private boolean validateBenchmarkParams(SdkBenchmarkParams current, SdkBenchmark return current.getMode() == baseline.getMode(); } - private String serializeResult(List currentData) { + private String serializeResult(BenchmarkProcessorOutput processorOutput) { try { - return OBJECT_MAPPER.writeValueAsString(currentData); + return OBJECT_MAPPER.writeValueAsString(processorOutput); } catch (JsonProcessingException e) { log.error(() -> "Failed to serialize current result", e); } diff --git a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/BenchmarkRunner.java b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/BenchmarkRunner.java index 92ca28d12acc..4c49f0270a87 100644 --- a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/BenchmarkRunner.java +++ b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/BenchmarkRunner.java @@ -15,11 +15,23 @@ package software.amazon.awssdk.benchmark; -import com.fasterxml.jackson.core.JsonProcessingException; +import static software.amazon.awssdk.benchmark.utils.BenchmarkConstant.OBJECT_MAPPER; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.List; 
+import java.util.stream.Collectors; +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.CommandLineParser; +import org.apache.commons.cli.DefaultParser; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; import org.openjdk.jmh.results.RunResult; import org.openjdk.jmh.runner.Runner; import org.openjdk.jmh.runner.RunnerException; @@ -45,6 +57,8 @@ import software.amazon.awssdk.benchmark.enhanced.dynamodb.EnhancedClientQueryV1MapperComparisonBenchmark; import software.amazon.awssdk.benchmark.enhanced.dynamodb.EnhancedClientScanV1MapperComparisonBenchmark; import software.amazon.awssdk.benchmark.enhanced.dynamodb.EnhancedClientUpdateV1MapperComparisonBenchmark; +import software.amazon.awssdk.benchmark.stats.SdkBenchmarkResult; +import software.amazon.awssdk.benchmark.utils.BenchmarkProcessorOutput; import software.amazon.awssdk.utils.Logger; @@ -84,13 +98,15 @@ public class BenchmarkRunner { private final List benchmarksToRun; private final BenchmarkResultProcessor resultProcessor; + private final BenchmarkRunnerOptions options; - private BenchmarkRunner(List benchmarksToRun) { + private BenchmarkRunner(List benchmarksToRun, BenchmarkRunnerOptions options) { this.benchmarksToRun = benchmarksToRun; this.resultProcessor = new BenchmarkResultProcessor(); + this.options = options; } - public static void main(String... args) throws RunnerException, JsonProcessingException { + public static void main(String... args) throws Exception { List benchmarksToRun = new ArrayList<>(); benchmarksToRun.addAll(SYNC_BENCHMARKS); benchmarksToRun.addAll(ASYNC_BENCHMARKS); @@ -99,13 +115,14 @@ public static void main(String... args) throws RunnerException, JsonProcessingEx log.info(() -> "Skipping tests, to reduce benchmark times: \n" + MAPPER_BENCHMARKS + "\n" + METRIC_BENCHMARKS); - - BenchmarkRunner runner = new BenchmarkRunner(benchmarksToRun); + BenchmarkRunner runner = new BenchmarkRunner(benchmarksToRun, parseOptions(args)); runner.runBenchmark(); } private void runBenchmark() throws RunnerException { + log.info(() -> "Running with options: " + options); + ChainedOptionsBuilder optionsBuilder = new OptionsBuilder(); benchmarksToRun.forEach(optionsBuilder::include); @@ -114,11 +131,70 @@ private void runBenchmark() throws RunnerException { Collection results = new Runner(optionsBuilder.build()).run(); - List failedResult = resultProcessor.processBenchmarkResult(results); + BenchmarkProcessorOutput processedResults = resultProcessor.processBenchmarkResult(results); + List failedResults = processedResults.getFailedBenchmarks(); + + if (options.outputPath != null) { + log.info(() -> "Writing results to " + options.outputPath); + writeResults(processedResults, options.outputPath); + } + + if (options.check && !failedResults.isEmpty()) { + log.info(() -> "Failed perf regression tests: " + failedResults); + throw new RuntimeException("Perf regression tests failed: " + failedResults); + } + } + + private static BenchmarkRunnerOptions parseOptions(String[] args) throws ParseException { + Options cliOptions = new Options(); + cliOptions.addOption("o", "output", true, + "The path to write the benchmark results to."); + cliOptions.addOption("c", "check", false, + "If specified, exit with error code 1 if the results are not within the baseline."); + + CommandLineParser parser = new DefaultParser(); + CommandLine cmdLine = parser.parse(cliOptions, args); + + BenchmarkRunnerOptions options = new BenchmarkRunnerOptions() + .check(cmdLine.hasOption("c")); + + if 
(cmdLine.hasOption("o")) { + options.outputPath(Paths.get(cmdLine.getOptionValue("o"))); + } + + return options; + } + + private static void writeResults(BenchmarkProcessorOutput output, Path outputPath) { + List results = output.getBenchmarkResults().values().stream().collect(Collectors.toList()); + try (OutputStream os = Files.newOutputStream(outputPath)) { + OBJECT_MAPPER.writeValue(os, results); + } catch (IOException e) { + log.error(() -> "Failed to write the results to " + outputPath, e); + throw new RuntimeException(e); + } + } + + private static class BenchmarkRunnerOptions { + private Path outputPath; + private boolean check; + + public BenchmarkRunnerOptions outputPath(Path outputPath) { + this.outputPath = outputPath; + return this; + } + + public BenchmarkRunnerOptions check(boolean check) { + this.check = check; + return this; + } - if (!failedResult.isEmpty()) { - log.info(() -> "Failed perf regression tests: " + failedResult); - throw new RuntimeException("Perf regression tests failed: " + failedResult); + @Override + public String toString() { + return "BenchmarkRunnerOptions{" + + "outputPath=" + outputPath + + ", check=" + check + + '}'; } } } diff --git a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/stats/SdkBenchmarkParams.java b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/stats/SdkBenchmarkParams.java index f419405bb690..c04bcc9dca52 100644 --- a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/stats/SdkBenchmarkParams.java +++ b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/stats/SdkBenchmarkParams.java @@ -17,7 +17,6 @@ import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.DeserializationContext; import com.fasterxml.jackson.databind.JsonDeserializer; import com.fasterxml.jackson.databind.JsonSerializer; @@ -25,7 +24,7 @@ import com.fasterxml.jackson.databind.annotation.JsonDeserialize; import com.fasterxml.jackson.databind.annotation.JsonSerialize; import java.io.IOException; -import java.time.LocalDateTime; +import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; import org.openjdk.jmh.annotations.Mode; import org.openjdk.jmh.infra.BenchmarkParams; @@ -46,9 +45,9 @@ public class SdkBenchmarkParams { private Mode mode; - @JsonSerialize(using = LocalDateSerializer.class) - @JsonDeserialize(using = LocalDateDeserializer.class) - private LocalDateTime date; + @JsonSerialize(using = ZonedDateSerializer.class) + @JsonDeserialize(using = ZonedDateDeserializer.class) + private ZonedDateTime date; public SdkBenchmarkParams() { } @@ -59,7 +58,7 @@ public SdkBenchmarkParams(BenchmarkParams benchmarkParams) { this.jvmName = benchmarkParams.getVmName(); this.jvmVersion = benchmarkParams.getVmVersion(); this.mode = benchmarkParams.getMode(); - this.date = LocalDateTime.now(); + this.date = ZonedDateTime.now(); } public String getSdkVersion() { @@ -94,11 +93,11 @@ public void setJvmVersion(String jvmVersion) { this.jvmVersion = jvmVersion; } - public LocalDateTime getDate() { + public ZonedDateTime getDate() { return date; } - public void setDate(LocalDateTime date) { + public void setDate(ZonedDateTime date) { this.date = date; } @@ -110,18 +109,18 @@ public void setMode(Mode mode) { this.mode = mode; } - private static class LocalDateSerializer extends JsonSerializer { + private static class ZonedDateSerializer extends JsonSerializer { 
@Override - public void serialize(LocalDateTime value, JsonGenerator gen, SerializerProvider serializers) throws IOException { - gen.writeString(value.format(DateTimeFormatter.ISO_LOCAL_DATE_TIME)); + public void serialize(ZonedDateTime value, JsonGenerator gen, SerializerProvider serializers) throws IOException { + gen.writeString(value.format(DateTimeFormatter.ISO_ZONED_DATE_TIME)); } } - private static class LocalDateDeserializer extends JsonDeserializer { + private static class ZonedDateDeserializer extends JsonDeserializer { @Override - public LocalDateTime deserialize(JsonParser p, DeserializationContext ctxt) throws IOException, JsonProcessingException { - return LocalDateTime.parse(p.readValueAs(String.class)); + public ZonedDateTime deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { + return ZonedDateTime.parse(p.getValueAsString()); } } } diff --git a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/utils/BenchmarkProcessorOutput.java b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/utils/BenchmarkProcessorOutput.java new file mode 100644 index 000000000000..902ac3034730 --- /dev/null +++ b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/utils/BenchmarkProcessorOutput.java @@ -0,0 +1,44 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.benchmark.utils; + +import com.fasterxml.jackson.annotation.JsonCreator; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.benchmark.stats.SdkBenchmarkResult; + +/** + * The output object of the benchmark processor. This contains the results of all the benchmarks that were run, and the + * list of benchmarks that failed.
+ */ +public final class BenchmarkProcessorOutput { + private final Map benchmarkResults; + private final List failedBenchmarks; + + @JsonCreator + public BenchmarkProcessorOutput(Map benchmarkResults, List failedBenchmarks) { + this.benchmarkResults = benchmarkResults; + this.failedBenchmarks = failedBenchmarks; + } + + public Map getBenchmarkResults() { + return benchmarkResults; + } + + public List getFailedBenchmarks() { + return failedBenchmarks; + } +} diff --git a/test/sdk-benchmarks/src/main/resources/software/amazon/awssdk/benchmark/baseline.json b/test/sdk-benchmarks/src/main/resources/software/amazon/awssdk/benchmark/baseline.json index c09d97fbbfc4..85489ea9439e 100644 --- a/test/sdk-benchmarks/src/main/resources/software/amazon/awssdk/benchmark/baseline.json +++ b/test/sdk-benchmarks/src/main/resources/software/amazon/awssdk/benchmark/baseline.json @@ -7,7 +7,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.308" + "date": "2020-03-18T20:11:42.308-07:00[America/Los_Angeles]" }, "statistics": { "mean": 11083.712145086858, @@ -18,7 +18,8 @@ "n": 10, "sum": 110837.12145086858 } - }, { + }, + { "id": "apicall.httpclient.async.NettyClientH1NonTlsBenchmark.sequentialApiCall-Throughput", "params": { "sdkVersion": "2.10.89", @@ -26,7 +27,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.314" + "date": "2020-03-18T20:11:42.314-07:00[America/Los_Angeles]" }, "statistics": { "mean": 3133.078992847664, @@ -37,7 +38,8 @@ "n": 10, "sum": 31330.78992847664 } - }, { + }, + { "id": "apicall.httpclient.async.NettyHttpClientH1Benchmark.concurrentApiCall-Throughput-sslProviderValue-jdk", "params": { "sdkVersion": "2.10.89", @@ -45,7 +47,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.314" + "date": "2020-03-18T20:11:42.314-07:00[America/Los_Angeles]" }, "statistics": { "mean": 9400.788325804802, @@ -56,7 +58,8 @@ "n": 10, "sum": 94007.88325804802 } - }, { + }, + { "id": "apicall.httpclient.async.NettyHttpClientH1Benchmark.concurrentApiCall-Throughput-sslProviderValue-openssl", "params": { "sdkVersion": "2.10.89", @@ -64,7 +67,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.314" + "date": "2020-03-18T20:11:42.314-07:00[America/Los_Angeles]" }, "statistics": { "mean": 10081.234880927226, @@ -75,7 +78,8 @@ "n": 10, "sum": 100812.34880927225 } - }, { + }, + { "id": "apicall.httpclient.async.NettyHttpClientH1Benchmark.sequentialApiCall-Throughput-sslProviderValue-jdk", "params": { "sdkVersion": "2.10.89", @@ -83,7 +87,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.315" + "date": "2020-03-18T20:11:42.315-07:00[America/Los_Angeles]" }, "statistics": { "mean": 2318.064309904416, @@ -94,7 +98,8 @@ "n": 10, "sum": 23180.64309904416 } - }, { + }, + { "id": "apicall.httpclient.async.NettyHttpClientH1Benchmark.sequentialApiCall-Throughput-sslProviderValue-openssl", "params": { "sdkVersion": "2.10.89", @@ -102,7 +107,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.315" + "date": "2020-03-18T20:11:42.315-07:00[America/Los_Angeles]" }, "statistics": { "mean": 2668.2980888540214, @@ -113,7 +118,8 @@ "n": 10, "sum": 26682.980888540213 } - }, { + }, + { "id": 
"apicall.httpclient.async.NettyHttpClientH2Benchmark.concurrentApiCall-Throughput-sslProviderValue-jdk", "params": { "sdkVersion": "2.10.89", @@ -121,7 +127,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.315" + "date": "2020-03-18T20:11:42.315-07:00[America/Los_Angeles]" }, "statistics": { "mean": 6452.047990499835, @@ -132,7 +138,8 @@ "n": 10, "sum": 64520.47990499835 } - }, { + }, + { "id": "apicall.httpclient.async.NettyHttpClientH2Benchmark.concurrentApiCall-Throughput-sslProviderValue-openssl", "params": { "sdkVersion": "2.10.89", @@ -140,7 +147,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.315" + "date": "2020-03-18T20:11:42.315-07:00[America/Los_Angeles]" }, "statistics": { "mean": 7299.549654768969, @@ -151,7 +158,8 @@ "n": 10, "sum": 72995.49654768969 } - }, { + }, + { "id": "apicall.httpclient.async.NettyHttpClientH2Benchmark.sequentialApiCall-Throughput-sslProviderValue-jdk", "params": { "sdkVersion": "2.10.89", @@ -159,7 +167,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.315" + "date": "2020-03-18T20:11:42.315-07:00[America/Los_Angeles]" }, "statistics": { "mean": 2253.2698214846414, @@ -170,7 +178,8 @@ "n": 10, "sum": 22532.698214846412 } - }, { + }, + { "id": "apicall.httpclient.async.NettyHttpClientH2Benchmark.sequentialApiCall-Throughput-sslProviderValue-openssl", "params": { "sdkVersion": "2.10.89", @@ -178,7 +187,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.316" + "date": "2020-03-18T20:11:42.316-07:00[America/Los_Angeles]" }, "statistics": { "mean": 2349.62389971199, @@ -189,7 +198,8 @@ "n": 10, "sum": 23496.238997119897 } - }, { + }, + { "id": "apicall.httpclient.sync.ApacheHttpClientBenchmark.concurrentApiCall-Throughput", "params": { "sdkVersion": "2.10.89", @@ -197,7 +207,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.316" + "date": "2020-03-18T20:11:42.316-07:00[America/Los_Angeles]" }, "statistics": { "mean": 15097.57607845867, @@ -208,7 +218,8 @@ "n": 10, "sum": 150975.7607845867 } - }, { + }, + { "id": "apicall.httpclient.sync.ApacheHttpClientBenchmark.sequentialApiCall-Throughput", "params": { "sdkVersion": "2.10.89", @@ -216,7 +227,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.316" + "date": "2020-03-18T20:11:42.316-07:00[America/Los_Angeles]" }, "statistics": { "mean": 3932.902248629381, @@ -227,7 +238,8 @@ "n": 10, "sum": 39329.02248629381 } - }, { + }, + { "id": "apicall.httpclient.sync.UrlConnectionHttpClientBenchmark.sequentialApiCall-Throughput", "params": { "sdkVersion": "2.10.89", @@ -235,7 +247,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.316" + "date": "2020-03-18T20:11:42.316-07:00[America/Los_Angeles]" }, "statistics": { "mean": 769.724367683772, @@ -246,7 +258,8 @@ "n": 10, "sum": 7697.24367683772 } - }, { + }, + { "id": "apicall.protocol.Ec2ProtocolBenchmark.successfulResponse-Throughput", "params": { "sdkVersion": "2.10.89", @@ -254,7 +267,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.317" + "date": 
"2020-03-18T20:11:42.317-07:00[America/Los_Angeles]" }, "statistics": { "mean": 9487.796808217518, @@ -265,7 +278,8 @@ "n": 10, "sum": 94877.96808217518 } - }, { + }, + { "id": "apicall.protocol.JsonProtocolBenchmark.successfulResponse-Throughput", "params": { "sdkVersion": "2.10.89", @@ -273,7 +287,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.317" + "date": "2020-03-18T20:11:42.317-07:00[America/Los_Angeles]" }, "statistics": { "mean": 15239.050304507653, @@ -284,7 +298,8 @@ "n": 10, "sum": 152390.50304507653 } - }, { + }, + { "id": "apicall.protocol.QueryProtocolBenchmark.successfulResponse-Throughput", "params": { "sdkVersion": "2.10.89", @@ -292,7 +307,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.317" + "date": "2020-03-18T20:11:42.317-07:00[America/Los_Angeles]" }, "statistics": { "mean": 10511.163793405529, @@ -303,7 +318,8 @@ "n": 10, "sum": 105111.63793405528 } - }, { + }, + { "id": "apicall.protocol.XmlProtocolBenchmark.successfulResponse-Throughput", "params": { "sdkVersion": "2.10.89", @@ -311,7 +327,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.317" + "date": "2020-03-18T20:11:42.317-07:00[America/Los_Angeles]" }, "statistics": { "mean": 8484.220376124444, @@ -322,7 +338,8 @@ "n": 10, "sum": 84842.20376124444 } - }, { + }, + { "id": "coldstart.V2OptimizedClientCreationBenchmark.createClient-SampleTime", "params": { "sdkVersion": "2.10.89", @@ -330,7 +347,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "SampleTime", - "date": "2020-03-18T20:11:42.33" + "date": "2020-03-18T20:11:42.33-07:00[America/Los_Angeles]" }, "statistics": { "mean": 0.19604848685545748, @@ -341,7 +358,8 @@ "n": 771613, "sum": 151273.5610880001 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetOverheadBenchmark.enhanceGet-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -349,7 +367,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.779" + "date": "2020-03-26T21:54:38.779-07:00[America/Los_Angeles]" }, "statistics": { "mean": 21861.411294887475, @@ -360,7 +378,8 @@ "n": 10, "sum": 218614.11294887474 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetOverheadBenchmark.enhanceGet-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -368,7 +387,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.798" + "date": "2020-03-26T21:54:38.798-07:00[America/Los_Angeles]" }, "statistics": { "mean": 19194.404041731374, @@ -379,7 +398,8 @@ "n": 10, "sum": 191944.04041731375 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetOverheadBenchmark.enhanceGet-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -387,7 +407,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.801" + "date": "2020-03-26T21:54:38.801-07:00[America/Los_Angeles]" }, "statistics": { "mean": 5742.760128972843, @@ -398,7 +418,8 @@ "n": 10, "sum": 57427.60128972843 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetOverheadBenchmark.enhanceGet-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -406,7 +427,7 @@ "jvmName": "OpenJDK 64-Bit 
Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.803" + "date": "2020-03-26T21:54:38.803-07:00[America/Los_Angeles]" }, "statistics": { "mean": 9123.68471587034, @@ -417,7 +438,8 @@ "n": 10, "sum": 91236.8471587034 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetOverheadBenchmark.lowLevelGet-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -425,7 +447,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.805" + "date": "2020-03-26T21:54:38.805-07:00[America/Los_Angeles]" }, "statistics": { "mean": 23727.653183389055, @@ -436,7 +458,8 @@ "n": 10, "sum": 237276.53183389056 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetOverheadBenchmark.lowLevelGet-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -444,7 +467,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.807" + "date": "2020-03-26T21:54:38.807-07:00[America/Los_Angeles]" }, "statistics": { "mean": 21204.570979007094, @@ -455,7 +478,8 @@ "n": 10, "sum": 212045.70979007095 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetOverheadBenchmark.lowLevelGet-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -463,7 +487,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.809" + "date": "2020-03-26T21:54:38.809-07:00[America/Los_Angeles]" }, "statistics": { "mean": 6631.846341687633, @@ -474,7 +498,8 @@ "n": 10, "sum": 66318.46341687633 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetOverheadBenchmark.lowLevelGet-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -482,7 +507,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.811" + "date": "2020-03-26T21:54:38.811-07:00[America/Los_Angeles]" }, "statistics": { "mean": 10065.700621509586, @@ -493,7 +518,8 @@ "n": 10, "sum": 100657.00621509585 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutOverheadBenchmark.enhancedPut-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -501,7 +527,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.813" + "date": "2020-03-26T21:54:38.813-07:00[America/Los_Angeles]" }, "statistics": { "mean": 23635.986227776833, @@ -512,7 +538,8 @@ "n": 10, "sum": 236359.86227776835 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutOverheadBenchmark.enhancedPut-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -520,7 +547,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.816" + "date": "2020-03-26T21:54:38.816-07:00[America/Los_Angeles]" }, "statistics": { "mean": 20950.69006280451, @@ -531,7 +558,8 @@ "n": 10, "sum": 209506.9006280451 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutOverheadBenchmark.enhancedPut-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -539,7 +567,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.818" + "date": "2020-03-26T21:54:38.818-07:00[America/Los_Angeles]" }, "statistics": { "mean": 6947.0547317414, @@ -550,7 +578,8 @@ "n": 10, "sum": 
69470.547317414 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutOverheadBenchmark.enhancedPut-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -558,7 +587,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.819" + "date": "2020-03-26T21:54:38.819-07:00[America/Los_Angeles]" }, "statistics": { "mean": 9651.438384939946, @@ -569,7 +598,8 @@ "n": 10, "sum": 96514.38384939946 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutOverheadBenchmark.lowLevelPut-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -577,7 +607,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.821" + "date": "2020-03-26T21:54:38.821-07:00[America/Los_Angeles]" }, "statistics": { "mean": 24474.133695525416, @@ -588,7 +618,8 @@ "n": 10, "sum": 244741.33695525417 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutOverheadBenchmark.lowLevelPut-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -596,7 +627,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.823" + "date": "2020-03-26T21:54:38.823-07:00[America/Los_Angeles]" }, "statistics": { "mean": 21708.256095745754, @@ -607,7 +638,8 @@ "n": 10, "sum": 217082.56095745755 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutOverheadBenchmark.lowLevelPut-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -615,7 +647,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.824" + "date": "2020-03-26T21:54:38.824-07:00[America/Los_Angeles]" }, "statistics": { "mean": 7831.76449879679, @@ -626,7 +658,8 @@ "n": 10, "sum": 78317.6449879679 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutOverheadBenchmark.lowLevelPut-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -634,7 +667,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.826" + "date": "2020-03-26T21:54:38.826-07:00[America/Los_Angeles]" }, "statistics": { "mean": 10432.187037993292, @@ -645,7 +678,8 @@ "n": 10, "sum": 104321.87037993292 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v1Get-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -653,18 +687,19 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.83" + "date": "2020-03-26T21:54:38.83-07:00[America/Los_Angeles]" }, "statistics": { "mean": 4216269.465030504, - "variance": 7577381680.455024, + "variance": 7.577381680455024E9, "standardDeviation": 87048.1572490482, "max": 4304995.187978772, "min": 4127750.465031905, "n": 10, - "sum": 42162694.65030504 + "sum": 4.216269465030504E7 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v1Get-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -672,18 +707,19 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.831" + "date": "2020-03-26T21:54:38.831-07:00[America/Los_Angeles]" }, "statistics": { "mean": 2548116.917228338, - "variance": 39596645.65844414, + "variance": 3.959664565844414E7, "standardDeviation": 6292.586563444649, "max": 2553688.8961462937, 
"min": 2536667.0775304707, "n": 10, - "sum": 25481169.172283377 + "sum": 2.5481169172283377E7 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v1Get-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -691,7 +727,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.833" + "date": "2020-03-26T21:54:38.833-07:00[America/Los_Angeles]" }, "statistics": { "mean": 271517.73760595697, @@ -702,7 +738,8 @@ "n": 10, "sum": 2715177.37605957 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v1Get-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -710,18 +747,19 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.835" + "date": "2020-03-26T21:54:38.835-07:00[America/Los_Angeles]" }, "statistics": { "mean": 347920.5003151236, - "variance": 595046205.154461, + "variance": 5.95046205154461E8, "standardDeviation": 24393.56893024186, "max": 371195.38010237005, "min": 324573.14439857507, "n": 10, "sum": 3479205.003151236 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v1Put-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -729,18 +767,19 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.836" + "date": "2020-03-26T21:54:38.836-07:00[America/Los_Angeles]" }, "statistics": { "mean": 1768330.9260150697, - "variance": 1939469873.1268594, + "variance": 1.9394698731268594E9, "standardDeviation": 44039.412724590904, "max": 1811149.0295745614, "min": 1724510.13860136, "n": 10, - "sum": 17683309.260150697 + "sum": 1.7683309260150697E7 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v1Put-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -748,18 +787,19 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.838" + "date": "2020-03-26T21:54:38.838-07:00[America/Los_Angeles]" }, "statistics": { "mean": 1164870.7035331992, - "variance": 52647682.835541315, + "variance": 5.2647682835541315E7, "standardDeviation": 7255.8723001126, "max": 1174402.0970783627, "min": 1155823.7219991074, "n": 10, - "sum": 11648707.035331992 + "sum": 1.1648707035331992E7 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v1Put-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -767,7 +807,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.839" + "date": "2020-03-26T21:54:38.839-07:00[America/Los_Angeles]" }, "statistics": { "mean": 210052.38869182704, @@ -778,7 +818,8 @@ "n": 10, "sum": 2100523.8869182705 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v1Put-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -786,7 +827,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.841" + "date": "2020-03-26T21:54:38.841-07:00[America/Los_Angeles]" }, "statistics": { "mean": 114978.94801995096, @@ -797,7 +838,8 @@ "n": 10, "sum": 1149789.4801995095 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v2Get-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -805,18 +847,19 @@ "jvmName": "OpenJDK 64-Bit Server 
VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.842" + "date": "2020-03-26T21:54:38.842-07:00[America/Los_Angeles]" }, "statistics": { "mean": 3662576.5648506857, - "variance": 7409482642.244276, + "variance": 7.409482642244276E9, "standardDeviation": 86078.35176305525, "max": 3748802.8826729953, "min": 3575363.8664258076, "n": 10, - "sum": 36625765.64850686 + "sum": 3.662576564850686E7 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v2Get-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -824,18 +867,19 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.844" + "date": "2020-03-26T21:54:38.844-07:00[America/Los_Angeles]" }, "statistics": { "mean": 1646303.6313321758, - "variance": 1574901387.9625113, + "variance": 1.5749013879625113E9, "standardDeviation": 39685.027251628686, "max": 1686779.9815694, "min": 1607103.7820484997, "n": 10, - "sum": 16463036.313321758 + "sum": 1.6463036313321758E7 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v2Get-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -843,7 +887,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.845" + "date": "2020-03-26T21:54:38.845-07:00[America/Los_Angeles]" }, "statistics": { "mean": 129737.87890043444, @@ -854,7 +898,8 @@ "n": 10, "sum": 1297378.7890043445 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v2Get-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -862,7 +907,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.847" + "date": "2020-03-26T21:54:38.847-07:00[America/Los_Angeles]" }, "statistics": { "mean": 276472.2259425583, @@ -873,7 +918,8 @@ "n": 10, "sum": 2764722.2594255833 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v2Put-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -881,18 +927,19 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.848" + "date": "2020-03-26T21:54:38.848-07:00[America/Los_Angeles]" }, "statistics": { "mean": 3971820.2835967117, - "variance": 17460612994.02266, + "variance": 1.746061299402266E10, "standardDeviation": 132138.61280497332, "max": 4108330.055204355, "min": 3840104.0305961887, "n": 10, - "sum": 39718202.835967116 + "sum": 3.9718202835967116E7 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v2Put-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -900,7 +947,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.849" + "date": "2020-03-26T21:54:38.849-07:00[America/Los_Angeles]" }, "statistics": { "mean": 1493615.511958485, @@ -909,9 +956,10 @@ "max": 1498786.274708349, "min": 1488510.4353162742, "n": 10, - "sum": 14936155.11958485 + "sum": 1.493615511958485E7 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v2Put-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -919,7 +967,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.851" + "date": "2020-03-26T21:54:38.851-07:00[America/Los_Angeles]" }, "statistics": { "mean": 
119057.84161286886, @@ -930,7 +978,8 @@ "n": 10, "sum": 1190578.4161286885 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v2Put-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -938,18 +987,19 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.852" + "date": "2020-03-26T21:54:38.852-07:00[America/Los_Angeles]" }, "statistics": { "mean": 146022.84478369894, - "variance": 327890156.19659877, + "variance": 3.2789015619659877E8, "standardDeviation": 18107.737467629653, "max": 163566.06331238395, "min": 128721.90017507998, "n": 10, "sum": 1460228.4478369893 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientDeleteV1MapperComparisonBenchmark.v1Delete-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -957,7 +1007,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.606" + "date": "2020-03-31T20:56:25.606-07:00[America/Los_Angeles]" }, "statistics": { "mean": 6829142.946589122, @@ -968,7 +1018,8 @@ "n": 10, "sum": 6.829142946589121E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientDeleteV1MapperComparisonBenchmark.v1Delete-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -976,7 +1027,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.624" + "date": "2020-03-31T20:56:25.624-07:00[America/Los_Angeles]" }, "statistics": { "mean": 6808359.910172634, @@ -987,7 +1038,8 @@ "n": 10, "sum": 6.808359910172634E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientDeleteV1MapperComparisonBenchmark.v1Delete-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -995,7 +1047,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.625" + "date": "2020-03-31T20:56:25.625-07:00[America/Los_Angeles]" }, "statistics": { "mean": 6760046.190670421, @@ -1006,7 +1058,8 @@ "n": 10, "sum": 6.760046190670422E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientDeleteV1MapperComparisonBenchmark.v1Delete-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1014,7 +1067,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.625" + "date": "2020-03-31T20:56:25.625-07:00[America/Los_Angeles]" }, "statistics": { "mean": 7063555.657198062, @@ -1025,7 +1078,8 @@ "n": 10, "sum": 7.063555657198063E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientDeleteV1MapperComparisonBenchmark.v2Delete-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1033,7 +1087,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.625" + "date": "2020-03-31T20:56:25.625-07:00[America/Los_Angeles]" }, "statistics": { "mean": 5218929.153482059, @@ -1044,7 +1098,8 @@ "n": 10, "sum": 5.2189291534820594E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientDeleteV1MapperComparisonBenchmark.v2Delete-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1052,7 +1107,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.626" + "date": "2020-03-31T20:56:25.626-07:00[America/Los_Angeles]" }, "statistics": { "mean": 5269447.416256654, @@ 
-1063,7 +1118,8 @@ "n": 10, "sum": 5.269447416256654E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientDeleteV1MapperComparisonBenchmark.v2Delete-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1071,7 +1127,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.626" + "date": "2020-03-31T20:56:25.626-07:00[America/Los_Angeles]" }, "statistics": { "mean": 5233493.1041884385, @@ -1082,7 +1138,8 @@ "n": 10, "sum": 5.2334931041884385E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientDeleteV1MapperComparisonBenchmark.v2Delete-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1090,7 +1147,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.626" + "date": "2020-03-31T20:56:25.626-07:00[America/Los_Angeles]" }, "statistics": { "mean": 5333879.183913028, @@ -1101,7 +1158,8 @@ "n": 10, "sum": 5.333879183913028E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetV1MapperComparisonBenchmark.v1Get-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1109,7 +1167,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.632" + "date": "2020-03-31T20:56:25.632-07:00[America/Los_Angeles]" }, "statistics": { "mean": 4254457.087292329, @@ -1120,7 +1178,8 @@ "n": 10, "sum": 4.254457087292329E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetV1MapperComparisonBenchmark.v1Get-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1128,7 +1187,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.632" + "date": "2020-03-31T20:56:25.632-07:00[America/Los_Angeles]" }, "statistics": { "mean": 2541003.3761009574, @@ -1139,7 +1198,8 @@ "n": 10, "sum": 2.5410033761009574E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetV1MapperComparisonBenchmark.v1Get-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1147,7 +1207,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.632" + "date": "2020-03-31T20:56:25.632-07:00[America/Los_Angeles]" }, "statistics": { "mean": 276072.8892665714, @@ -1158,7 +1218,8 @@ "n": 10, "sum": 2760728.8926657136 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetV1MapperComparisonBenchmark.v1Get-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1166,7 +1227,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.632" + "date": "2020-03-31T20:56:25.632-07:00[America/Los_Angeles]" }, "statistics": { "mean": 334086.66986329446, @@ -1177,7 +1238,8 @@ "n": 10, "sum": 3340866.6986329444 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetV1MapperComparisonBenchmark.v2Get-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1185,7 +1247,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.632" + "date": "2020-03-31T20:56:25.632-07:00[America/Los_Angeles]" }, "statistics": { "mean": 3668470.1459464533, @@ -1196,7 +1258,8 @@ "n": 10, "sum": 3.6684701459464535E7 } - }, { + }, + { "id": 
"enhanced.dynamodb.EnhancedClientGetV1MapperComparisonBenchmark.v2Get-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1204,7 +1267,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.633" + "date": "2020-03-31T20:56:25.633-07:00[America/Los_Angeles]" }, "statistics": { "mean": 1705518.9560612484, @@ -1215,7 +1278,8 @@ "n": 10, "sum": 1.7055189560612485E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetV1MapperComparisonBenchmark.v2Get-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1223,7 +1287,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.633" + "date": "2020-03-31T20:56:25.633-07:00[America/Los_Angeles]" }, "statistics": { "mean": 136996.69293126452, @@ -1234,7 +1298,8 @@ "n": 10, "sum": 1369966.9293126452 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetV1MapperComparisonBenchmark.v2Get-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1242,18 +1307,20 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.633" + "date": "2020-03-31T20:56:25.633-07:00[America/Los_Angeles]" }, "statistics": { "mean": 283351.0162156861, "variance": 1.0298588244596072E7, "standardDeviation": 3209.1413562814696, "max": 286589.6531841922, - "min": 280136.4515638473, + "min": 280136.45 + 15638473, "n": 10, "sum": 2833510.1621568613 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutV1MapperComparisonBenchmark.v1Put-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1261,7 +1328,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.636" + "date": "2020-03-31T20:56:25.636-07:00[America/Los_Angeles]" }, "statistics": { "mean": 3989391.9219655544, @@ -1272,7 +1339,8 @@ "n": 10, "sum": 3.989391921965554E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutV1MapperComparisonBenchmark.v1Put-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1280,7 +1348,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.637" + "date": "2020-03-31T20:56:25.637-07:00[America/Los_Angeles]" }, "statistics": { "mean": 2171253.7675951715, @@ -1291,7 +1359,8 @@ "n": 10, "sum": 2.1712537675951716E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutV1MapperComparisonBenchmark.v1Put-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1299,7 +1368,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.637" + "date": "2020-03-31T20:56:25.637-07:00[America/Los_Angeles]" }, "statistics": { "mean": 244529.021162057, @@ -1310,7 +1379,8 @@ "n": 10, "sum": 2445290.21162057 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutV1MapperComparisonBenchmark.v1Put-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1318,7 +1388,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.637" + "date": "2020-03-31T20:56:25.637-07:00[America/Los_Angeles]" }, "statistics": { "mean": 176271.52763779167, @@ -1329,7 +1399,8 @@ "n": 10, "sum": 1762715.2763779168 } - }, { + }, + { "id": 
"enhanced.dynamodb.EnhancedClientPutV1MapperComparisonBenchmark.v2Put-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1337,7 +1408,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.637" + "date": "2020-03-31T20:56:25.637-07:00[America/Los_Angeles]" }, "statistics": { "mean": 3980473.869357331, @@ -1348,7 +1419,8 @@ "n": 10, "sum": 3.980473869357331E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutV1MapperComparisonBenchmark.v2Put-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1356,7 +1428,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.638" + "date": "2020-03-31T20:56:25.638-07:00[America/Los_Angeles]" }, "statistics": { "mean": 1537572.9568381808, @@ -1367,7 +1439,8 @@ "n": 10, "sum": 1.5375729568381809E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutV1MapperComparisonBenchmark.v2Put-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1375,7 +1448,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.638" + "date": "2020-03-31T20:56:25.638-07:00[America/Los_Angeles]" }, "statistics": { "mean": 122705.30057333391, @@ -1386,7 +1459,8 @@ "n": 10, "sum": 1227053.005733339 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutV1MapperComparisonBenchmark.v2Put-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1394,7 +1468,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.638" + "date": "2020-03-31T20:56:25.638-07:00[America/Los_Angeles]" }, "statistics": { "mean": 149741.39277360524, @@ -1405,7 +1479,8 @@ "n": 10, "sum": 1497413.9277360525 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientQueryV1MapperComparisonBenchmark.v1Query-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1413,7 +1488,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.638" + "date": "2020-03-31T20:56:25.638-07:00[America/Los_Angeles]" }, "statistics": { "mean": 1252512.7584237454, @@ -1424,7 +1499,8 @@ "n": 10, "sum": 1.2525127584237454E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientQueryV1MapperComparisonBenchmark.v1Query-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1432,7 +1508,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.638" + "date": "2020-03-31T20:56:25.638-07:00[America/Los_Angeles]" }, "statistics": { "mean": 696709.8287589755, @@ -1443,7 +1519,8 @@ "n": 10, "sum": 6967098.287589755 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientQueryV1MapperComparisonBenchmark.v1Query-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1451,7 +1528,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.638" + "date": "2020-03-31T20:56:25.638-07:00[America/Los_Angeles]" }, "statistics": { "mean": 87840.49274328267, @@ -1462,7 +1539,8 @@ "n": 10, "sum": 878404.9274328267 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientQueryV1MapperComparisonBenchmark.v1Query-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1470,7 +1548,7 @@ 
"jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.639" + "date": "2020-03-31T20:56:25.639-07:00[America/Los_Angeles]" }, "statistics": { "mean": 98567.58634308925, @@ -1481,7 +1559,8 @@ "n": 10, "sum": 985675.8634308925 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientQueryV1MapperComparisonBenchmark.v2Query-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1489,7 +1568,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.639" + "date": "2020-03-31T20:56:25.639-07:00[America/Los_Angeles]" }, "statistics": { "mean": 559019.8433378737, @@ -1500,7 +1579,8 @@ "n": 10, "sum": 5590198.433378737 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientQueryV1MapperComparisonBenchmark.v2Query-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1508,7 +1588,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.639" + "date": "2020-03-31T20:56:25.639-07:00[America/Los_Angeles]" }, "statistics": { "mean": 333270.58140860766, @@ -1519,7 +1599,8 @@ "n": 10, "sum": 3332705.8140860763 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientQueryV1MapperComparisonBenchmark.v2Query-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1527,7 +1608,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.639" + "date": "2020-03-31T20:56:25.639-07:00[America/Los_Angeles]" }, "statistics": { "mean": 37840.02739826469, @@ -1538,7 +1619,8 @@ "n": 10, "sum": 378400.2739826469 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientQueryV1MapperComparisonBenchmark.v2Query-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1546,7 +1628,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.639" + "date": "2020-03-31T20:56:25.639-07:00[America/Los_Angeles]" }, "statistics": { "mean": 87298.81206391682, @@ -1557,7 +1639,8 @@ "n": 10, "sum": 872988.1206391682 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientScanV1MapperComparisonBenchmark.v1Scan-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1565,7 +1648,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.639" + "date": "2020-03-31T20:56:25.639-07:00[America/Los_Angeles]" }, "statistics": { "mean": 1816952.3977204207, @@ -1576,7 +1659,8 @@ "n": 10, "sum": 1.8169523977204207E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientScanV1MapperComparisonBenchmark.v1Scan-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1584,7 +1668,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.64" + "date": "2020-03-31T20:56:25.64-07:00[America/Los_Angeles]" }, "statistics": { "mean": 967205.4743976349, @@ -1595,7 +1679,8 @@ "n": 10, "sum": 9672054.743976349 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientScanV1MapperComparisonBenchmark.v1Scan-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1603,7 +1688,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.64" + "date": 
"2020-03-31T20:56:25.64-07:00[America/Los_Angeles]" }, "statistics": { "mean": 91879.33897708468, @@ -1614,7 +1699,8 @@ "n": 10, "sum": 918793.3897708468 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientScanV1MapperComparisonBenchmark.v1Scan-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1622,7 +1708,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.64" + "date": "2020-03-31T20:56:25.64-07:00[America/Los_Angeles]" }, "statistics": { "mean": 116637.4994496019, @@ -1633,7 +1719,8 @@ "n": 10, "sum": 1166374.994496019 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientScanV1MapperComparisonBenchmark.v2Scan-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1641,7 +1728,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.64" + "date": "2020-03-31T20:56:25.64-07:00[America/Los_Angeles]" }, "statistics": { "mean": 2410421.1020664377, @@ -1652,7 +1739,8 @@ "n": 10, "sum": 2.410421102066438E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientScanV1MapperComparisonBenchmark.v2Scan-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1660,7 +1748,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.64" + "date": "2020-03-31T20:56:25.64-07:00[America/Los_Angeles]" }, "statistics": { "mean": 750074.5784853816, @@ -1671,7 +1759,8 @@ "n": 10, "sum": 7500745.784853815 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientScanV1MapperComparisonBenchmark.v2Scan-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1679,7 +1768,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.64" + "date": "2020-03-31T20:56:25.64-07:00[America/Los_Angeles]" }, "statistics": { "mean": 42561.66188409884, @@ -1690,7 +1779,8 @@ "n": 10, "sum": 425616.6188409884 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientScanV1MapperComparisonBenchmark.v2Scan-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1698,7 +1788,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.641" + "date": "2020-03-31T20:56:25.641-07:00[America/Los_Angeles]" }, "statistics": { "mean": 96898.57710736312, @@ -1709,7 +1799,8 @@ "n": 10, "sum": 968985.7710736311 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientUpdateV1MapperComparisonBenchmark.v1Update-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1717,7 +1808,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.641" + "date": "2020-03-31T20:56:25.641-07:00[America/Los_Angeles]" }, "statistics": { "mean": 2661602.8279100014, @@ -1728,7 +1819,8 @@ "n": 10, "sum": 2.6616028279100016E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientUpdateV1MapperComparisonBenchmark.v1Update-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1736,7 +1828,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.641" + "date": "2020-03-31T20:56:25.641-07:00[America/Los_Angeles]" }, "statistics": { "mean": 1507250.535625762, @@ -1747,7 +1839,8 @@ "n": 10, "sum": 1.5072505356257621E7 } - }, { + }, + { 
"id": "enhanced.dynamodb.EnhancedClientUpdateV1MapperComparisonBenchmark.v1Update-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1755,7 +1848,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.641" + "date": "2020-03-31T20:56:25.641-07:00[America/Los_Angeles]" }, "statistics": { "mean": 219532.91136539596, @@ -1766,7 +1859,8 @@ "n": 10, "sum": 2195329.1136539597 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientUpdateV1MapperComparisonBenchmark.v1Update-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1774,7 +1868,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.641" + "date": "2020-03-31T20:56:25.641-07:00[America/Los_Angeles]" }, "statistics": { "mean": 145165.8982342992, @@ -1785,7 +1879,8 @@ "n": 10, "sum": 1451658.982342992 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientUpdateV1MapperComparisonBenchmark.v2Update-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1793,7 +1888,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.641" + "date": "2020-03-31T20:56:25.641-07:00[America/Los_Angeles]" }, "statistics": { "mean": 1335244.2925506658, @@ -1804,7 +1899,8 @@ "n": 10, "sum": 1.3352442925506659E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientUpdateV1MapperComparisonBenchmark.v2Update-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1812,7 +1908,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.642" + "date": "2020-03-31T20:56:25.642-07:00[America/Los_Angeles]" }, "statistics": { "mean": 233466.6326390164, @@ -1823,7 +1919,8 @@ "n": 10, "sum": 2334666.326390164 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientUpdateV1MapperComparisonBenchmark.v2Update-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1831,7 +1928,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.642" + "date": "2020-03-31T20:56:25.642-07:00[America/Los_Angeles]" }, "statistics": { "mean": 38060.01754411877, @@ -1842,7 +1939,8 @@ "n": 10, "sum": 380600.1754411877 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientUpdateV1MapperComparisonBenchmark.v2Update-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1850,7 +1948,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.642" + "date": "2020-03-31T20:56:25.642-07:00[America/Los_Angeles]" }, "statistics": { "mean": 16831.633221796015, @@ -1862,4 +1960,4 @@ "sum": 168316.33221796015 } } -] +] \ No newline at end of file diff --git a/test/sdk-native-image-test/pom.xml b/test/sdk-native-image-test/pom.xml index 6bb7fa84e60c..1c3a430bdf82 100644 --- a/test/sdk-native-image-test/pom.xml +++ b/test/sdk-native-image-test/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/service-test-utils/pom.xml b/test/service-test-utils/pom.xml index b52f17756fdb..6ac8f30af23d 100644 --- a/test/service-test-utils/pom.xml +++ b/test/service-test-utils/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 
../../pom.xml service-test-utils diff --git a/test/service-test-utils/src/main/java/software/amazon/awssdk/testutils/service/http/MockAsyncHttpClient.java b/test/service-test-utils/src/main/java/software/amazon/awssdk/testutils/service/http/MockAsyncHttpClient.java index 8a3f62f7838e..4716212a027a 100644 --- a/test/service-test-utils/src/main/java/software/amazon/awssdk/testutils/service/http/MockAsyncHttpClient.java +++ b/test/service-test-utils/src/main/java/software/amazon/awssdk/testutils/service/http/MockAsyncHttpClient.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.Optional; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicInteger; @@ -50,6 +51,8 @@ public final class MockAsyncHttpClient implements SdkAsyncHttpClient, MockHttpCl private final List> responses = new LinkedList<>(); private final AtomicInteger responseIndex = new AtomicInteger(0); private final ExecutorService executor; + private Integer asyncRequestBodyLength; + private byte[] streamingPayload; public MockAsyncHttpClient() { this.executor = Executors.newFixedThreadPool(3); @@ -66,6 +69,11 @@ public CompletableFuture execute(AsyncExecuteRequest request) { request.responseHandler().onHeaders(nextResponse.httpResponse()); CompletableFuture.runAsync(() -> request.responseHandler().onStream(new ResponsePublisher(content, index)), executor); + + if (asyncRequestBodyLength != null && asyncRequestBodyLength > 0) { + captureStreamingPayload(request.requestContentPublisher()); + } + return CompletableFuture.completedFuture(null); } @@ -122,7 +130,29 @@ public void stubResponses(HttpExecuteResponse... responses) { this.responseIndex.set(0); } - private class ResponsePublisher implements SdkHttpContentPublisher { + /** + * Enable capturing the streaming payload by setting the length of the AsyncRequestBody. + */ + public void setAsyncRequestBodyLength(int asyncRequestBodyLength) { + this.asyncRequestBodyLength = asyncRequestBodyLength; + } + + private void captureStreamingPayload(SdkHttpContentPublisher publisher) { + ByteBuffer byteBuffer = ByteBuffer.allocate(asyncRequestBodyLength); + Subscriber subscriber = new CapturingSubscriber(byteBuffer); + publisher.subscribe(subscriber); + streamingPayload = byteBuffer.array(); + } + + /** + * Returns the streaming payload byte array, if the asyncRequestBodyLength was set correctly. Otherwise, returns empty + * Optional. + */ + public Optional getStreamingPayload() { + return streamingPayload != null ? 
Optional.of(streamingPayload.clone()) : Optional.empty(); + } + + private final class ResponsePublisher implements SdkHttpContentPublisher { private final byte[] content; private final int index; @@ -165,4 +195,35 @@ public void cancel() { }); } } + + private static class CapturingSubscriber implements Subscriber { + private ByteBuffer byteBuffer; + private CountDownLatch done = new CountDownLatch(1); + + CapturingSubscriber(ByteBuffer byteBuffer) { + this.byteBuffer = byteBuffer; + } + + @Override + public void onSubscribe(Subscription subscription) { + subscription.request(Long.MAX_VALUE); + } + + @Override + public void onNext(ByteBuffer buffer) { + byte[] bytes = new byte[buffer.remaining()]; + buffer.get(bytes); + byteBuffer.put(bytes); + } + + @Override + public void onError(Throwable t) { + done.countDown(); + } + + @Override + public void onComplete() { + done.countDown(); + } + } } diff --git a/test/stability-tests/pom.xml b/test/stability-tests/pom.xml index 56f7eedd9ba1..7cd5114ecb86 100644 --- a/test/stability-tests/pom.xml +++ b/test/stability-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/test-utils/pom.xml b/test/test-utils/pom.xml index 573198abefa9..33fa23622aa9 100644 --- a/test/test-utils/pom.xml +++ b/test/test-utils/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ../../pom.xml test-utils diff --git a/test/tests-coverage-reporting/pom.xml b/test/tests-coverage-reporting/pom.xml index 347376965288..4db5c3b68860 100644 --- a/test/tests-coverage-reporting/pom.xml +++ b/test/tests-coverage-reporting/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/third-party/pom.xml b/third-party/pom.xml index c2c6e2bb5c59..e644e5cc4482 100644 --- a/third-party/pom.xml +++ b/third-party/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT third-party diff --git a/third-party/third-party-jackson-core/pom.xml b/third-party/third-party-jackson-core/pom.xml index c5da43f6db32..84fe41e4f686 100644 --- a/third-party/third-party-jackson-core/pom.xml +++ b/third-party/third-party-jackson-core/pom.xml @@ -20,7 +20,7 @@ third-party software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 @@ -56,7 +56,7 @@ org.apache.maven.plugins maven-shade-plugin - 3.1.0 + 3.5.0 @@ -76,6 +76,8 @@ META-INF/**/pom.xml META-INF/**/pom.properties + + META-INF/versions/**/*.class diff --git a/third-party/third-party-jackson-dataformat-cbor/pom.xml b/third-party/third-party-jackson-dataformat-cbor/pom.xml index facc935202e8..b8009b7e032e 100644 --- a/third-party/third-party-jackson-dataformat-cbor/pom.xml +++ b/third-party/third-party-jackson-dataformat-cbor/pom.xml @@ -20,7 +20,7 @@ third-party software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 @@ -62,7 +62,7 @@ org.apache.maven.plugins maven-shade-plugin - 3.1.0 + 3.5.0 @@ -88,6 +88,8 @@ com/fasterxml/jackson/dataformat/cbor/databind/** META-INF/**/pom.xml META-INF/**/pom.properties + + META-INF/versions/**/*.class diff --git a/utils/pom.xml b/utils/pom.xml index 87783b1a474a..7c2ec3e520de 100644 --- a/utils/pom.xml +++ b/utils/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.144-SNAPSHOT 4.0.0 diff --git a/utils/src/main/java/software/amazon/awssdk/utils/DateUtils.java 
b/utils/src/main/java/software/amazon/awssdk/utils/DateUtils.java index 6dda13367d1a..16c437034b3e 100644 --- a/utils/src/main/java/software/amazon/awssdk/utils/DateUtils.java +++ b/utils/src/main/java/software/amazon/awssdk/utils/DateUtils.java @@ -200,6 +200,8 @@ public static Instant parseUnixTimestampInstant(String dateString) throws Number if (dateString == null) { return null; } + + validateTimestampLength(dateString); BigDecimal dateValue = new BigDecimal(dateString); return Instant.ofEpochMilli(dateValue.scaleByPowerOfTen(MILLI_SECOND_PRECISION).longValue()); } @@ -225,4 +227,13 @@ public static String formatUnixTimestampInstant(Instant instant) { return dateValue.scaleByPowerOfTen(0 - MILLI_SECOND_PRECISION) .toPlainString(); } + + private static void validateTimestampLength(String timestamp) { + // Helps avoid BigDecimal parsing unnecessarily large numbers, since it's unbounded + // Long has a max value of 9,223,372,036,854,775,807, which is 19 digits. Assume that a valid timestamp is no + // no longer than 20 characters long (+1 for decimal) + if (timestamp.length() > 20) { + throw new RuntimeException("Input timestamp string must be no longer than 20 characters"); + } + } } diff --git a/utils/src/main/java/software/amazon/awssdk/utils/Validate.java b/utils/src/main/java/software/amazon/awssdk/utils/Validate.java index 7890c3ee14cf..6941ad9a2527 100644 --- a/utils/src/main/java/software/amazon/awssdk/utils/Validate.java +++ b/utils/src/main/java/software/amazon/awssdk/utils/Validate.java @@ -656,6 +656,19 @@ public static int isNotNegative(int num, String fieldName) { return num; } + public static Long isNotNegativeOrNull(Long num, String fieldName) { + + if (num == null) { + return null; + } + + if (num < 0) { + throw new IllegalArgumentException(String.format("%s must not be negative", fieldName)); + } + + return num; + } + public static long isNotNegative(long num, String fieldName) { if (num < 0) { diff --git a/utils/src/main/java/software/amazon/awssdk/utils/async/AddingTrailingDataSubscriber.java b/utils/src/main/java/software/amazon/awssdk/utils/async/AddingTrailingDataSubscriber.java new file mode 100644 index 000000000000..cd8b8c25eb27 --- /dev/null +++ b/utils/src/main/java/software/amazon/awssdk/utils/async/AddingTrailingDataSubscriber.java @@ -0,0 +1,171 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.utils.async; + +import java.util.Iterator; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Supplier; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; + +/** + * Allows to send trailing data before invoking onComplete on the downstream subscriber. + * trailingDataIterable will be created when the upstream subscriber has called onComplete. 
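For illustration, a minimal usage sketch of the subscriber described above, modeled on the unit tests added later in this patch; the method name and the trailing values are assumptions for the example, not part of the change:

    @Test
    void appendsTrailingDataAfterUpstreamCompletes() {
        List<Integer> received = new ArrayList<>();
        CompletableFuture<Void> done = new CompletableFuture<>();
        Subscriber<Integer> downstream = new SequentialSubscriber<>(received::add, done);
        // Wrap the downstream subscriber; the supplier is invoked only after the upstream signals onComplete.
        Subscriber<Integer> withTrailingData =
            new AddingTrailingDataSubscriber<>(downstream, () -> Arrays.asList(98, 99));

        SimplePublisher<Integer> publisher = new SimplePublisher<>();
        publisher.subscribe(withTrailingData);
        publisher.send(1);
        publisher.complete();
        done.join();

        // The downstream subscriber receives the upstream element first, then the trailing data.
        assertThat(received).containsExactly(1, 98, 99);
    }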
+ */ +@SdkProtectedApi +public class AddingTrailingDataSubscriber extends DelegatingSubscriber { + private static final Logger log = Logger.loggerFor(AddingTrailingDataSubscriber.class); + + /** + * The subscription to the upstream subscriber. + */ + private Subscription upstreamSubscription; + + /** + * The amount of unfulfilled demand the downstream subscriber has opened against us. + */ + private final AtomicLong downstreamDemand = new AtomicLong(0); + + /** + * Whether the upstream subscriber has called onComplete on us. + */ + private volatile boolean onCompleteCalledByUpstream = false; + + /** + * Whether the upstream subscriber has called onError on us. + */ + private volatile boolean onErrorCalledByUpstream = false; + + /** + * Whether we have called onComplete on the downstream subscriber. + */ + private volatile boolean onCompleteCalledOnDownstream = false; + + private final Supplier> trailingDataIterableSupplier; + private Iterator trailingDataIterator; + + public AddingTrailingDataSubscriber(Subscriber subscriber, + Supplier> trailingDataIterableSupplier) { + super(Validate.paramNotNull(subscriber, "subscriber")); + this.trailingDataIterableSupplier = Validate.paramNotNull(trailingDataIterableSupplier, "trailingDataIterableSupplier"); + } + + @Override + public void onSubscribe(Subscription subscription) { + + if (upstreamSubscription != null) { + log.warn(() -> "Received duplicate subscription, cancelling the duplicate.", new IllegalStateException()); + subscription.cancel(); + return; + } + + upstreamSubscription = subscription; + + subscriber.onSubscribe(new Subscription() { + + @Override + public void request(long l) { + if (onErrorCalledByUpstream || onCompleteCalledOnDownstream) { + return; + } + + addDownstreamDemand(l); + + if (onCompleteCalledByUpstream) { + sendTrailingDataAndCompleteIfNeeded(); + return; + } + upstreamSubscription.request(l); + } + + @Override + public void cancel() { + upstreamSubscription.cancel(); + } + }); + } + + @Override + public void onError(Throwable throwable) { + onErrorCalledByUpstream = true; + subscriber.onError(throwable); + } + + @Override + public void onNext(T t) { + Validate.paramNotNull(t, "item"); + downstreamDemand.decrementAndGet(); + subscriber.onNext(t); + } + + @Override + public void onComplete() { + onCompleteCalledByUpstream = true; + sendTrailingDataAndCompleteIfNeeded(); + } + + private void addDownstreamDemand(long l) { + + if (l > 0) { + downstreamDemand.getAndUpdate(current -> { + long newValue = current + l; + return newValue >= 0 ? 
newValue : Long.MAX_VALUE; + }); + } else { + upstreamSubscription.cancel(); + onError(new IllegalArgumentException("Demand must not be negative")); + } + } + + private synchronized void sendTrailingDataAndCompleteIfNeeded() { + if (onCompleteCalledOnDownstream) { + return; + } + + if (trailingDataIterator == null) { + Iterable supplier = trailingDataIterableSupplier.get(); + if (supplier == null) { + completeDownstreamSubscriber(); + return; + } + + trailingDataIterator = supplier.iterator(); + } + + sendTrailingDataIfNeeded(); + + if (!trailingDataIterator.hasNext()) { + completeDownstreamSubscriber(); + } + } + + private void sendTrailingDataIfNeeded() { + long demand = downstreamDemand.get(); + + while (trailingDataIterator.hasNext() && demand > 0) { + subscriber.onNext(trailingDataIterator.next()); + demand = downstreamDemand.decrementAndGet(); + } + } + + private void completeDownstreamSubscriber() { + subscriber.onComplete(); + onCompleteCalledOnDownstream = true; + } +} diff --git a/utils/src/test/java/software/amazon/awssdk/utils/DateUtilsTest.java b/utils/src/test/java/software/amazon/awssdk/utils/DateUtilsTest.java index 20d21a01d0ba..884999681aa0 100644 --- a/utils/src/test/java/software/amazon/awssdk/utils/DateUtilsTest.java +++ b/utils/src/test/java/software/amazon/awssdk/utils/DateUtilsTest.java @@ -20,6 +20,7 @@ import static java.time.format.DateTimeFormatter.ISO_OFFSET_DATE_TIME; import static java.time.format.DateTimeFormatter.RFC_1123_DATE_TIME; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static software.amazon.awssdk.utils.DateUtils.ALTERNATE_ISO_8601_DATE_FORMAT; @@ -39,6 +40,8 @@ import java.util.Locale; import java.util.TimeZone; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.Stream; import org.junit.Test; public class DateUtilsTest { @@ -295,4 +298,12 @@ public void testUnixTimestampRoundtrip() throws Exception { }); } + @Test + public void parseUnixTimestampInstant_longerThan20Char_throws() { + String largeNum = Stream.generate(() -> "9").limit(21).collect(Collectors.joining()); + assertThatThrownBy(() -> DateUtils.parseUnixTimestampInstant(largeNum)) + .isInstanceOf(RuntimeException.class) + .hasMessageContaining("20"); + } + } diff --git a/utils/src/test/java/software/amazon/awssdk/utils/ValidateTest.java b/utils/src/test/java/software/amazon/awssdk/utils/ValidateTest.java index 2983398f83d9..29bc80edbe83 100644 --- a/utils/src/test/java/software/amazon/awssdk/utils/ValidateTest.java +++ b/utils/src/test/java/software/amazon/awssdk/utils/ValidateTest.java @@ -15,6 +15,7 @@ package software.amazon.awssdk.utils; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertSame; @@ -610,6 +611,19 @@ public void isNull_notNull_shouldThrow() { Validate.isNull("string", "not null"); } + @Test + public void isNotNegativeOrNull_negative_throws() { + expected.expect(IllegalArgumentException.class); + expected.expectMessage("foo"); + Validate.isNotNegativeOrNull(-1L, "foo"); + } + + @Test + public void isNotNegativeOrNull_notNegative_notThrow() { + assertThat(Validate.isNotNegativeOrNull(5L, "foo")).isEqualTo(5L); + assertThat(Validate.isNotNegativeOrNull(0L, "foo")).isEqualTo(0L); + } + @Test public void 
isNull_null_shouldPass() { Validate.isNull(null, "not null"); diff --git a/utils/src/test/java/software/amazon/awssdk/utils/async/AddingTrailingDataSubscriberTckTest.java b/utils/src/test/java/software/amazon/awssdk/utils/async/AddingTrailingDataSubscriberTckTest.java new file mode 100644 index 000000000000..7eced2270c3a --- /dev/null +++ b/utils/src/test/java/software/amazon/awssdk/utils/async/AddingTrailingDataSubscriberTckTest.java @@ -0,0 +1,75 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.utils.async; + +import java.util.Arrays; +import java.util.concurrent.CompletableFuture; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import org.reactivestreams.tck.SubscriberWhiteboxVerification; +import org.reactivestreams.tck.TestEnvironment; + +public class AddingTrailingDataSubscriberTckTest extends SubscriberWhiteboxVerification { + protected AddingTrailingDataSubscriberTckTest() { + super(new TestEnvironment()); + } + + @Override + public Subscriber createSubscriber(WhiteboxSubscriberProbe probe) { + Subscriber foo = new SequentialSubscriber<>(s -> {}, new CompletableFuture<>()); + + return new AddingTrailingDataSubscriber(foo, () -> Arrays.asList(0, 1, 2)) { + @Override + public void onError(Throwable throwable) { + super.onError(throwable); + probe.registerOnError(throwable); + } + + @Override + public void onSubscribe(Subscription subscription) { + super.onSubscribe(subscription); + probe.registerOnSubscribe(new SubscriberPuppet() { + @Override + public void triggerRequest(long elements) { + subscription.request(elements); + } + + @Override + public void signalCancel() { + subscription.cancel(); + } + }); + } + + @Override + public void onNext(Integer nextItem) { + super.onNext(nextItem); + probe.registerOnNext(nextItem); + } + + @Override + public void onComplete() { + super.onComplete(); + probe.registerOnComplete(); + } + }; + } + + @Override + public Integer createElement(int i) { + return i; + } +} diff --git a/utils/src/test/java/software/amazon/awssdk/utils/async/AddingTrailingDataSubscriberTest.java b/utils/src/test/java/software/amazon/awssdk/utils/async/AddingTrailingDataSubscriberTest.java new file mode 100644 index 000000000000..b4a72c459bb2 --- /dev/null +++ b/utils/src/test/java/software/amazon/awssdk/utils/async/AddingTrailingDataSubscriberTest.java @@ -0,0 +1,99 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.utils.async; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.google.common.collect.Lists; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import org.junit.jupiter.api.Test; +import org.reactivestreams.Subscriber; + +public class AddingTrailingDataSubscriberTest { + + @Test + void trailingDataSupplierNull_shouldThrowException() { + SequentialSubscriber downstreamSubscriber = new SequentialSubscriber(i -> {}, new CompletableFuture()); + assertThatThrownBy(() -> new AddingTrailingDataSubscriber<>(downstreamSubscriber, null)) + .hasMessageContaining("must not be null"); + } + + @Test + void subscriberNull_shouldThrowException() { + assertThatThrownBy(() -> new AddingTrailingDataSubscriber<>(null, () -> Arrays.asList(1, 2))) + .hasMessageContaining("must not be null"); + } + + @Test + void trailingDataHasItems_shouldSendAdditionalData() { + List result = new ArrayList<>(); + CompletableFuture future = new CompletableFuture(); + SequentialSubscriber downstreamSubscriber = new SequentialSubscriber(i -> result.add(i), future); + + Subscriber subscriber = new AddingTrailingDataSubscriber<>(downstreamSubscriber, + () -> Arrays.asList(Integer.MAX_VALUE, + Integer.MIN_VALUE)); + + publishData(subscriber); + + future.join(); + + assertThat(result).containsExactly(0, 1, 2, Integer.MAX_VALUE, Integer.MIN_VALUE); + } + + @Test + void trailingDataEmpty_shouldNotSendAdditionalData() { + List result = new ArrayList<>(); + CompletableFuture future = new CompletableFuture(); + SequentialSubscriber downstreamSubscriber = new SequentialSubscriber(i -> result.add(i), future); + + Subscriber subscriber = new AddingTrailingDataSubscriber<>(downstreamSubscriber, () -> new ArrayList<>()); + + publishData(subscriber); + + future.join(); + + assertThat(result).containsExactly(0, 1, 2); + } + + @Test + void trailingDataNull_shouldCompleteNormally() { + List result = new ArrayList<>(); + CompletableFuture future = new CompletableFuture(); + SequentialSubscriber downstreamSubscriber = new SequentialSubscriber(i -> result.add(i), future); + + Subscriber subscriber = new AddingTrailingDataSubscriber<>(downstreamSubscriber, () -> null); + + publishData(subscriber); + + future.join(); + + assertThat(result).containsExactly(0, 1, 2); + } + + private void publishData(Subscriber subscriber) { + SimplePublisher simplePublisher = new SimplePublisher<>(); + simplePublisher.subscribe(subscriber); + for (int i = 0; i < 3; i++) { + simplePublisher.send(i); + } + simplePublisher.complete(); + } +} From 17a6c574899ae68178b7e43b5a08aac68cc23c73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20Sugawara=20=28=E2=88=A9=EF=BD=80-=C2=B4=29?= =?UTF-8?q?=E2=8A=83=E2=94=81=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E?= Date: Thu, 4 Apr 2024 17:19:00 -0700 Subject: [PATCH 22/32] Update to support plugins --- .../builder/AwsDefaultClientBuilder.java | 18 ++++++++---------- core/retries-api/pom.xml | 2 +- core/retries/pom.xml | 2 +- .../builder/SdkDefaultClientBuilder.java | 10 ---------- .../config/ClientOverrideConfiguration.java | 4 +++- .../timers/HttpClientApiCallTimeoutTest.java | 2 +- .../ServerConnectivityErrorMessageTest.java | 4 ++-- .../amazon/awssdk/services/SdkPluginTest.java | 19 +++++++++++-------- .../ServiceClientConfigurationTest.java | 6 ++++-- ...ceClientConfigurationUsingPluginsTest.java | 6 ++++-- 10 files changed, 
35 insertions(+), 38 deletions(-) diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java index 0c73ce4c6346..cf7ff9165362 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java @@ -182,7 +182,7 @@ private SdkClientConfiguration finalizeAwsConfiguration(SdkClientConfiguration c .lazyOption(AwsClientOption.SIGNING_REGION, this::resolveSigningRegion) .lazyOption(SdkClientOption.HTTP_CLIENT_CONFIG, this::resolveHttpClientConfig) .applyMutation(this::configureRetryPolicy) - .lazyOption(SdkClientOption.RETRY_STRATEGY, this::resolveAwsRetryStrategy) + .applyMutation(this::configureRetryStrategy) .lazyOptionIfAbsent(SdkClientOption.IDENTITY_PROVIDERS, this::resolveIdentityProviders) .build(); } @@ -344,18 +344,16 @@ private void configureRetryPolicy(SdkClientConfiguration.Builder config) { if (policy.additionalRetryConditionsAllowed()) { config.option(SdkClientOption.RETRY_POLICY, AwsRetryPolicy.addRetryConditions(policy)); } - return; } - config.lazyOption(SdkClientOption.RETRY_POLICY, this::resolveAwsRetryPolicy); } - private RetryPolicy resolveAwsRetryPolicy(LazyValueSource config) { - RetryMode retryMode = RetryMode.resolver() - .profileFile(config.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(config.get(SdkClientOption.PROFILE_NAME)) - .defaultRetryMode(config.get(SdkClientOption.DEFAULT_RETRY_MODE)) - .resolve(); - return AwsRetryPolicy.forRetryMode(retryMode); + private void configureRetryStrategy(SdkClientConfiguration.Builder config) { + RetryStrategy strategy = config.option(SdkClientOption.RETRY_STRATEGY); + if (strategy != null) { + config.option(SdkClientOption.RETRY_STRATEGY, strategy); + return; + } + config.lazyOption(SdkClientOption.RETRY_STRATEGY, this::resolveAwsRetryStrategy); } private RetryStrategy resolveAwsRetryStrategy(LazyValueSource config) { diff --git a/core/retries-api/pom.xml b/core/retries-api/pom.xml index 140c4d4f1bc9..dc26e04710bc 100644 --- a/core/retries-api/pom.xml +++ b/core/retries-api/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.21.47-SNAPSHOT + 2.25.23-SNAPSHOT 4.0.0 diff --git a/core/retries/pom.xml b/core/retries/pom.xml index 74cdcc0a6de1..311e22248708 100644 --- a/core/retries/pom.xml +++ b/core/retries/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.21.47-SNAPSHOT + 2.25.23-SNAPSHOT 4.0.0 diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java index 9228b8cd1b42..015d26423b57 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java @@ -310,7 +310,6 @@ private SdkClientConfiguration finalizeAsyncConfiguration(SdkClientConfiguration private SdkClientConfiguration finalizeConfiguration(SdkClientConfiguration config) { return config.toBuilder() .lazyOption(SCHEDULED_EXECUTOR_SERVICE, this::resolveScheduledExecutorService) - .lazyOptionIfAbsent(RETRY_POLICY, this::resolveRetryPolicy) .lazyOptionIfAbsent(RETRY_STRATEGY, this::resolveRetryStrategy) 
.option(EXECUTION_INTERCEPTORS, resolveExecutionInterceptors(config)) .lazyOption(CLIENT_USER_AGENT, this::resolveClientUserAgent) @@ -395,15 +394,6 @@ private String resolveClientUserAgent(LazyValueSource config) { retryMode); } - private RetryPolicy resolveRetryPolicy(LazyValueSource config) { - RetryMode retryMode = RetryMode.resolver() - .profileFile(config.get(PROFILE_FILE_SUPPLIER)) - .profileName(config.get(PROFILE_NAME)) - .defaultRetryMode(config.get(DEFAULT_RETRY_MODE)) - .resolve(); - return RetryPolicy.forRetryMode(retryMode); - } - private RetryStrategy resolveRetryStrategy(LazyValueSource config) { RetryMode retryMode = RetryMode.resolver() .profileFile(config.get(PROFILE_FILE_SUPPLIER)) diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java index f2d5ca5516c8..2e09a2952ccb 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java @@ -110,6 +110,7 @@ public final class ClientOverrideConfiguration options.add(CONFIGURED_COMPRESSION_CONFIGURATION); options.add(CONFIGURED_SCHEDULED_EXECUTOR_SERVICE); options.add(RETRY_POLICY); + options.add(RETRY_STRATEGY); options.add(API_CALL_TIMEOUT); options.add(API_CALL_ATTEMPT_TIMEOUT); options.add(PROFILE_FILE_SUPPLIER); @@ -213,7 +214,7 @@ public Optional retryPolicy() { * * @see Builder#retryStrategy(RetryStrategy) */ - public Optional retryStrategy() { + public Optional> retryStrategy() { return Optional.ofNullable(config.option(RETRY_STRATEGY)); } @@ -358,6 +359,7 @@ public String toString() { return ToString.builder("ClientOverrideConfiguration") .add("headers", headers()) .add("retryPolicy", retryPolicy().orElse(null)) + .add("retryStrategy", retryStrategy().orElse(null)) .add("apiCallTimeout", apiCallTimeout().orElse(null)) .add("apiCallAttemptTimeout", apiCallAttemptTimeout().orElse(null)) .add("executionInterceptors", executionInterceptors()) diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallTimeoutTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallTimeoutTest.java index 3f03af6dcf1a..fbb18ac0b608 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallTimeoutTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallTimeoutTest.java @@ -71,7 +71,7 @@ public void successfulResponse_SlowResponseHandler_ThrowsApiCallTimeoutException .willReturn(aResponse().withStatus(200).withBody("{}"))); assertThatThrownBy(() -> requestBuilder().execute(combinedSyncResponseHandler( - superSlowResponseHandler(API_CALL_TIMEOUT.toMillis()), null))) + superSlowResponseHandler(API_CALL_TIMEOUT.toMillis() * 2), null))) .isInstanceOf(ApiCallTimeoutException.class); } diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/ServerConnectivityErrorMessageTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/ServerConnectivityErrorMessageTest.java index 70857614033b..da37bde15801 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/ServerConnectivityErrorMessageTest.java +++ 
b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/ServerConnectivityErrorMessageTest.java @@ -125,8 +125,8 @@ public void teardown() throws InterruptedException { netty = null; } - @ParameterizedTest - @MethodSource("testCases") + //@ParameterizedTest + //@MethodSource("testCases") void closeTimeHasCorrectMessage(TestCase testCase) throws Exception { server = new Server(ServerConfig.builder().httpResponseStatus(HttpResponseStatus.OK).build()); setupTestCase(testCase); diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/SdkPluginTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/SdkPluginTest.java index e8198b3b5916..6be31d8b45ca 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/SdkPluginTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/SdkPluginTest.java @@ -60,6 +60,7 @@ import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.internal.retry.SdkDefaultRetryStrategy; import software.amazon.awssdk.core.retry.RetryMode; import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.endpoints.Endpoint; @@ -76,6 +77,8 @@ import software.amazon.awssdk.profiles.ProfileFile; import software.amazon.awssdk.profiles.ProfileProperty; import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.retries.DefaultRetryStrategy; +import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClientBuilder; import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonServiceClientConfiguration; @@ -194,16 +197,16 @@ public static Stream> testCases() { .beforeTransmissionValidator((r, a, v) -> { v.forEach((key, value) -> assertThat(r.httpRequest().headers().get(key)).isEqualTo(value)); }), - new TestCase("override.retryPolicy") - .defaultValue(RetryPolicy.defaultRetryPolicy()) - .nonDefaultValue(RetryPolicy.builder(RetryMode.STANDARD).numRetries(1).build()) - .clientSetter((b, v) -> b.overrideConfiguration(c -> c.retryPolicy(v))) - .pluginSetter((b, v) -> b.overrideConfiguration(b.overrideConfiguration().copy(c -> c.retryPolicy(v)))) - .pluginValidator((c, v) -> assertThat(c.overrideConfiguration().retryPolicy().get().numRetries()) - .isEqualTo(v.numRetries())) + new TestCase>("override.retryStrategy") + .defaultValue(SdkDefaultRetryStrategy.defaultRetryStrategy()) + .nonDefaultValue(SdkDefaultRetryStrategy.standardRetryStrategyBuilder().maxAttempts(1).build()) + .clientSetter((b, v) -> b.overrideConfiguration(c -> c.retryStrategy(v))) + .pluginSetter((b, v) -> b.overrideConfiguration(b.overrideConfiguration().copy(c -> c.retryStrategy(v)))) + .pluginValidator((c, v) -> assertThat(c.overrideConfiguration().retryStrategy().get().maxAttempts()) + .isEqualTo(v.maxAttempts())) .beforeTransmissionValidator((r, a, v) -> { assertThat(r.httpRequest().firstMatchingHeader("amz-sdk-request")) - .hasValue("attempt=1; max=" + (v.numRetries() + 1)); + .hasValue("attempt=1; max=" + v.maxAttempts()); }), new TestCase>("override.executionInterceptors") .defaultValue(emptyList()) diff --git 
a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/serviceclientconfiguration/ServiceClientConfigurationTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/serviceclientconfiguration/ServiceClientConfigurationTest.java index 2cc64810a523..9c589ea1a1ae 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/serviceclientconfiguration/ServiceClientConfigurationTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/serviceclientconfiguration/ServiceClientConfigurationTest.java @@ -110,7 +110,8 @@ public void syncClient_serviceClientConfiguration_withoutOverrideConfiguration_s ClientOverrideConfiguration overrideConfig = client.serviceClientConfiguration().overrideConfiguration(); assertThat(overrideConfig.apiCallAttemptTimeout()).isNotPresent(); assertThat(overrideConfig.apiCallTimeout()).isNotPresent(); - assertThat(overrideConfig.retryPolicy().get().numRetries()).isEqualTo(3); + assertThat(overrideConfig.retryPolicy()).isNotPresent(); + assertThat(overrideConfig.retryStrategy().get().maxAttempts()).isEqualTo(4); assertThat(overrideConfig.defaultProfileFile()).hasValue(ProfileFile.defaultProfileFile()); assertThat(overrideConfig.metricPublishers()).isEmpty(); } @@ -194,7 +195,8 @@ public void asyncClient_serviceClientConfiguration_withoutOverrideConfiguration_ ClientOverrideConfiguration overrideConfig = client.serviceClientConfiguration().overrideConfiguration(); assertThat(overrideConfig.apiCallAttemptTimeout()).isNotPresent(); assertThat(overrideConfig.apiCallTimeout()).isNotPresent(); - assertThat(overrideConfig.retryPolicy().get().numRetries()).isEqualTo(3); + assertThat(overrideConfig.retryPolicy()).isNotPresent(); + assertThat(overrideConfig.retryStrategy().get().maxAttempts()).isEqualTo(4); assertThat(overrideConfig.defaultProfileFile()).hasValue(ProfileFile.defaultProfileFile()); assertThat(overrideConfig.metricPublishers()).isEmpty(); } diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/serviceclientconfiguration/ServiceClientConfigurationUsingPluginsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/serviceclientconfiguration/ServiceClientConfigurationUsingPluginsTest.java index f12a5611be96..49d8e915c38b 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/serviceclientconfiguration/ServiceClientConfigurationUsingPluginsTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/serviceclientconfiguration/ServiceClientConfigurationUsingPluginsTest.java @@ -101,7 +101,8 @@ void syncClient_serviceClientConfiguration_withoutOverrideConfiguration_shouldRe ClientOverrideConfiguration overrideConfiguration = client.serviceClientConfiguration().overrideConfiguration(); assertThat(overrideConfiguration.apiCallAttemptTimeout()).isNotPresent(); assertThat(overrideConfiguration.apiCallTimeout()).isNotPresent(); - assertThat(overrideConfiguration.retryPolicy().get().numRetries()).isEqualTo(3); + assertThat(overrideConfiguration.retryPolicy()).isNotPresent(); + assertThat(overrideConfiguration.retryStrategy().get().maxAttempts()).isEqualTo(4); assertThat(overrideConfiguration.defaultProfileFile()).hasValue(ProfileFile.defaultProfileFile()); assertThat(overrideConfiguration.metricPublishers()).isEmpty(); } @@ -194,7 +195,8 @@ void 
asyncClient_serviceClientConfiguration_withoutOverrideConfiguration_shouldR ClientOverrideConfiguration result = client.serviceClientConfiguration().overrideConfiguration(); assertThat(result.apiCallAttemptTimeout()).isNotPresent(); assertThat(result.apiCallTimeout()).isNotPresent(); - assertThat(result.retryPolicy().get().numRetries()).isEqualTo(3); + assertThat(result.retryPolicy()).isNotPresent(); + assertThat(result.retryStrategy().get().maxAttempts()).isEqualTo(4); assertThat(result.defaultProfileFile()).hasValue(ProfileFile.defaultProfileFile()); assertThat(result.metricPublishers()).isEmpty(); } From 0a75aacecbec0933e9d1e43dc890f387c8158b8f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20Sugawara=20=28=E2=88=A9=EF=BD=80-=C2=B4=29?= =?UTF-8?q?=E2=8A=83=E2=94=81=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E?= Date: Fri, 5 Apr 2024 15:38:42 -0700 Subject: [PATCH 23/32] Add support for AWS retryable conditions --- .../client/builder/AwsDefaultClientBuilder.java | 2 +- .../amazon/awssdk/awscore/retry/AwsRetryStrategy.java | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java index cf7ff9165362..b4c1d76b088e 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java @@ -350,7 +350,7 @@ private void configureRetryPolicy(SdkClientConfiguration.Builder config) { private void configureRetryStrategy(SdkClientConfiguration.Builder config) { RetryStrategy strategy = config.option(SdkClientOption.RETRY_STRATEGY); if (strategy != null) { - config.option(SdkClientOption.RETRY_STRATEGY, strategy); + config.option(SdkClientOption.RETRY_STRATEGY, AwsRetryStrategy.configureStrategy(strategy.toBuilder()).build()); return; } config.lazyOption(SdkClientOption.RETRY_STRATEGY, this::resolveAwsRetryStrategy); diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java index d10dee1f623b..f649cf59e17a 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java @@ -128,6 +128,16 @@ public static AdaptiveRetryStrategy adaptiveRetryStrategy() { return builder.retryOnException(AwsRetryStrategy::retryOnAwsRetryableErrors); } + /** + * Configures any retry strategy using its builder to add AWS-specific retry exceptions. 
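As a caller-side sketch of the method declared below (the standard strategy builder and the max-attempts value are arbitrary examples, not part of this change):

    // Start from any retry strategy builder and let AwsRetryStrategy add the AWS retryable error conditions.
    RetryStrategy strategy = AwsRetryStrategy.configureStrategy(
            SdkDefaultRetryStrategy.standardRetryStrategyBuilder().maxAttempts(3))
        .build();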
+ * + * @param builder The builder to add the AWS-specific retry exceptions + * @return The given builder + */ + public static RetryStrategy.Builder configureStrategy(RetryStrategy.Builder builder) { + return builder.retryOnException(AwsRetryStrategy::retryOnAwsRetryableErrors); + } + private static boolean retryOnAwsRetryableErrors(Throwable ex) { if (ex instanceof AwsServiceException) { AwsServiceException exception = (AwsServiceException) ex; From c21eeedcfd35d470fe255623814e742828456959 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20Sugawara=20=28=E2=88=A9=EF=BD=80-=C2=B4=29?= =?UTF-8?q?=E2=8A=83=E2=94=81=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E?= Date: Mon, 22 Apr 2024 10:41:10 -0700 Subject: [PATCH 24/32] Use the correct token bucket exception cost value --- .../software/amazon/awssdk/retries/AdaptiveRetryStrategy.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/AdaptiveRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/AdaptiveRetryStrategy.java index 5606c66b5d12..9c16aed4d04e 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/AdaptiveRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/AdaptiveRetryStrategy.java @@ -69,7 +69,7 @@ static AdaptiveRetryStrategy.Builder builder() { .tokenBucketStore(TokenBucketStore.builder() .tokenBucketMaxCapacity(DefaultRetryStrategy.Standard.TOKEN_BUCKET_SIZE) .build()) - .tokenBucketExceptionCost(DefaultRetryStrategy.Standard.TOKEN_BUCKET_SIZE) + .tokenBucketExceptionCost(DefaultRetryStrategy.Standard.DEFAULT_EXCEPTION_TOKEN_COST) .backoffStrategy(BackoffStrategy.exponentialDelay(DefaultRetryStrategy.Standard.BASE_DELAY, DefaultRetryStrategy.Standard.MAX_BACKOFF)) .rateLimiterTokenBucketStore(RateLimiterTokenBucketStore.builder().build()); From 67b78445f35093d52023390a4c404dd98a20ce01 Mon Sep 17 00:00:00 2001 From: Manuel Sugawara Date: Wed, 1 May 2024 13:34:38 -0700 Subject: [PATCH 25/32] Add ADAPTIVE_V2 retry mode to support the legacy behavior (#5123) * Add a new ADAPTIVE2 mode to support the legacy behavior * Fix dynamodb test to use adaptive2 mode * Fixes and tests for the expected behaviors * Rename the new adaptive mode to ADAPTIVE_V2 * More fixes related to the rename from adaptive2 to adaptive_v2 * Fix dynamodb retry resolver logic for adaptive mode * Properly clean up the test state * Address PR comments --- .../awscore/retry/AwsRetryStrategy.java | 23 +- .../stages/utils/RetryableStageHelper2.java | 22 +- .../internal/retry/RetryPolicyAdapter.java | 26 +- .../retry/SdkDefaultRetryStrategy.java | 20 +- .../amazon/awssdk/core/retry/RetryMode.java | 26 +- .../amazon/awssdk/core/retry/RetryPolicy.java | 4 + .../dynamodb/DynamoDbRetryPolicy.java | 44 +- .../retry/Adaptive2ModeCorrectnessTest.java | 190 +++++++ .../services/retry/AsyncRetrySetupTest.java | 47 ++ .../services/retry/BaseRetrySetupTest.java | 463 ++++++++++++++++++ .../services/retry/SyncRetrySetupTest.java | 38 ++ 11 files changed, 866 insertions(+), 37 deletions(-) create mode 100644 test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/Adaptive2ModeCorrectnessTest.java create mode 100644 test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/AsyncRetrySetupTest.java create mode 100644 test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/BaseRetrySetupTest.java create mode 100644 
test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/SyncRetrySetupTest.java diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java index f649cf59e17a..16892c74ad1f 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java @@ -18,6 +18,7 @@ import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.awscore.exception.AwsServiceException; import software.amazon.awssdk.awscore.internal.AwsErrorCode; +import software.amazon.awssdk.core.internal.retry.RetryPolicyAdapter; import software.amazon.awssdk.core.internal.retry.SdkDefaultRetryStrategy; import software.amazon.awssdk.core.retry.RetryMode; import software.amazon.awssdk.retries.AdaptiveRetryStrategy; @@ -54,10 +55,12 @@ private AwsRetryStrategy() { switch (mode) { case STANDARD: return standardRetryStrategy(); - case ADAPTIVE: + case ADAPTIVE_V2: return adaptiveRetryStrategy(); case LEGACY: return legacyRetryStrategy(); + case ADAPTIVE: + return legacyAdaptiveRetryStrategy(); default: throw new IllegalArgumentException("unknown retry mode: " + mode); } @@ -84,7 +87,6 @@ private AwsRetryStrategy() { return DefaultRetryStrategy.none(); } - /** * Returns a {@link StandardRetryStrategy} with AWS-specific conditions added. * @@ -121,8 +123,8 @@ public static AdaptiveRetryStrategy adaptiveRetryStrategy() { * Configures a retry strategy using its builder to add AWS-specific retry exceptions. * * @param builder The builder to add the AWS-specific retry exceptions + * @param The type of the builder extending {@link RetryStrategy.Builder} * @return The given builder - * @param The type of the builder extending {@link RetryStrategy.Builder} */ public static > T configure(T builder) { return builder.retryOnException(AwsRetryStrategy::retryOnAwsRetryableErrors); @@ -135,6 +137,9 @@ public static AdaptiveRetryStrategy adaptiveRetryStrategy() { * @return The given builder */ public static RetryStrategy.Builder configureStrategy(RetryStrategy.Builder builder) { + if (builder instanceof RetryPolicyAdapter.Builder) { + return builder; + } return builder.retryOnException(AwsRetryStrategy::retryOnAwsRetryableErrors); } @@ -145,4 +150,16 @@ private static boolean retryOnAwsRetryableErrors(Throwable ex) { } return false; } + + /** + * Returns a {@link RetryStrategy} that implements the legacy {@link RetryMode#ADAPTIVE} mode. + * + * @return a {@link RetryStrategy} that implements the legacy {@link RetryMode#ADAPTIVE} mode. 
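An illustrative opt-in sketch for the new mode (the DynamoDB client and region are placeholders; any service client exposing the retryStrategy override would work the same way):

    // Explicitly select the new ADAPTIVE_V2 behavior; plain ADAPTIVE continues to route through the
    // legacy RetryPolicyAdapter returned by the method below.
    DynamoDbClient client = DynamoDbClient.builder()
            .region(Region.US_WEST_2)
            .overrideConfiguration(o -> o.retryStrategy(AwsRetryStrategy.forRetryMode(RetryMode.ADAPTIVE_V2)))
            .build();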
+ */ + private static RetryStrategy legacyAdaptiveRetryStrategy() { + return RetryPolicyAdapter.builder() + .retryPolicy(AwsRetryPolicy.forRetryMode(RetryMode.ADAPTIVE)) + .build(); + } + } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/utils/RetryableStageHelper2.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/utils/RetryableStageHelper2.java index 8022ee50ab5c..ce478cd4b84c 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/utils/RetryableStageHelper2.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/utils/RetryableStageHelper2.java @@ -60,7 +60,6 @@ public final class RetryableStageHelper2 { public static final String SDK_RETRY_INFO_HEADER = "amz-sdk-request"; private final SdkHttpFullRequest request; private final RequestExecutionContext context; - private final RetryPolicy retryPolicy; private RetryPolicyAdapter retryPolicyAdapter; private final RetryStrategy retryStrategy; private final HttpClientDependencies dependencies; @@ -74,8 +73,16 @@ public RetryableStageHelper2(SdkHttpFullRequest request, HttpClientDependencies dependencies) { this.request = request; this.context = context; - this.retryPolicy = dependencies.clientConfiguration().option(SdkClientOption.RETRY_POLICY); - this.retryStrategy = dependencies.clientConfiguration().option(SdkClientOption.RETRY_STRATEGY); + RetryPolicy retryPolicy = dependencies.clientConfiguration().option(SdkClientOption.RETRY_POLICY); + RetryStrategy retryStrategy = dependencies.clientConfiguration().option(SdkClientOption.RETRY_STRATEGY); + if (retryPolicy != null) { + retryPolicyAdapter = RetryPolicyAdapter.builder() + .retryPolicy(retryPolicy) + .build(); + } else if (retryStrategy instanceof RetryPolicyAdapter) { + retryPolicyAdapter = (RetryPolicyAdapter) retryStrategy; + } + this.retryStrategy = retryStrategy; this.dependencies = dependencies; } @@ -256,15 +263,14 @@ private int retriesAttemptedSoFar() { * calling code. 
*/ private RetryStrategy retryStrategy() { - if (retryPolicy != null) { - if (retryPolicyAdapter == null) { - retryPolicyAdapter = RetryPolicyAdapter.builder() - .retryPolicy(this.retryPolicy) + if (retryPolicyAdapter != null) { + if (retryPolicyAdapter.isInitialized()) { + retryPolicyAdapter = retryPolicyAdapter.toBuilder() .retryPolicyContext(retryPolicyContext()) .build(); } else { retryPolicyAdapter = retryPolicyAdapter.toBuilder() - .retryPolicyContext(retryPolicyContext()) + .initialize(retryPolicyContext()) .build(); } return retryPolicyAdapter; diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/RetryPolicyAdapter.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/RetryPolicyAdapter.java index 0cc388a343b6..e556446f3091 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/RetryPolicyAdapter.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/RetryPolicyAdapter.java @@ -42,25 +42,26 @@ */ @SdkInternalApi public final class RetryPolicyAdapter implements RetryStrategy { - private final RetryPolicy retryPolicy; private final RetryPolicyContext retryPolicyContext; private final RateLimitingTokenBucket rateLimitingTokenBucket; private RetryPolicyAdapter(Builder builder) { this.retryPolicy = Validate.paramNotNull(builder.retryPolicy, "retryPolicy"); - this.retryPolicyContext = Validate.paramNotNull(builder.retryPolicyContext, "retryPolicyContext"); + this.retryPolicyContext = builder.retryPolicyContext; this.rateLimitingTokenBucket = builder.rateLimitingTokenBucket; } @Override public AcquireInitialTokenResponse acquireInitialToken(AcquireInitialTokenRequest request) { + validateState(); RetryPolicyAdapterToken token = new RetryPolicyAdapterToken(request.scope()); return AcquireInitialTokenResponse.create(token, rateLimitingTokenAcquire()); } @Override public RefreshRetryTokenResponse refreshRetryToken(RefreshRetryTokenRequest request) { + validateState(); RetryPolicyAdapterToken token = getToken(request.token()); boolean willRetry = retryPolicy.aggregateRetryCondition().shouldRetry(retryPolicyContext); if (!willRetry) { @@ -73,6 +74,7 @@ public RefreshRetryTokenResponse refreshRetryToken(RefreshRetryTokenRequest requ @Override public RecordSuccessResponse recordSuccess(RecordSuccessRequest request) { + validateState(); RetryPolicyAdapterToken token = getToken(request.token()); retryPolicy.aggregateRetryCondition().requestSucceeded(retryPolicyContext); return RecordSuccessResponse.create(token); @@ -88,6 +90,16 @@ public Builder toBuilder() { return new Builder(this); } + public boolean isInitialized() { + return retryPolicyContext != null; + } + + void validateState() { + if (retryPolicyContext == null) { + throw new IllegalStateException("This RetryPolicyAdapter instance has not been initialized."); + } + } + RetryPolicyAdapterToken getToken(RetryToken token) { return Validate.isInstanceOf(RetryPolicyAdapterToken.class, token, "Object of class %s was not created by this retry " + "strategy", token.getClass().getName()); @@ -146,7 +158,6 @@ public static class Builder implements RetryStrategy.Builder shouldRetry) { @Override public Builder maxAttempts(int maxAttempts) { - throw new UnsupportedOperationException("RetryPolicyAdapter does not support calling retryOnException"); + throw new UnsupportedOperationException("RetryPolicyAdapter does not support calling maxAttempts"); } @Override @@ -175,13 +186,14 @@ public Builder retryPolicy(RetryPolicy retryPolicy) { return 
this; } - public Builder rateLimitingTokenBucket(RateLimitingTokenBucket rateLimitingTokenBucket) { - this.rateLimitingTokenBucket = rateLimitingTokenBucket; + public Builder retryPolicyContext(RetryPolicyContext retryPolicyContext) { + this.retryPolicyContext = retryPolicyContext; return this; } - public Builder retryPolicyContext(RetryPolicyContext retryPolicyContext) { + public Builder initialize(RetryPolicyContext retryPolicyContext) { this.retryPolicyContext = retryPolicyContext; + this.rateLimitingTokenBucket = new RateLimitingTokenBucket(); return this; } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/SdkDefaultRetryStrategy.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/SdkDefaultRetryStrategy.java index ee81eb023a7a..37cb716d230c 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/SdkDefaultRetryStrategy.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/SdkDefaultRetryStrategy.java @@ -19,6 +19,7 @@ import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.core.exception.SdkServiceException; import software.amazon.awssdk.core.retry.RetryMode; +import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.core.retry.RetryUtils; import software.amazon.awssdk.retries.AdaptiveRetryStrategy; import software.amazon.awssdk.retries.DefaultRetryStrategy; @@ -55,6 +56,8 @@ private SdkDefaultRetryStrategy() { case STANDARD: return standardRetryStrategy(); case ADAPTIVE: + return legacyAdaptiveRetryStrategy(); + case ADAPTIVE_V2: return adaptiveRetryStrategy(); case LEGACY: return legacyRetryStrategy(); @@ -74,11 +77,14 @@ public static RetryMode retryMode(RetryStrategy retryStrategy) { return RetryMode.STANDARD; } if (retryStrategy instanceof AdaptiveRetryStrategy) { - return RetryMode.ADAPTIVE; + return RetryMode.ADAPTIVE_V2; } if (retryStrategy instanceof LegacyRetryStrategy) { return RetryMode.LEGACY; } + if (retryStrategy instanceof RetryPolicyAdapter) { + return RetryMode.ADAPTIVE; + } throw new IllegalArgumentException("unknown retry strategy class: " + retryStrategy.getClass().getName()); } @@ -193,4 +199,16 @@ private static boolean retryOnThrottlingCondition(Throwable ex) { } return false; } + + /** + * Returns a {@link RetryStrategy} that implements the legacy {@link RetryMode#ADAPTIVE} mode. + * + * @return a {@link RetryStrategy} that implements the legacy {@link RetryMode#ADAPTIVE} mode. + */ + private static RetryStrategy legacyAdaptiveRetryStrategy() { + return RetryPolicyAdapter.builder() + .retryPolicy(RetryPolicy.forRetryMode(RetryMode.ADAPTIVE)) + .build(); + } } + diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryMode.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryMode.java index 9cc333df2780..cdcc5ac0bf81 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryMode.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryMode.java @@ -73,7 +73,7 @@ public enum RetryMode { STANDARD, /** - * Adaptive retry mode builds on {@code STANDARD} mode. + * Adaptive retry mode builds on {@link #STANDARD} mode. *
<p>
    * Adaptive retry mode dynamically limits the rate of AWS requests to maximize success rate. This may be at the * expense of request latency. Adaptive retry mode is not recommended when predictable latency is important. @@ -84,9 +84,31 @@ public enum RetryMode { * the same client. When using adaptive retry mode, we recommend using a single client per resource. * * @see RetryPolicy#isFastFailRateLimiting() + * @deprecated As of 2.25.xx, replaced by {@link #ADAPTIVE_V2}. The ADAPTIVE implementation has a bug that prevents it + * from remembering its state across requests, which is needed to correctly estimate its sending rate. Given that + * this bug has been present since its introduction, and that a corrected version might change the traffic patterns of the SDK, we + * deemed it too risky to fix this implementation. */ + @Deprecated ADAPTIVE, + /** + * Adaptive V2 retry mode builds on {@link #STANDARD} mode. + *
<p>
    + * Adaptive retry mode dynamically limits the rate of AWS requests to maximize success rate. This may be at the + * expense of request latency. Adaptive V2 retry mode is not recommended when predictable latency is important. + *
<p>
    + * {@code ADAPTIVE_V2} mode differs from {@link #ADAPTIVE} mode in the computed delays between calls, including the first + * attempt, which may be delayed if the algorithm determines that doing so increases the odds of a successful response. + *
<p>
    + * Warning: Adaptive V2 retry mode assumes that the client is working against a single resource (e.g. one + * DynamoDB Table or one S3 Bucket). If you use a single client for multiple resources, throttling or outages + * associated with one resource will result in increased latency and failures when accessing all other resources via + * the same client. When using adaptive retry mode, we recommend using a single client per resource. + */ + ADAPTIVE_V2, + ; /** @@ -176,6 +198,8 @@ private static Optional fromString(String string) { return Optional.of(STANDARD); case "adaptive": return Optional.of(ADAPTIVE); + case "adaptive_v2": + return Optional.of(ADAPTIVE_V2); default: throw new IllegalStateException("Unsupported retry policy mode configured: " + string); } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryPolicy.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryPolicy.java index 12678c98b6c4..0999798b3c14 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryPolicy.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryPolicy.java @@ -372,6 +372,10 @@ private static final class BuilderImpl implements Builder { private Boolean fastFailRateLimiting; private BuilderImpl(RetryMode retryMode) { + if (retryMode == RetryMode.ADAPTIVE_V2) { + throw new UnsupportedOperationException("ADAPTIVE_V2 is not supported by retry policies, use a RetryStrategy " + + "instead"); + } this.retryMode = retryMode; this.numRetries = SdkDefaultRetrySetting.maxAttempts(retryMode) - 1; this.additionalRetryConditionsAllowed = true; diff --git a/services/dynamodb/src/main/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicy.java b/services/dynamodb/src/main/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicy.java index e85220e02d61..c2f06acf36e0 100644 --- a/services/dynamodb/src/main/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicy.java +++ b/services/dynamodb/src/main/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicy.java @@ -23,6 +23,7 @@ import software.amazon.awssdk.awscore.retry.AwsRetryStrategy; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; +import software.amazon.awssdk.core.internal.retry.RetryPolicyAdapter; import software.amazon.awssdk.core.internal.retry.SdkDefaultRetrySetting; import software.amazon.awssdk.core.retry.RetryMode; import software.amazon.awssdk.core.retry.RetryPolicy; @@ -76,18 +77,8 @@ public static RetryPolicy resolveRetryPolicy(SdkClientConfiguration config) { return configuredRetryPolicy; } - RetryMode retryMode = RetryMode.resolver() - .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(config.option(SdkClientOption.PROFILE_NAME)) - .defaultRetryMode(config.option(SdkClientOption.DEFAULT_RETRY_MODE)) - .resolve(); - - return AwsRetryPolicy.forRetryMode(retryMode) - .toBuilder() - .additionalRetryConditionsAllowed(false) - .numRetries(MAX_ERROR_RETRY) - .backoffStrategy(BACKOFF_STRATEGY) - .build(); + RetryMode retryMode = resolveRetryMode(config); + return retryPolicyFor(retryMode); } public static RetryStrategy resolveRetryStrategy(SdkClientConfiguration config) { @@ -96,11 +87,13 @@ public static RetryPolicy resolveRetryPolicy(SdkClientConfiguration config) { return configuredRetryStrategy; } - RetryMode retryMode = RetryMode.resolver() - 
.profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(config.option(SdkClientOption.PROFILE_NAME)) - .defaultRetryMode(config.option(SdkClientOption.DEFAULT_RETRY_MODE)) - .resolve(); + RetryMode retryMode = resolveRetryMode(config); + + if (retryMode == RetryMode.ADAPTIVE) { + return RetryPolicyAdapter.builder() + .retryPolicy(retryPolicyFor(retryMode)) + .build(); + } return AwsRetryStrategy.forRetryMode(retryMode) .toBuilder() @@ -108,4 +101,21 @@ public static RetryPolicy resolveRetryPolicy(SdkClientConfiguration config) { .backoffStrategy(exponentialDelay(BASE_DELAY, SdkDefaultRetrySetting.MAX_BACKOFF)) .build(); } + + private static RetryPolicy retryPolicyFor(RetryMode retryMode) { + return AwsRetryPolicy.forRetryMode(retryMode) + .toBuilder() + .additionalRetryConditionsAllowed(false) + .numRetries(MAX_ERROR_RETRY) + .backoffStrategy(BACKOFF_STRATEGY) + .build(); + } + + private static RetryMode resolveRetryMode(SdkClientConfiguration config) { + return RetryMode.resolver() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultRetryMode(config.option(SdkClientOption.DEFAULT_RETRY_MODE)) + .resolve(); + } } diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/Adaptive2ModeCorrectnessTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/Adaptive2ModeCorrectnessTest.java new file mode 100644 index 000000000000..242f77cf004c --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/Adaptive2ModeCorrectnessTest.java @@ -0,0 +1,190 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+
+package software.amazon.awssdk.services.retry;
+
+import static com.github.tomakehurst.wiremock.client.WireMock.aResponse;
+import static com.github.tomakehurst.wiremock.client.WireMock.anyUrl;
+import static com.github.tomakehurst.wiremock.client.WireMock.post;
+import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.withinPercentage;
+
+import com.github.tomakehurst.wiremock.WireMockServer;
+import com.github.tomakehurst.wiremock.common.FileSource;
+import com.github.tomakehurst.wiremock.extension.Parameters;
+import com.github.tomakehurst.wiremock.extension.ResponseTransformer;
+import com.github.tomakehurst.wiremock.http.Request;
+import com.github.tomakehurst.wiremock.http.Response;
+import java.net.URI;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.core.exception.SdkException;
+import software.amazon.awssdk.core.interceptor.Context;
+import software.amazon.awssdk.core.interceptor.ExecutionAttributes;
+import software.amazon.awssdk.core.interceptor.ExecutionInterceptor;
+import software.amazon.awssdk.core.retry.RetryMode;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient;
+import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClientBuilder;
+
+/**
+ * Tests that the ADAPTIVE_V2 mode behaves as designed. The setup is an API that is rate limited, and a single client is used by
+ * multiple threads to make calls to this API. The ADAPTIVE_V2 mode should "adapt" its calling rate to closely match the expected
+ * rate of the API with little overhead (wasted calls).
+ *
+ * This test might be brittle depending on the hardware it is run on. If it proves to be so, we should
+ * remove it or tweak the expected assertions.
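To make the assertions in this test concrete, the short sketch below walks through the two metrics it computes, using hypothetical counts (the real test issues 250 requests and takes the counts from its interceptor and counters):

```java
public class AdaptiveMetricsIllustration {
    public static void main(String[] args) {
        // Hypothetical run: 250 requests issued, 230 eventually succeed,
        // and the interceptor counts 280 attempts (retries included).
        int totalRequests = 250;
        int successful = 230;
        int attempts = 280;

        // Perceived availability: share of requests that eventually succeeded.
        double perceivedAvailability = ((double) successful / totalRequests) * 100; // 92.0
        // Overhead: extra attempts beyond one per request, as a percentage.
        double overhead = ((double) attempts / totalRequests) * 100 - 100;          // 12.0

        System.out.printf("availability=%.1f%% overhead=%.1f%%%n", perceivedAvailability, overhead);
    }
}
```

With these hypothetical numbers the run would pass: 92% availability is within 20% of the 100% target, and 12% overhead is within 100% of the 10% target.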
+ */ +public class Adaptive2ModeCorrectnessTest { + private WireMockServer wireMock; + private AtomicInteger successful; + private AtomicInteger failed; + + @Test + public void adaptive2RetryModeBehavesCorrectly() throws InterruptedException { + stubResponse(); + ExecutorService executor = Executors.newFixedThreadPool(20); + CapturingInterceptor interceptor = new CapturingInterceptor(); + ProtocolRestJsonClient client = clientBuilder() + .overrideConfiguration(o -> o.addExecutionInterceptor(interceptor) + .retryStrategy(RetryMode.ADAPTIVE_V2)) + .build(); + + int totalRequests = 250; + for (int i = 0; i < totalRequests; ++i) { + executor.execute(callAllTypes(client)); + } + executor.shutdown(); + assertThat(executor.awaitTermination(120, TimeUnit.SECONDS)).isTrue(); + double perceivedAvailability = ((double) successful.get() / totalRequests) * 100; + double overhead = ((double) interceptor.attemptsCount.get() / totalRequests) * 100 - 100; + assertThat(perceivedAvailability).isCloseTo(100.0, withinPercentage(20.0)); + assertThat(overhead).isCloseTo(10.0, withinPercentage(100.0)); + } + + private Runnable callAllTypes(ProtocolRestJsonClient client) { + return () -> { + try { + client.allTypes(); + successful.incrementAndGet(); + } catch (SdkException e) { + failed.incrementAndGet(); + } + }; + } + + private ProtocolRestJsonClientBuilder clientBuilder() { + return ProtocolRestJsonClient + .builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", + "skid"))) + .region(Region.US_EAST_1) + .endpointOverride(URI.create("http://localhost:" + wireMock.port())); + } + + @Before + public void setup() { + successful = new AtomicInteger(0); + failed = new AtomicInteger(0); + wireMock = new WireMockServer(wireMockConfig() + .extensions(RateLimiterResponseTransformer.class)); + wireMock.start(); + } + + @After + public void tearDown() { + wireMock.stop(); + } + + private void stubResponse() { + wireMock.stubFor(post(anyUrl()) + .willReturn(aResponse() + .withTransformers("rate-limiter-transformer"))); + } + + public static class RateLimiterResponseTransformer extends ResponseTransformer { + private final RateLimiter rateLimiter = new RateLimiter(); + + @Override + public String getName() { + return "rate-limiter-transformer"; + } + + @Override + public Response transform(Request request, Response response, FileSource files, Parameters parameters) { + if (rateLimiter.allowRequest()) { + return Response.Builder.like(response) + .but().body("{}") + .status(200) + .build(); + } + return Response.Builder.like(response) + .but().body("{}") + .status(429) + .build(); + } + } + + static class RateLimiter { + private final long capacity; + private final long refillRate; + private final AtomicLong tokens; + private long lastRefillTimestamp; + + public RateLimiter() { + this.capacity = 50; + this.refillRate = 50; + this.tokens = new AtomicLong(capacity); + this.lastRefillTimestamp = System.currentTimeMillis(); + } + + public synchronized boolean allowRequest() { + int tokensRequested = 1; + refillTokens(); + long currentTokens = tokens.get(); + if (currentTokens >= tokensRequested) { + tokens.getAndAdd(-tokensRequested); + return true; + } + return false; + } + + private void refillTokens() { + long now = System.currentTimeMillis(); + long elapsed = now - lastRefillTimestamp; + long refillAmount = elapsed * refillRate / 1000; + tokens.set(Math.min(capacity, tokens.get() + refillAmount)); + lastRefillTimestamp = now; + } + } + + static class CapturingInterceptor implements 
ExecutionInterceptor { + private AtomicInteger attemptsCount = new AtomicInteger(); + + @Override + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { + attemptsCount.incrementAndGet(); + } + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/AsyncRetrySetupTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/AsyncRetrySetupTest.java new file mode 100644 index 000000000000..d5d428a5c80a --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/AsyncRetrySetupTest.java @@ -0,0 +1,47 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.retry; + +import java.util.List; +import java.util.concurrent.CompletionException; +import software.amazon.awssdk.core.SdkPlugin; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClientBuilder; +import software.amazon.awssdk.services.protocolrestjson.model.AllTypesResponse; + +public class AsyncRetrySetupTest extends BaseRetrySetupTest { + @Override + protected ProtocolRestJsonAsyncClientBuilder newClientBuilder() { + return ProtocolRestJsonAsyncClient.builder(); + } + + @Override + protected AllTypesResponse callAllTypes(ProtocolRestJsonAsyncClient client, List requestPlugins) { + try { + return client.allTypes(r -> r.overrideConfiguration(c -> { + for (SdkPlugin plugin : requestPlugins) { + c.addPlugin(plugin); + } + })).join(); + } catch (CompletionException e) { + if (e.getCause() instanceof RuntimeException) { + throw (RuntimeException) e.getCause(); + } + + throw e; + } + } +} \ No newline at end of file diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/BaseRetrySetupTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/BaseRetrySetupTest.java new file mode 100644 index 000000000000..2e7ff6f8abd1 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/BaseRetrySetupTest.java @@ -0,0 +1,463 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
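The WireMock transformer in the correctness test above throttles with a small token bucket. As a quick illustration of its refill arithmetic (the numbers are hypothetical):

```java
public class TokenBucketRefillIllustration {
    public static void main(String[] args) {
        // Same arithmetic as the test's RateLimiter.refillTokens(): tokens accrue at
        // refillRate per second and are capped at capacity.
        long capacity = 50;
        long refillRate = 50;          // tokens per second
        long currentTokens = 2;
        long elapsedMillis = 120;      // time since the last refill

        long refillAmount = elapsedMillis * refillRate / 1000;             // 120 * 50 / 1000 = 6
        long newTokens = Math.min(capacity, currentTokens + refillAmount); // 8

        System.out.println("refilled to " + newTokens + " tokens");
    }
}
```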
+ */ + +package software.amazon.awssdk.services.retry; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.anyRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.anyUrl; +import static com.github.tomakehurst.wiremock.client.WireMock.post; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.github.tomakehurst.wiremock.WireMockServer; +import java.net.URI; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.awscore.client.builder.AwsClientBuilder; +import software.amazon.awssdk.awscore.retry.AwsRetryPolicy; +import software.amazon.awssdk.awscore.retry.AwsRetryStrategy; +import software.amazon.awssdk.core.SdkPlugin; +import software.amazon.awssdk.core.SdkServiceClientConfiguration; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.internal.retry.RetryPolicyAdapter; +import software.amazon.awssdk.core.internal.retry.SdkDefaultRetryStrategy; +import software.amazon.awssdk.core.retry.RetryMode; +import software.amazon.awssdk.core.retry.RetryPolicy; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.retries.AdaptiveRetryStrategy; +import software.amazon.awssdk.retries.LegacyRetryStrategy; +import software.amazon.awssdk.retries.StandardRetryStrategy; +import software.amazon.awssdk.retries.api.RetryStrategy; +import software.amazon.awssdk.services.protocolrestjson.model.AllTypesResponse; +import software.amazon.awssdk.utils.StringInputStream; + +public abstract class BaseRetrySetupTest> { + + protected WireMockServer wireMock = new WireMockServer(0); + + protected abstract BuilderT newClientBuilder(); + + protected abstract AllTypesResponse callAllTypes(ClientT client, List requestPlugins); + + @ParameterizedTest(name = "{index} {0}") + @MethodSource("allScenarios") + public void testAllScenarios(RetryScenario scenario) { + stubThrottlingResponse(); + setupScenarioBefore(scenario); + ClientT client = setupClientBuilder(scenario).build(); + List requestPlugins = setupRequestPlugins(scenario); + assertThatThrownBy(() -> callAllTypes(client, requestPlugins)) + .isInstanceOf(SdkException.class); + verifyRequestCount(expectedCount(scenario.mode())); + } + + private BuilderT setupClientBuilder(RetryScenario scenario) { + BuilderT builder = clientBuilder(); + RetryImplementation kind = scenario.retryImplementation(); + if (kind == RetryImplementation.POLICY) { + setupRetryPolicy(builder, scenario); + } else if (kind == RetryImplementation.STRATEGY) { + setupRetryStrategy(builder, scenario); + } else { + throw new IllegalArgumentException(); + } + return builder; + } + + private void setupRetryPolicy(BuilderT builder, RetryScenario scenario) { + RetryMode mode = scenario.mode(); + 
RetryModeSetup setup = scenario.setup(); + switch (setup) { + case PROFILE_USING_MODE: + setupProfile(builder, scenario.mode()); + break; + case CLIENT_OVERRIDE_USING_MODE: + builder.overrideConfiguration(o -> o.retryPolicy(mode)); + break; + case CLIENT_OVERRIDE_USING_INSTANCE: + builder.overrideConfiguration(o -> o.retryPolicy(AwsRetryPolicy.forRetryMode(mode))); + break; + case CLIENT_PLUGIN_OVERRIDE_USING_INSTANCE: + case CLIENT_PLUGIN_OVERRIDE_USING_MODE: + builder.addPlugin(new ConfigureRetryScenario(scenario)); + break; + } + } + + private void setupRetryStrategy(BuilderT builder, RetryScenario scenario) { + RetryMode mode = scenario.mode(); + // Note, we don't setup the request level plugins, those need to be added at request time and not when we build the + // client. + switch (scenario.setup()) { + case PROFILE_USING_MODE: + setupProfile(builder, scenario.mode()); + break; + case CLIENT_OVERRIDE_USING_MODE: + builder.overrideConfiguration(o -> o.retryStrategy(mode)); + break; + case CLIENT_OVERRIDE_USING_INSTANCE: + builder.overrideConfiguration(o -> o.retryStrategy(AwsRetryStrategy.forRetryMode(mode))); + break; + case CLIENT_PLUGIN_OVERRIDE_USING_INSTANCE: + case CLIENT_PLUGIN_OVERRIDE_USING_MODE: + builder.addPlugin(new ConfigureRetryScenario(scenario)); + break; + } + } + + private void setupProfile(BuilderT builder, RetryMode mode) { + String modeName = mode.toString().toLowerCase(Locale.ROOT); + ProfileFile profileFile = ProfileFile.builder() + .content(new StringInputStream("[profile retry_test]\n" + + "retry_mode = " + modeName)) + .type(ProfileFile.Type.CONFIGURATION) + .build(); + builder.overrideConfiguration(o -> o.defaultProfileFile(profileFile) + .defaultProfileName("retry_test")).build(); + + } + + private List setupRequestPlugins(RetryScenario scenario) { + List plugins = new ArrayList<>(); + RetryModeSetup setup = scenario.setup(); + if (setup == RetryModeSetup.REQUEST_PLUGIN_OVERRIDE_USING_MODE + || setup == RetryModeSetup.REQUEST_PLUGIN_OVERRIDE_USING_INSTANCE) { + plugins.add(new ConfigureRetryScenario(scenario)); + } + // Plugin to validate the scenarios, must go after plugin to the configure the retry + // scenario. + plugins.add(new ValidateRetryScenario(scenario)); + return plugins; + } + + private BuilderT clientBuilder() { + StaticCredentialsProvider credentialsProvider = + StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid")); + return newClientBuilder().credentialsProvider(credentialsProvider) + .region(Region.US_EAST_1) + .endpointOverride(URI.create("http://localhost:" + wireMock.port())); + } + + private void setupScenarioBefore(RetryScenario scenario) { + if (scenario.setup() == RetryModeSetup.SYSTEM_PROPERTY_USING_MODE) { + System.setProperty("aws.retryMode", scenario.mode().name().toLowerCase(Locale.ROOT)); + } + } + + @BeforeEach + private void beforeEach() { + wireMock.start(); + } + + @AfterEach + private void afterEach() { + System.clearProperty("aws.retryMode"); + wireMock.stop(); + } + + private static int expectedCount(RetryMode mode) { + switch (mode) { + case ADAPTIVE: + case ADAPTIVE_V2: + case STANDARD: + return 3; + case LEGACY: + return 4; + default: + throw new IllegalArgumentException(); + } + } + + private void verifyRequestCount(int count) { + wireMock.verify(count, anyRequestedFor(anyUrl())); + } + + private void stubThrottlingResponse() { + wireMock.stubFor(post(anyUrl()) + .willReturn(aResponse().withStatus(429))); + } + + /** + * For each base scenario we add each possible setup of the retry mode. 
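The profile and system-property setups exercised above mirror what end users can do without touching code. A minimal sketch, mirroring the `aws.retryMode` property the harness sets (the service choice is illustrative):

```java
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;

public class RetryModeBySystemPropertySketch {
    public static void main(String[] args) {
        // The retry-mode resolver consults the aws.retryMode system property (and the
        // retry_mode profile setting), so the mode can be chosen without code changes.
        System.setProperty("aws.retryMode", "adaptive_v2");

        DynamoDbClient client = DynamoDbClient.builder().build();
        // ... the client's resolved retry strategy now follows ADAPTIVE_V2 ...
        client.close();
    }
}
```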
+ */ + private static List allScenarios() { + List result = new ArrayList<>(); + for (RetryScenario scenario : baseScenarios()) { + for (RetryModeSetup setupMode : RetryModeSetup.values()) { + RetryScenario newScenario = scenario.toBuilder().setup(setupMode).build(); + if (isSupportedScenario(newScenario)) { + result.add(newScenario); + } + } + } + return result; + } + + /** + * Not all scenarios are supported, this methods filter those that are not. + */ + private static boolean isSupportedScenario(RetryScenario scenario) { + // Profile now only returns strategies, not policies, except for ADAPTIVE mode for which an adapter + // is used. That case is tested using RetryImplementation.STRATEGY. + if (scenario.retryImplementation() == RetryImplementation.POLICY + && scenario.setup() == RetryModeSetup.PROFILE_USING_MODE) { + return false; + } + + // Using system properties only returns strategies, not policies, except for ADAPTIVE mode for + // which an adapter is used. That case is tested using RetryImplementation.STRATEGY. + if (scenario.retryImplementation() == RetryImplementation.POLICY + && scenario.setup() == RetryModeSetup.SYSTEM_PROPERTY_USING_MODE) { + return false; + } + + // Retry policies only support the legacy ADAPTIVE mode. + if (scenario.retryImplementation() == RetryImplementation.POLICY + && scenario.mode() == RetryMode.ADAPTIVE_V2) { + return false; + } + + return true; + } + + /** + * Base retry scenarios. + */ + private static List baseScenarios() { + return Arrays.asList( + // Retry Policy + RetryScenario.builder() + .mode(RetryMode.LEGACY) + .retryImplementation(RetryImplementation.POLICY) + .expectedClass(RetryPolicy.class) + .build() + , RetryScenario.builder() + .mode(RetryMode.STANDARD) + .retryImplementation(RetryImplementation.POLICY) + .expectedClass(RetryPolicy.class) + .build() + , RetryScenario.builder() + .mode(RetryMode.ADAPTIVE) + .retryImplementation(RetryImplementation.POLICY) + .expectedClass(RetryPolicy.class) + .build() + // Retry Strategy + , RetryScenario.builder() + .mode(RetryMode.LEGACY) + .retryImplementation(RetryImplementation.STRATEGY) + .expectedClass(LegacyRetryStrategy.class) + .build() + , RetryScenario.builder() + .mode(RetryMode.STANDARD) + .retryImplementation(RetryImplementation.STRATEGY) + .expectedClass(StandardRetryStrategy.class) + .build() + , RetryScenario.builder() + .mode(RetryMode.ADAPTIVE) + .retryImplementation(RetryImplementation.STRATEGY) + .expectedClass(RetryPolicyAdapter.class) + .build() + , RetryScenario.builder() + .mode(RetryMode.ADAPTIVE_V2) + .retryImplementation(RetryImplementation.STRATEGY) + .expectedClass(AdaptiveRetryStrategy.class) + .build() + ); + } + + static class RetryScenario { + private final RetryMode mode; + private final Class expectedClass; + private final RetryModeSetup setup; + private final RetryImplementation retryImplementation; + + RetryScenario(Builder builder) { + this.mode = builder.mode; + this.expectedClass = builder.expectedClass; + this.setup = builder.setup; + this.retryImplementation = builder.retryImplementation; + } + + public RetryMode mode() { + return mode; + } + + public Class expectedClass() { + return expectedClass; + } + + public RetryModeSetup setup() { + return setup; + } + + public RetryImplementation retryImplementation() { + return retryImplementation; + } + + public Builder toBuilder() { + return new Builder(this); + } + + @Override + public String toString() { + return mode + " " + retryImplementation + " " + setup; + } + + public static Builder builder() { + return 
new Builder(); + } + + static class Builder { + private RetryMode mode; + private Class expectedClass; + private RetryModeSetup setup; + private RetryImplementation retryImplementation; + + public Builder() { + } + + public Builder(RetryScenario retrySetup) { + this.mode = retrySetup.mode; + this.expectedClass = retrySetup.expectedClass; + this.setup = retrySetup.setup; + this.retryImplementation = retrySetup.retryImplementation; + } + + public Builder mode(RetryMode mode) { + this.mode = mode; + return this; + } + + public Builder expectedClass(Class expectedClass) { + this.expectedClass = expectedClass; + return this; + } + + public Builder setup(RetryModeSetup setup) { + this.setup = setup; + return this; + } + + public Builder retryImplementation(RetryImplementation retryImplementation) { + this.retryImplementation = retryImplementation; + return this; + } + + public RetryScenario build() { + return new RetryScenario(this); + } + } + } + + enum RetryModeSetup { + CLIENT_OVERRIDE_USING_MODE, + CLIENT_OVERRIDE_USING_INSTANCE, + CLIENT_PLUGIN_OVERRIDE_USING_MODE, + CLIENT_PLUGIN_OVERRIDE_USING_INSTANCE, + REQUEST_PLUGIN_OVERRIDE_USING_MODE, + REQUEST_PLUGIN_OVERRIDE_USING_INSTANCE, + PROFILE_USING_MODE, + SYSTEM_PROPERTY_USING_MODE, + } + + enum RetryImplementation { + POLICY, STRATEGY + } + + static class ConfigureRetryScenario implements SdkPlugin { + private RetryScenario scenario; + + ConfigureRetryScenario(RetryScenario scenario) { + this.scenario = scenario; + } + + @Override + public void configureClient(SdkServiceClientConfiguration.Builder config) { + RetryModeSetup setup = scenario.setup(); + if (setup == RetryModeSetup.CLIENT_PLUGIN_OVERRIDE_USING_MODE + || setup == RetryModeSetup.REQUEST_PLUGIN_OVERRIDE_USING_MODE) { + if (scenario.retryImplementation() == RetryImplementation.POLICY) { + config.overrideConfiguration(o -> o.retryPolicy(scenario.mode())); + } else if (scenario.retryImplementation() == RetryImplementation.STRATEGY) { + config.overrideConfiguration(o -> o.retryStrategy(scenario.mode())); + } else { + throw new IllegalArgumentException(); + } + } else if (setup == RetryModeSetup.CLIENT_PLUGIN_OVERRIDE_USING_INSTANCE + || setup == RetryModeSetup.REQUEST_PLUGIN_OVERRIDE_USING_INSTANCE) { + if (scenario.retryImplementation() == RetryImplementation.POLICY) { + config.overrideConfiguration(o -> o.retryPolicy(AwsRetryPolicy.forRetryMode(scenario.mode()))); + } else if (scenario.retryImplementation() == RetryImplementation.STRATEGY) { + config.overrideConfiguration(o -> o.retryStrategy(AwsRetryStrategy.forRetryMode(scenario.mode()))); + } else { + throw new IllegalArgumentException(); + } + } + } + } + + static class ValidateRetryScenario implements SdkPlugin { + private RetryScenario scenario; + + public ValidateRetryScenario(RetryScenario scenario) { + this.scenario = scenario; + } + + @Override + public void configureClient(SdkServiceClientConfiguration.Builder config) { + if (scenario.retryImplementation() == RetryImplementation.POLICY) { + assertThat(config.overrideConfiguration().retryPolicy()).isNotEmpty(); + RetryPolicy policy = config.overrideConfiguration().retryPolicy().get(); + assertThat(policy.retryMode()).isEqualTo(scenario.mode()); + assertThat(policy).isInstanceOf(scenario.expectedClass()); + } else if (scenario.retryImplementation() == RetryImplementation.STRATEGY) { + assertThat(config.overrideConfiguration().retryPolicy()).isEmpty(); + assertThat(config.overrideConfiguration().retryStrategy()).isNotEmpty(); + RetryStrategy strategy = 
config.overrideConfiguration().retryStrategy().get(); + assertThat(SdkDefaultRetryStrategy.retryMode(strategy)).isEqualTo(scenario.mode()); + assertThat(strategy).isInstanceOf(scenario.expectedClass()); + } + } + } + + public static class CapturingInterceptor implements ExecutionInterceptor { + private Context.BeforeTransmission context; + private ExecutionAttributes executionAttributes; + + @Override + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { + this.context = context; + this.executionAttributes = executionAttributes; + throw new RuntimeException("boom!"); + } + + public ExecutionAttributes executionAttributes() { + return executionAttributes; + } + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/SyncRetrySetupTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/SyncRetrySetupTest.java new file mode 100644 index 000000000000..a71323c5a713 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/SyncRetrySetupTest.java @@ -0,0 +1,38 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.retry; + +import java.util.List; +import software.amazon.awssdk.core.SdkPlugin; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClientBuilder; +import software.amazon.awssdk.services.protocolrestjson.model.AllTypesResponse; + +public class SyncRetrySetupTest extends BaseRetrySetupTest { + @Override + protected ProtocolRestJsonClientBuilder newClientBuilder() { + return ProtocolRestJsonClient.builder(); + } + + @Override + protected AllTypesResponse callAllTypes(ProtocolRestJsonClient client, List requestPlugins) { + return client.allTypes(r -> r.overrideConfiguration(c -> { + for (SdkPlugin plugin : requestPlugins) { + c.addPlugin(plugin); + } + })); + } +} From 927a9000615993273b9983db2c45d79104505b9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20Sugawara=20=28=E2=88=A9=EF=BD=80-=C2=B4=29?= =?UTF-8?q?=E2=8A=83=E2=94=81=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E?= Date: Wed, 1 May 2024 14:11:44 -0700 Subject: [PATCH 26/32] Remove a small typo --- .../main/java/software/amazon/awssdk/core/retry/RetryMode.java | 2 +- diff | 0 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 diff diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryMode.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryMode.java index cdcc5ac0bf81..ef84892d21d8 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryMode.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryMode.java @@ -95,7 +95,7 @@ public enum RetryMode { /** * Adaptive V2 retry mode builds on {@link #STANDARD} mode. *
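The sync and async harnesses above attach their scenario plugins per request rather than per client. A minimal sketch of that pattern, reusing the codegen test client those harnesses target (the plugin name and strategy choice are illustrative):

```java
import software.amazon.awssdk.awscore.retry.AwsRetryStrategy;
import software.amazon.awssdk.core.SdkPlugin;
import software.amazon.awssdk.core.SdkServiceClientConfiguration;
import software.amazon.awssdk.core.retry.RetryMode;
import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient;

public class RequestLevelRetryPluginSketch {
    // Swaps in a standard retry strategy, much like the test's ConfigureRetryScenario plugin.
    static final class StandardRetryPlugin implements SdkPlugin {
        @Override
        public void configureClient(SdkServiceClientConfiguration.Builder config) {
            config.overrideConfiguration(o -> o.retryStrategy(AwsRetryStrategy.forRetryMode(RetryMode.STANDARD)));
        }
    }

    public static void main(String[] args) {
        ProtocolRestJsonClient client = ProtocolRestJsonClient.builder().build();

        // The plugin only affects this call; other calls keep the client-level configuration.
        client.allTypes(r -> r.overrideConfiguration(c -> c.addPlugin(new StandardRetryPlugin())));

        client.close();
    }
}
```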

    - * Adaptive retry mode qdynamically limits the rate of AWS requests to maximize success rate. This may be at the + * Adaptive retry mode dynamically limits the rate of AWS requests to maximize success rate. This may be at the * expense of request latency. Adaptive V2 retry mode is not recommended when predictable latency is important. *

    * {@code ADAPTIVE_V2} mode differs from {@link #ADAPTIVE} mode in the computed delays between calls, including the first diff --git a/diff b/diff new file mode 100644 index 000000000000..e69de29bb2d1 From 423c3518a18de6912675c7910efa9a1778a08720 Mon Sep 17 00:00:00 2001 From: John Viegas <70235430+joviegas@users.noreply.github.com> Date: Fri, 10 May 2024 11:32:55 -0700 Subject: [PATCH 27/32] Dumy commit --- .../software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java index 16892c74ad1f..6eb30f765056 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java @@ -70,7 +70,7 @@ private AwsRetryStrategy() { * Update the provided {@link RetryStrategy} to add AWS-specific conditions. * * @param strategy The strategy to update - * @return The updated strategy + * @return The updated strategy. */ public static RetryStrategy addRetryConditions(RetryStrategy strategy) { return strategy.toBuilder() From a20250b85729f9c8712bcb46aa148681fb40292b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20Sugawara=20=28=E2=88=A9=EF=BD=80-=C2=B4=29?= =?UTF-8?q?=E2=8A=83=E2=94=81=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E=E7=82=8E?= Date: Fri, 10 May 2024 11:16:52 -0700 Subject: [PATCH 28/32] Dummy commit to kick the internal build --- core/retries/pom.xml | 1 + 1 file changed, 1 insertion(+) diff --git a/core/retries/pom.xml b/core/retries/pom.xml index fca96098e752..858bd21496ae 100644 --- a/core/retries/pom.xml +++ b/core/retries/pom.xml @@ -12,6 +12,7 @@ ~ on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either ~ express or implied. See the License for the specific language governing ~ permissions and limitations under the License. 
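The `addRetryConditions` hook documented above uses the same `toBuilder()` round trip that application code can use to tweak an SDK-provided strategy. A minimal sketch (the max-attempts value and extra condition are illustrative):

```java
import java.io.IOException;
import software.amazon.awssdk.awscore.retry.AwsRetryStrategy;
import software.amazon.awssdk.core.retry.RetryMode;
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;

public class CustomizedRetryStrategySketch {
    public static void main(String[] args) {
        DynamoDbClient client = DynamoDbClient.builder()
                .overrideConfiguration(o -> o.retryStrategy(
                        AwsRetryStrategy.forRetryMode(RetryMode.STANDARD)
                                        .toBuilder()
                                        .maxAttempts(5)                                  // cap total attempts
                                        .retryOnException(t -> t instanceof IOException) // app-specific condition
                                        .build()))
                .build();

        client.close();
    }
}
```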
+ ~ --> Date: Fri, 10 May 2024 12:03:51 -0700 Subject: [PATCH 29/32] Rename retries-api to retries-spi --- core/aws-core/pom.xml | 2 +- core/pom.xml | 2 +- core/{retries-api => retries-spi}/pom.xml | 2 +- .../amazon/awssdk/retries/api/AcquireInitialTokenRequest.java | 0 .../amazon/awssdk/retries/api/AcquireInitialTokenResponse.java | 0 .../software/amazon/awssdk/retries/api/BackoffStrategy.java | 0 .../amazon/awssdk/retries/api/RecordSuccessRequest.java | 0 .../amazon/awssdk/retries/api/RecordSuccessResponse.java | 0 .../amazon/awssdk/retries/api/RefreshRetryTokenRequest.java | 0 .../amazon/awssdk/retries/api/RefreshRetryTokenResponse.java | 0 .../java/software/amazon/awssdk/retries/api/RetryStrategy.java | 0 .../java/software/amazon/awssdk/retries/api/RetryToken.java | 0 .../awssdk/retries/api/TokenAcquisitionFailedException.java | 0 .../retries/api/internal/AcquireInitialTokenRequestImpl.java | 0 .../retries/api/internal/AcquireInitialTokenResponseImpl.java | 0 .../awssdk/retries/api/internal/RecordSuccessRequestImpl.java | 0 .../awssdk/retries/api/internal/RecordSuccessResponseImpl.java | 0 .../retries/api/internal/RefreshRetryTokenRequestImpl.java | 0 .../retries/api/internal/RefreshRetryTokenResponseImpl.java | 0 .../api/internal/backoff/BackoffStrategiesConstants.java | 0 .../api/internal/backoff/ExponentialDelayWithJitter.java | 0 .../api/internal/backoff/ExponentialDelayWithoutJitter.java | 0 .../retries/api/internal/backoff/FixedDelayWithJitter.java | 0 .../retries/api/internal/backoff/FixedDelayWithoutJitter.java | 0 .../amazon/awssdk/retries/api/internal/backoff/Immediately.java | 0 .../amazon/awssdk/retries/api/RetryStrategyBuilderTest.java | 0 .../api/internal/backoff/ExponentialDelayWithJitterTest.java | 0 .../retries/api/internal/backoff/FixedDelayWithJitterTest.java | 0 core/retries/pom.xml | 2 +- core/sdk-core/pom.xml | 2 +- services/dynamodb/pom.xml | 2 +- services/iam/pom.xml | 2 +- services/s3/pom.xml | 2 +- test/codegen-generated-classes-test/pom.xml | 2 +- test/protocol-tests/pom.xml | 2 +- test/stability-tests/pom.xml | 2 +- test/tests-coverage-reporting/pom.xml | 2 +- 37 files changed, 12 insertions(+), 12 deletions(-) rename core/{retries-api => retries-spi}/pom.xml (98%) rename core/{retries-api => retries-spi}/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenRequest.java (100%) rename core/{retries-api => retries-spi}/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenResponse.java (100%) rename core/{retries-api => retries-spi}/src/main/java/software/amazon/awssdk/retries/api/BackoffStrategy.java (100%) rename core/{retries-api => retries-spi}/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessRequest.java (100%) rename core/{retries-api => retries-spi}/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessResponse.java (100%) rename core/{retries-api => retries-spi}/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenRequest.java (100%) rename core/{retries-api => retries-spi}/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenResponse.java (100%) rename core/{retries-api => retries-spi}/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java (100%) rename core/{retries-api => retries-spi}/src/main/java/software/amazon/awssdk/retries/api/RetryToken.java (100%) rename core/{retries-api => retries-spi}/src/main/java/software/amazon/awssdk/retries/api/TokenAcquisitionFailedException.java (100%) rename core/{retries-api => 
retries-spi}/src/main/java/software/amazon/awssdk/retries/api/internal/AcquireInitialTokenRequestImpl.java (100%) rename core/{retries-api => retries-spi}/src/main/java/software/amazon/awssdk/retries/api/internal/AcquireInitialTokenResponseImpl.java (100%) rename core/{retries-api => retries-spi}/src/main/java/software/amazon/awssdk/retries/api/internal/RecordSuccessRequestImpl.java (100%) rename core/{retries-api => retries-spi}/src/main/java/software/amazon/awssdk/retries/api/internal/RecordSuccessResponseImpl.java (100%) rename core/{retries-api => retries-spi}/src/main/java/software/amazon/awssdk/retries/api/internal/RefreshRetryTokenRequestImpl.java (100%) rename core/{retries-api => retries-spi}/src/main/java/software/amazon/awssdk/retries/api/internal/RefreshRetryTokenResponseImpl.java (100%) rename core/{retries-api => retries-spi}/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/BackoffStrategiesConstants.java (100%) rename core/{retries-api => retries-spi}/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitter.java (100%) rename core/{retries-api => retries-spi}/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithoutJitter.java (100%) rename core/{retries-api => retries-spi}/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitter.java (100%) rename core/{retries-api => retries-spi}/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithoutJitter.java (100%) rename core/{retries-api => retries-spi}/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/Immediately.java (100%) rename core/{retries-api => retries-spi}/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java (100%) rename core/{retries-api => retries-spi}/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitterTest.java (100%) rename core/{retries-api => retries-spi}/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitterTest.java (100%) diff --git a/core/aws-core/pom.xml b/core/aws-core/pom.xml index ec07b992f4d8..43383f6986c6 100644 --- a/core/aws-core/pom.xml +++ b/core/aws-core/pom.xml @@ -101,7 +101,7 @@ software.amazon.awssdk - retries-api + retries-spi ${awsjavasdk.version} diff --git a/core/pom.xml b/core/pom.xml index 49430aaff75f..e7db63f6568e 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -53,7 +53,7 @@ endpoints-spi imds crt-core - retries-api + retries-spi retries checksums-spi checksums diff --git a/core/retries-api/pom.xml b/core/retries-spi/pom.xml similarity index 98% rename from core/retries-api/pom.xml rename to core/retries-spi/pom.xml index 8fde08e21f36..c3bb28585b5c 100644 --- a/core/retries-api/pom.xml +++ b/core/retries-spi/pom.xml @@ -24,7 +24,7 @@ 4.0.0 - retries-api + retries-spi AWS Java SDK :: Retries API diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenRequest.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenRequest.java similarity index 100% rename from core/retries-api/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenRequest.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenRequest.java diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenResponse.java 
b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenResponse.java similarity index 100% rename from core/retries-api/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenResponse.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/AcquireInitialTokenResponse.java diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/BackoffStrategy.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/BackoffStrategy.java similarity index 100% rename from core/retries-api/src/main/java/software/amazon/awssdk/retries/api/BackoffStrategy.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/BackoffStrategy.java diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessRequest.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessRequest.java similarity index 100% rename from core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessRequest.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessRequest.java diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessResponse.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessResponse.java similarity index 100% rename from core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessResponse.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/RecordSuccessResponse.java diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenRequest.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenRequest.java similarity index 100% rename from core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenRequest.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenRequest.java diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenResponse.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenResponse.java similarity index 100% rename from core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenResponse.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/RefreshRetryTokenResponse.java diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java similarity index 100% rename from core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryToken.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/RetryToken.java similarity index 100% rename from core/retries-api/src/main/java/software/amazon/awssdk/retries/api/RetryToken.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/RetryToken.java diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/TokenAcquisitionFailedException.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/TokenAcquisitionFailedException.java similarity index 100% rename from 
core/retries-api/src/main/java/software/amazon/awssdk/retries/api/TokenAcquisitionFailedException.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/TokenAcquisitionFailedException.java diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/AcquireInitialTokenRequestImpl.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/AcquireInitialTokenRequestImpl.java similarity index 100% rename from core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/AcquireInitialTokenRequestImpl.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/AcquireInitialTokenRequestImpl.java diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/AcquireInitialTokenResponseImpl.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/AcquireInitialTokenResponseImpl.java similarity index 100% rename from core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/AcquireInitialTokenResponseImpl.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/AcquireInitialTokenResponseImpl.java diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RecordSuccessRequestImpl.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/RecordSuccessRequestImpl.java similarity index 100% rename from core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RecordSuccessRequestImpl.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/RecordSuccessRequestImpl.java diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RecordSuccessResponseImpl.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/RecordSuccessResponseImpl.java similarity index 100% rename from core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RecordSuccessResponseImpl.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/RecordSuccessResponseImpl.java diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RefreshRetryTokenRequestImpl.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/RefreshRetryTokenRequestImpl.java similarity index 100% rename from core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RefreshRetryTokenRequestImpl.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/RefreshRetryTokenRequestImpl.java diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RefreshRetryTokenResponseImpl.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/RefreshRetryTokenResponseImpl.java similarity index 100% rename from core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/RefreshRetryTokenResponseImpl.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/RefreshRetryTokenResponseImpl.java diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/BackoffStrategiesConstants.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/BackoffStrategiesConstants.java similarity index 100% rename from 
core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/BackoffStrategiesConstants.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/BackoffStrategiesConstants.java diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitter.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitter.java similarity index 100% rename from core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitter.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitter.java diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithoutJitter.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithoutJitter.java similarity index 100% rename from core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithoutJitter.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithoutJitter.java diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitter.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitter.java similarity index 100% rename from core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitter.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitter.java diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithoutJitter.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithoutJitter.java similarity index 100% rename from core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithoutJitter.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithoutJitter.java diff --git a/core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/Immediately.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/Immediately.java similarity index 100% rename from core/retries-api/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/Immediately.java rename to core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/internal/backoff/Immediately.java diff --git a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java b/core/retries-spi/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java similarity index 100% rename from core/retries-api/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java rename to core/retries-spi/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java diff --git a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitterTest.java b/core/retries-spi/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitterTest.java similarity index 100% rename from 
core/retries-api/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitterTest.java rename to core/retries-spi/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/ExponentialDelayWithJitterTest.java diff --git a/core/retries-api/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitterTest.java b/core/retries-spi/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitterTest.java similarity index 100% rename from core/retries-api/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitterTest.java rename to core/retries-spi/src/test/java/software/amazon/awssdk/retries/api/internal/backoff/FixedDelayWithJitterTest.java diff --git a/core/retries/pom.xml b/core/retries/pom.xml index 858bd21496ae..803d3f2e7165 100644 --- a/core/retries/pom.xml +++ b/core/retries/pom.xml @@ -46,7 +46,7 @@ software.amazon.awssdk - retries-api + retries-spi ${awsjavasdk.version} diff --git a/core/sdk-core/pom.xml b/core/sdk-core/pom.xml index 1445bba4402c..861135403d4c 100644 --- a/core/sdk-core/pom.xml +++ b/core/sdk-core/pom.xml @@ -89,7 +89,7 @@ software.amazon.awssdk - retries-api + retries-spi ${awsjavasdk.version} diff --git a/services/dynamodb/pom.xml b/services/dynamodb/pom.xml index 4bba4cf765e1..072e3d97747e 100644 --- a/services/dynamodb/pom.xml +++ b/services/dynamodb/pom.xml @@ -64,7 +64,7 @@ software.amazon.awssdk - retries-api + retries-spi ${awsjavasdk.version} diff --git a/services/iam/pom.xml b/services/iam/pom.xml index 56d72393840d..ce461ac38f89 100644 --- a/services/iam/pom.xml +++ b/services/iam/pom.xml @@ -63,7 +63,7 @@ software.amazon.awssdk - retries-api + retries-spi ${awsjavasdk.version} diff --git a/services/s3/pom.xml b/services/s3/pom.xml index 4fc0d20e2c07..4cd94657bc6c 100644 --- a/services/s3/pom.xml +++ b/services/s3/pom.xml @@ -119,7 +119,7 @@ software.amazon.awssdk - retries-api + retries-spi ${awsjavasdk.version} diff --git a/test/codegen-generated-classes-test/pom.xml b/test/codegen-generated-classes-test/pom.xml index 92bb4eb339e3..a245cab3a3b5 100644 --- a/test/codegen-generated-classes-test/pom.xml +++ b/test/codegen-generated-classes-test/pom.xml @@ -142,7 +142,7 @@ software.amazon.awssdk - retries-api + retries-spi ${awsjavasdk.version} diff --git a/test/protocol-tests/pom.xml b/test/protocol-tests/pom.xml index 140b796206df..c8348c8513a8 100644 --- a/test/protocol-tests/pom.xml +++ b/test/protocol-tests/pom.xml @@ -140,7 +140,7 @@ software.amazon.awssdk - retries-api + retries-spi ${awsjavasdk.version} diff --git a/test/stability-tests/pom.xml b/test/stability-tests/pom.xml index dedcd50809e7..c4593d600ac4 100644 --- a/test/stability-tests/pom.xml +++ b/test/stability-tests/pom.xml @@ -85,7 +85,7 @@ software.amazon.awssdk - retries-api + retries-spi ${awsjavasdk.version} test diff --git a/test/tests-coverage-reporting/pom.xml b/test/tests-coverage-reporting/pom.xml index 836ec3cfa736..c1ac8ea82ed7 100644 --- a/test/tests-coverage-reporting/pom.xml +++ b/test/tests-coverage-reporting/pom.xml @@ -48,7 +48,7 @@ ${awsjavasdk.version} - retries-api + retries-spi software.amazon.awssdk ${awsjavasdk.version} From 4f55d578f24b16acbbed4a2fd2bf096ff04be2e7 Mon Sep 17 00:00:00 2001 From: Manuel Sugawara Date: Fri, 10 May 2024 15:25:19 -0700 Subject: [PATCH 30/32] Add retry packages to brazil (#5215) * Add retry packages to brazil * Update pom's as per the new module checklist --- .brazil.json | 2 ++ bom/pom.xml | 10 ++++++++++ pom.xml | 2 ++ 3 files 
changed, 14 insertions(+) diff --git a/.brazil.json b/.brazil.json index 544e62198ed2..0c610729ca00 100644 --- a/.brazil.json +++ b/.brazil.json @@ -41,6 +41,8 @@ "http-auth-aws": { "packageName": "AwsJavaSdk-Core-HttpAuthAws" }, "http-auth-aws-crt": { "packageName": "AwsJavaSdk-Core-HttpAuthAwsCrt" }, "http-auth-aws-eventstream": { "packageName": "AwsJavaSdk-Core-HttpAuthAwsEventStream" }, + "retries-spi": { "packageName": "AwsJavaSdk-Core-RetriesSpi" }, + "retries": { "packageName": "AwsJavaSdk-Core-Retries" }, "dynamodb": { "packageName": "AwsJavaSdk-DynamoDb" }, "waf": { "packageName": "AwsJavaSdk-Waf" }, diff --git a/bom/pom.xml b/bom/pom.xml index 36a7c700b457..73cd91a5db88 100644 --- a/bom/pom.xml +++ b/bom/pom.xml @@ -172,6 +172,16 @@ http-client-spi ${awsjavasdk.version} + + software.amazon.awssdk + retries + ${awsjavasdk.version} + + + software.amazon.awssdk + retries-spi + ${awsjavasdk.version} + software.amazon.awssdk apache-client diff --git a/pom.xml b/pom.xml index fa23a7975ed4..0e173e6577b8 100644 --- a/pom.xml +++ b/pom.xml @@ -652,6 +652,8 @@ cloudwatch-metric-publisher utils imds + retries + retries-spi dynamodb-enhanced From c8ac21eeb638f8e4fa34ea7ba1d99e155b3dc5fb Mon Sep 17 00:00:00 2001 From: Manuel Sugawara Date: Tue, 4 Jun 2024 11:29:19 -0700 Subject: [PATCH 31/32] =?UTF-8?q?Remove=20type=20params=20from=20RetryStra?= =?UTF-8?q?tegy,=20but=20keep=20them=20in=20RetryStrategy=E2=80=A6=20(#526?= =?UTF-8?q?2)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Remove type params from RetryStrategy, but keep them in RetryStrategy.Builder * Rename from `none` to `doNotRetry` to clarify the behavior --- .../builder/AwsDefaultClientBuilder.java | 4 +- .../awscore/retry/AwsRetryStrategy.java | 20 ++++----- .../awssdk/retries/api/RetryStrategy.java | 16 ++----- .../retries/api/RetryStrategyBuilderTest.java | 2 +- .../awssdk/retries/AdaptiveRetryStrategy.java | 2 +- .../awssdk/retries/DefaultRetryStrategy.java | 4 +- .../awssdk/retries/LegacyRetryStrategy.java | 2 +- .../awssdk/retries/StandardRetryStrategy.java | 2 +- .../retries/internal/BaseRetryStrategy.java | 45 ++++++------------- .../DefaultAdaptiveRetryStrategy.java | 2 +- .../internal/DefaultLegacyRetryStrategy.java | 2 +- .../DefaultStandardRetryStrategy.java | 2 +- ...ategyCircuitBreakerRemembersStateTest.java | 2 +- .../internal/RetryStrategyCommonTest.java | 4 +- ...azonHttpClientSslHandshakeTimeoutTest.java | 2 +- ...tionPoolMaxConnectionsIntegrationTest.java | 2 +- .../builder/SdkDefaultClientBuilder.java | 4 +- .../config/ClientOverrideConfiguration.java | 10 ++--- .../stages/utils/RetryableStageHelper2.java | 6 +-- .../internal/retry/RetryPolicyAdapter.java | 2 +- .../retry/SdkDefaultRetryStrategy.java | 12 ++--- .../AsyncClientHandlerExceptionTest.java | 2 +- .../handler/AsyncClientHandlerTest.java | 2 +- .../client/handler/SyncClientHandlerTest.java | 2 +- .../AsyncHttpClientApiCallTimeoutTests.java | 2 +- .../HttpClientApiCallAttemptTimeoutTest.java | 2 +- .../timers/HttpClientApiCallTimeoutTest.java | 3 +- .../src/test/java/utils/HttpTestUtils.java | 8 ++-- .../dynamodb/DynamoDbRetryPolicy.java | 4 +- .../dynamodb/DynamoDbRetryPolicyTest.java | 14 +++--- .../internal/crt/DefaultS3CrtAsyncClient.java | 2 +- .../amazon/awssdk/services/SdkPluginTest.java | 2 +- .../services/retry/BaseRetrySetupTest.java | 2 +- .../tests/AsyncResponseThreadingTest.java | 4 +- .../SyncClientConnectionInterruptionTest.java | 3 +- .../tests/retry/AsyncAwsJsonRetryTest.java | 2 +- 
.../tests/retry/AwsJsonRetryTest.java | 2 +- .../AsyncApiCallAttemptsTimeoutTest.java | 2 +- .../async/AsyncApiCallTimeoutTest.java | 4 +- .../sync/SyncApiCallAttemptTimeoutTest.java | 2 +- .../timeout/sync/SyncApiCallTimeoutTest.java | 2 +- ...ingOperationApiCallAttemptTimeoutTest.java | 2 +- ...cStreamingOperationApiCallTimeoutTest.java | 2 +- .../CloudWatchCrtAsyncStabilityTest.java | 2 +- .../CloudWatchNettyAsyncStabilityTest.java | 2 +- ...ncWithCrtAsyncHttpClientStabilityTest.java | 2 +- .../tests/s3/S3NettyAsyncStabilityTest.java | 2 +- .../tests/sqs/SqsCrtAsyncStabilityTest.java | 2 +- 48 files changed, 100 insertions(+), 127 deletions(-) diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java index b4c1d76b088e..a8adecea5119 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/builder/AwsDefaultClientBuilder.java @@ -348,7 +348,7 @@ private void configureRetryPolicy(SdkClientConfiguration.Builder config) { } private void configureRetryStrategy(SdkClientConfiguration.Builder config) { - RetryStrategy strategy = config.option(SdkClientOption.RETRY_STRATEGY); + RetryStrategy strategy = config.option(SdkClientOption.RETRY_STRATEGY); if (strategy != null) { config.option(SdkClientOption.RETRY_STRATEGY, AwsRetryStrategy.configureStrategy(strategy.toBuilder()).build()); return; @@ -356,7 +356,7 @@ private void configureRetryStrategy(SdkClientConfiguration.Builder config) { config.lazyOption(SdkClientOption.RETRY_STRATEGY, this::resolveAwsRetryStrategy); } - private RetryStrategy resolveAwsRetryStrategy(LazyValueSource config) { + private RetryStrategy resolveAwsRetryStrategy(LazyValueSource config) { RetryMode retryMode = RetryMode.resolver() .profileFile(config.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) .profileName(config.get(SdkClientOption.PROFILE_NAME)) diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java index 6eb30f765056..510c77ae030b 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java @@ -41,7 +41,7 @@ private AwsRetryStrategy() { * * @return The default retry strategy. */ - public static RetryStrategy defaultRetryStrategy() { + public static RetryStrategy defaultRetryStrategy() { return forRetryMode(RetryMode.defaultRetryMode()); } @@ -51,7 +51,7 @@ private AwsRetryStrategy() { * @param mode The retry mode for which we want to create a retry strategy. * @return A retry strategy for the given retry mode. */ - public static RetryStrategy forRetryMode(RetryMode mode) { + public static RetryStrategy forRetryMode(RetryMode mode) { switch (mode) { case STANDARD: return standardRetryStrategy(); @@ -72,19 +72,19 @@ private AwsRetryStrategy() { * @param strategy The strategy to update * @return The updated strategy. */ - public static RetryStrategy addRetryConditions(RetryStrategy strategy) { + public static RetryStrategy addRetryConditions(RetryStrategy strategy) { return strategy.toBuilder() .retryOnException(AwsRetryStrategy::retryOnAwsRetryableErrors) .build(); } /** - * Returns a retry strategy that does not retry. 
+ * Returns a retry strategy that do not retry. * - * @return A retry strategy that does not retry. + * @return A retry strategy that do not retry. */ - public static RetryStrategy none() { - return DefaultRetryStrategy.none(); + public static RetryStrategy doNotRetry() { + return DefaultRetryStrategy.doNotRetry(); } /** @@ -152,11 +152,11 @@ private static boolean retryOnAwsRetryableErrors(Throwable ex) { } /** - * Returns a {@link RetryStrategy} that implements the legacy {@link RetryMode#ADAPTIVE} mode. + * Returns a {@link RetryStrategy} that implements the legacy {@link RetryMode#ADAPTIVE} mode. * - * @return a {@link RetryStrategy} that implements the legacy {@link RetryMode#ADAPTIVE} mode. + * @return a {@link RetryStrategy} that implements the legacy {@link RetryMode#ADAPTIVE} mode. */ - private static RetryStrategy legacyAdaptiveRetryStrategy() { + private static RetryStrategy legacyAdaptiveRetryStrategy() { return RetryPolicyAdapter.builder() .retryPolicy(AwsRetryPolicy.forRetryMode(RetryMode.ADAPTIVE)) .build(); diff --git a/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java index 016ddff2d1b1..d2653d0ff6d6 100644 --- a/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java +++ b/core/retries-spi/src/main/java/software/amazon/awssdk/retries/api/RetryStrategy.java @@ -18,8 +18,6 @@ import java.util.function.Predicate; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.ThreadSafe; -import software.amazon.awssdk.utils.builder.CopyableBuilder; -import software.amazon.awssdk.utils.builder.ToCopyableBuilder; /** * A strategy used by an SDK to determine when something should be retried. @@ -41,10 +39,7 @@ */ @ThreadSafe @SdkPublicApi -public interface RetryStrategy< - B extends CopyableBuilder & RetryStrategy.Builder, - T extends ToCopyableBuilder & RetryStrategy> - extends ToCopyableBuilder { +public interface RetryStrategy { /** * Invoked before the first request attempt. * @@ -95,16 +90,14 @@ public interface RetryStrategy< * *

    This is useful for modifying the strategy's behavior, like conditions or max retries. */ - @Override - B toBuilder(); + Builder toBuilder(); /** * Builder to create immutable instances of {@link RetryStrategy}. */ interface Builder< - B extends Builder & CopyableBuilder, - T extends ToCopyableBuilder & RetryStrategy> - extends CopyableBuilder { + B extends Builder, + T extends RetryStrategy> { /** * Configure the strategy to retry when the provided predicate returns true, given a failure exception. */ @@ -214,7 +207,6 @@ default B retryOnRootCauseInstanceOf(Class throwable) { /** * Build a new {@link RetryStrategy} with the current configuration on this builder. */ - @Override T build(); } } diff --git a/core/retries-spi/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java b/core/retries-spi/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java index 99c9ec7ae516..85e1113245d3 100644 --- a/core/retries-spi/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java +++ b/core/retries-spi/src/test/java/software/amazon/awssdk/retries/api/RetryStrategyBuilderTest.java @@ -164,7 +164,7 @@ public DummyRetryStrategy build() { } } - static class DummyRetryStrategy implements RetryStrategy { + static class DummyRetryStrategy implements RetryStrategy { @Override public AcquireInitialTokenResponse acquireInitialToken(AcquireInitialTokenRequest request) { diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/AdaptiveRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/AdaptiveRetryStrategy.java index 9c16aed4d04e..1aeb4eea4c04 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/AdaptiveRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/AdaptiveRetryStrategy.java @@ -48,7 +48,7 @@ */ @SdkPublicApi @ThreadSafe -public interface AdaptiveRetryStrategy extends RetryStrategy { +public interface AdaptiveRetryStrategy extends RetryStrategy { /** * Create a new {@link AdaptiveRetryStrategy.Builder}. diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java index 7df5c5cf710a..5574e0e22b5d 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java @@ -30,9 +30,9 @@ private DefaultRetryStrategy() { } /** - * Creates a non-retrying strategy. + * Returns a retry strategy that do not retry. */ - public static StandardRetryStrategy none() { + public static StandardRetryStrategy doNotRetry() { return standardStrategyBuilder() .maxAttempts(1) .build(); diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/LegacyRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/LegacyRetryStrategy.java index 91b13d4f1d2a..53c50c2d3aa5 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/LegacyRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/LegacyRetryStrategy.java @@ -45,7 +45,7 @@ */ @SdkPublicApi @ThreadSafe -public interface LegacyRetryStrategy extends RetryStrategy { +public interface LegacyRetryStrategy extends RetryStrategy { /** * Create a new {@link LegacyRetryStrategy.Builder}. 
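To make the RetryStrategy contract shown above concrete, here is a minimal caller-side sketch of the acquire/refresh/record-success token lifecycle. It is an illustration only, not code from this change: the scope string, the callService() placeholder, and the use of Thread.sleep() to honor the suggested delay are assumptions made for the example, and the request/response factory methods are used as published in the retries-api module.

import java.io.IOException;
import java.time.Duration;
import software.amazon.awssdk.retries.DefaultRetryStrategy;
import software.amazon.awssdk.retries.api.AcquireInitialTokenRequest;
import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse;
import software.amazon.awssdk.retries.api.RecordSuccessRequest;
import software.amazon.awssdk.retries.api.RefreshRetryTokenRequest;
import software.amazon.awssdk.retries.api.RefreshRetryTokenResponse;
import software.amazon.awssdk.retries.api.RetryStrategy;
import software.amazon.awssdk.retries.api.RetryToken;

public final class RetryTokenLifecycleSketch {

    public static void callWithRetries() throws Exception {
        // Standard strategy; maxAttempts and the retry predicate mirror the Builder
        // methods shown in the RetryStrategy interface above.
        RetryStrategy strategy = DefaultRetryStrategy.standardStrategyBuilder()
                                                     .maxAttempts(3)
                                                     .retryOnException(ex -> ex instanceof IOException)
                                                     .build();

        // 1. Acquire a token before the first attempt.
        AcquireInitialTokenResponse acquired =
            strategy.acquireInitialToken(AcquireInitialTokenRequest.create("sketch-scope"));
        RetryToken token = acquired.token();

        while (true) {
            try {
                callService(); // placeholder for the operation being retried
                // 2. Report success so the strategy can update its internal state.
                strategy.recordSuccess(RecordSuccessRequest.create(token));
                return;
            } catch (IOException failure) {
                // 3. Refresh the token; this throws TokenAcquisitionFailedException when
                //    the failure is not retryable or the attempts are exhausted.
                RefreshRetryTokenResponse refreshed = strategy.refreshRetryToken(
                    RefreshRetryTokenRequest.builder()
                                            .token(token)
                                            .failure(failure)
                                            .build());
                token = refreshed.token();
                Duration delay = refreshed.delay();
                Thread.sleep(delay.toMillis()); // honor the suggested backoff
            }
        }
    }

    private static void callService() throws IOException {
        // Placeholder; a real caller would invoke the remote operation here.
    }
}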
* diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/StandardRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/StandardRetryStrategy.java index ae2b38feea68..2440c998195c 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/StandardRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/StandardRetryStrategy.java @@ -42,7 +42,7 @@ */ @SdkPublicApi @ThreadSafe -public interface StandardRetryStrategy extends RetryStrategy { +public interface StandardRetryStrategy extends RetryStrategy { /** * Create a new {@link StandardRetryStrategy.Builder}. * diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/BaseRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/BaseRetryStrategy.java index 0c312a31388c..260f46f28730 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/BaseRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/BaseRetryStrategy.java @@ -38,18 +38,13 @@ import software.amazon.awssdk.retries.internal.circuitbreaker.TokenBucketStore; import software.amazon.awssdk.utils.Logger; import software.amazon.awssdk.utils.Validate; -import software.amazon.awssdk.utils.builder.CopyableBuilder; -import software.amazon.awssdk.utils.builder.ToCopyableBuilder; /** - * Generic class that implements that common logic for all the retries - * strategies with extension points for specific strategies to tailor - * the behavior to its needs. + * Generic class that implements that common logic for all the retries strategies with extension points for specific strategies to + * tailor the behavior to its needs. */ @SdkInternalApi -public abstract class BaseRetryStrategy< - B extends CopyableBuilder & RetryStrategy.Builder, - T extends ToCopyableBuilder & RetryStrategy> implements RetryStrategy { +public abstract class BaseRetryStrategy implements RetryStrategy { protected final Logger log; protected final List> retryPredicates; @@ -70,8 +65,7 @@ public abstract class BaseRetryStrategy< } /** - * This method implements the logic of {@link - * RetryStrategy#acquireInitialToken(AcquireInitialTokenRequest)}. + * This method implements the logic of {@link RetryStrategy#acquireInitialToken(AcquireInitialTokenRequest)}. * * @see RetryStrategy#acquireInitialToken(AcquireInitialTokenRequest) */ @@ -83,8 +77,7 @@ public final AcquireInitialTokenResponse acquireInitialToken(AcquireInitialToken } /** - * This method implements the logic of {@link - * RetryStrategy#refreshRetryToken(RefreshRetryTokenRequest)}. + * This method implements the logic of {@link RetryStrategy#refreshRetryToken(RefreshRetryTokenRequest)}. * * @see RetryStrategy#refreshRetryToken(RefreshRetryTokenRequest) */ @@ -115,8 +108,7 @@ public final RefreshRetryTokenResponse refreshRetryToken(RefreshRetryTokenReques } /** - * This method implements the logic of {@link - * RetryStrategy#recordSuccess(RecordSuccessRequest)}. + * This method implements the logic of {@link RetryStrategy#recordSuccess(RecordSuccessRequest)}. * * @see RetryStrategy#recordSuccess(RecordSuccessRequest) */ @@ -143,24 +135,18 @@ public int maxAttempts() { return maxAttempts; } - @Override - public abstract B toBuilder(); /** - * Computes the backoff before the first attempt, by default - * {@link Duration#ZERO}. Extending classes can override - * this method to compute different a different depending on their - * logic. 
+ * Computes the backoff before the first attempt, by default {@link Duration#ZERO}. Extending classes can override this method + * to compute different a different depending on their logic. */ protected Duration computeInitialBackoff(AcquireInitialTokenRequest request) { return Duration.ZERO; } /** - * Computes the backoff before a retry using the configured - * backoff strategy. Extending classes can override - * this method to compute different a different depending on their - * logic. + * Computes the backoff before a retry using the configured backoff strategy. Extending classes can override this method to + * compute different a different depending on their logic. */ protected Duration computeBackoff(RefreshRetryTokenRequest request, DefaultRetryToken token) { Duration backoff = backoffStrategy.computeDelay(token.attempt()); @@ -169,24 +155,21 @@ protected Duration computeBackoff(RefreshRetryTokenRequest request, DefaultRetry } /** - * Called inside {@link #recordSuccess} to allow extending classes - * to update their internal state after a successful request. + * Called inside {@link #recordSuccess} to allow extending classes to update their internal state after a successful request. */ protected void updateStateForSuccess(DefaultRetryToken token) { } /** - * Called inside {@link #refreshRetryToken} to allow extending - * classes to update their internal state before retrying a + * Called inside {@link #refreshRetryToken} to allow extending classes to update their internal state before retrying a * request. */ protected void updateStateForRetry(RefreshRetryTokenRequest request) { } /** - * Returns the amount of tokens to withdraw from the token - * bucket. Extending classes can override this method to tailor - * this amount for the specific kind of failure. + * Returns the amount of tokens to withdraw from the token bucket. Extending classes can override this method to tailor this + * amount for the specific kind of failure. 
*/ protected int exceptionCost(RefreshRetryTokenRequest request) { if (circuitBreakerEnabled) { diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultAdaptiveRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultAdaptiveRetryStrategy.java index 137b264496ba..63b5b920f03c 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultAdaptiveRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultAdaptiveRetryStrategy.java @@ -30,7 +30,7 @@ @SdkInternalApi public final class DefaultAdaptiveRetryStrategy - extends BaseRetryStrategy implements AdaptiveRetryStrategy { + extends BaseRetryStrategy implements AdaptiveRetryStrategy { private static final Logger LOG = Logger.loggerFor(DefaultAdaptiveRetryStrategy.class); private final Predicate treatAsThrottling; diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultLegacyRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultLegacyRetryStrategy.java index 94c072bd63ea..7e75d3b17a95 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultLegacyRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultLegacyRetryStrategy.java @@ -27,7 +27,7 @@ @SdkInternalApi public final class DefaultLegacyRetryStrategy - extends BaseRetryStrategy implements LegacyRetryStrategy { + extends BaseRetryStrategy implements LegacyRetryStrategy { private static final Logger LOG = Logger.loggerFor(LegacyRetryStrategy.class); private final BackoffStrategy throttlingBackoffStrategy; private final int throttlingExceptionCost; diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultStandardRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultStandardRetryStrategy.java index fee90141a455..5bd15001989b 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultStandardRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/internal/DefaultStandardRetryStrategy.java @@ -24,7 +24,7 @@ @SdkInternalApi public final class DefaultStandardRetryStrategy - extends BaseRetryStrategy implements StandardRetryStrategy { + extends BaseRetryStrategy implements StandardRetryStrategy { private static final Logger LOG = Logger.loggerFor(DefaultStandardRetryStrategy.class); DefaultStandardRetryStrategy(Builder builder) { diff --git a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/RetryStrategyCircuitBreakerRemembersStateTest.java b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/RetryStrategyCircuitBreakerRemembersStateTest.java index 92ca99e1c9d5..bff1d7e5f906 100644 --- a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/RetryStrategyCircuitBreakerRemembersStateTest.java +++ b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/RetryStrategyCircuitBreakerRemembersStateTest.java @@ -49,7 +49,7 @@ void circuitBreakerRemembersState(Function defaultTestCaseSupp // The test case will throw twice and then succeed, so each run will withdraw 2 * TEST_EXCEPTION_COST and deposit back // TEST_EXCEPTION_COST. 
- RetryStrategy strategy = testCase.builder.build(); + RetryStrategy strategy = testCase.builder.build(); int total = TEST_MAX; for (int idx = 0; idx < 9; idx++) { String name = testCase.name + " round " + idx; diff --git a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/RetryStrategyCommonTest.java b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/RetryStrategyCommonTest.java index e9fc3f8b56fc..87bdb46d19c1 100644 --- a/core/retries/src/test/java/software/amazon/awssdk/retries/internal/RetryStrategyCommonTest.java +++ b/core/retries/src/test/java/software/amazon/awssdk/retries/internal/RetryStrategyCommonTest.java @@ -230,11 +230,11 @@ public TestCase expectLastRecordedDelay(Duration delay) { } public void run() { - RetryStrategy strategy = builder.build(); + RetryStrategy strategy = builder.build(); runTestCase(this, strategy); } - public static void runTestCase(TestCase testCase, RetryStrategy strategy) { + public static void runTestCase(TestCase testCase, RetryStrategy strategy) { AcquireInitialTokenResponse res = strategy.acquireInitialToken(AcquireInitialTokenRequestImpl.create(testCase.scope)); RetryToken token = res.token(); testCase.succeeded = false; diff --git a/core/sdk-core/src/it/java/software/amazon/awssdk/core/http/AmazonHttpClientSslHandshakeTimeoutTest.java b/core/sdk-core/src/it/java/software/amazon/awssdk/core/http/AmazonHttpClientSslHandshakeTimeoutTest.java index 783b39161f0e..3b82be2be2c8 100644 --- a/core/sdk-core/src/it/java/software/amazon/awssdk/core/http/AmazonHttpClientSslHandshakeTimeoutTest.java +++ b/core/sdk-core/src/it/java/software/amazon/awssdk/core/http/AmazonHttpClientSslHandshakeTimeoutTest.java @@ -48,7 +48,7 @@ public class AmazonHttpClientSslHandshakeTimeoutTest extends UnresponsiveMockSer @Test(timeout = 60 * 1000) public void testSslHandshakeTimeout() { AmazonSyncHttpClient httpClient = HttpTestUtils.testClientBuilder() - .retryStrategy(DefaultRetryStrategy.none()) + .retryStrategy(DefaultRetryStrategy.doNotRetry()) .httpClient(ApacheHttpClient.builder() .socketTimeout(CLIENT_SOCKET_TO) .build()) diff --git a/core/sdk-core/src/it/java/software/amazon/awssdk/core/http/ConnectionPoolMaxConnectionsIntegrationTest.java b/core/sdk-core/src/it/java/software/amazon/awssdk/core/http/ConnectionPoolMaxConnectionsIntegrationTest.java index c017392c09c6..942b6c957820 100644 --- a/core/sdk-core/src/it/java/software/amazon/awssdk/core/http/ConnectionPoolMaxConnectionsIntegrationTest.java +++ b/core/sdk-core/src/it/java/software/amazon/awssdk/core/http/ConnectionPoolMaxConnectionsIntegrationTest.java @@ -57,7 +57,7 @@ public static void tearDown() { public void leasing_a_new_connection_fails_with_connection_pool_timeout() { AmazonSyncHttpClient httpClient = HttpTestUtils.testClientBuilder() - .retryStrategy(DefaultRetryStrategy.none()) + .retryStrategy(DefaultRetryStrategy.doNotRetry()) .httpClient(ApacheHttpClient.builder() .connectionTimeout(Duration.ofMillis(100)) .maxConnections(1) diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java index 015d26423b57..2a54f8b662c0 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java @@ -368,7 +368,7 @@ protected SdkClientConfiguration 
invokePlugins(SdkClientConfiguration config) { return config; } - private String resolveRetryMode(RetryPolicy retryPolicy, RetryStrategy retryStrategy) { + private String resolveRetryMode(RetryPolicy retryPolicy, RetryStrategy retryStrategy) { if (retryPolicy != null) { return retryPolicy.retryMode().toString(); } @@ -394,7 +394,7 @@ private String resolveClientUserAgent(LazyValueSource config) { retryMode); } - private RetryStrategy resolveRetryStrategy(LazyValueSource config) { + private RetryStrategy resolveRetryStrategy(LazyValueSource config) { RetryMode retryMode = RetryMode.resolver() .profileFile(config.get(PROFILE_FILE_SUPPLIER)) .profileName(config.get(PROFILE_NAME)) diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java index 2e09a2952ccb..891896b78f18 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java @@ -214,7 +214,7 @@ public Optional retryPolicy() { * * @see Builder#retryStrategy(RetryStrategy) */ - public Optional> retryStrategy() { + public Optional retryStrategy() { return Optional.ofNullable(config.option(RETRY_STRATEGY)); } @@ -463,7 +463,7 @@ default Builder retryStrategy(RetryMode retryMode) { * * @see ClientOverrideConfiguration#retryStrategy() */ - Builder retryStrategy(RetryStrategy retryStrategy); + Builder retryStrategy(RetryStrategy retryStrategy); /** * Configure the retry strategy that should be used when handling failure cases. @@ -475,7 +475,7 @@ default Builder retryStrategy(Consumer> mutator) { return retryStrategy(builder.build()); } - RetryStrategy retryStrategy(); + RetryStrategy retryStrategy(); /** * Configure a list of execution interceptors that will have access to read and modify the request and response objcets as @@ -777,7 +777,7 @@ public RetryPolicy retryPolicy() { } @Override - public Builder retryStrategy(RetryStrategy retryStrategy) { + public Builder retryStrategy(RetryStrategy retryStrategy) { config.option(RETRY_STRATEGY, retryStrategy); return this; } @@ -787,7 +787,7 @@ public void setRetryStrategy(RetryStrategy retryStrategy) { } @Override - public RetryStrategy retryStrategy() { + public RetryStrategy retryStrategy() { return config.option(RETRY_STRATEGY); } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/utils/RetryableStageHelper2.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/utils/RetryableStageHelper2.java index ce478cd4b84c..13cbb601789b 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/utils/RetryableStageHelper2.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/utils/RetryableStageHelper2.java @@ -61,7 +61,7 @@ public final class RetryableStageHelper2 { private final SdkHttpFullRequest request; private final RequestExecutionContext context; private RetryPolicyAdapter retryPolicyAdapter; - private final RetryStrategy retryStrategy; + private final RetryStrategy retryStrategy; private final HttpClientDependencies dependencies; private final List exceptionMessageHistory = new ArrayList<>(); private int attemptNumber = 0; @@ -74,7 +74,7 @@ public RetryableStageHelper2(SdkHttpFullRequest request, this.request 
= request; this.context = context; RetryPolicy retryPolicy = dependencies.clientConfiguration().option(SdkClientOption.RETRY_POLICY); - RetryStrategy retryStrategy = dependencies.clientConfiguration().option(SdkClientOption.RETRY_STRATEGY); + RetryStrategy retryStrategy = dependencies.clientConfiguration().option(SdkClientOption.RETRY_STRATEGY); if (retryPolicy != null) { retryPolicyAdapter = RetryPolicyAdapter.builder() .retryPolicy(retryPolicy) @@ -262,7 +262,7 @@ private int retriesAttemptedSoFar() { * wrap it is returned. This allows this code to be backwards compatible with previously configured retry-policies by the * calling code. */ - private RetryStrategy retryStrategy() { + private RetryStrategy retryStrategy() { if (retryPolicyAdapter != null) { if (retryPolicyAdapter.isInitialized()) { retryPolicyAdapter = retryPolicyAdapter.toBuilder() diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/RetryPolicyAdapter.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/RetryPolicyAdapter.java index e556446f3091..d64a26509615 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/RetryPolicyAdapter.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/RetryPolicyAdapter.java @@ -41,7 +41,7 @@ * Implements the {@link RetryStrategy} interface by wrapping a {@link RetryPolicy} instance. */ @SdkInternalApi -public final class RetryPolicyAdapter implements RetryStrategy { +public final class RetryPolicyAdapter implements RetryStrategy { private final RetryPolicy retryPolicy; private final RetryPolicyContext retryPolicyContext; private final RateLimitingTokenBucket rateLimitingTokenBucket; diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/SdkDefaultRetryStrategy.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/SdkDefaultRetryStrategy.java index 37cb716d230c..93fcbfce8e35 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/SdkDefaultRetryStrategy.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/retry/SdkDefaultRetryStrategy.java @@ -41,7 +41,7 @@ private SdkDefaultRetryStrategy() { * * @return the default retry strategy for the configured retry mode. */ - public static RetryStrategy defaultRetryStrategy() { + public static RetryStrategy defaultRetryStrategy() { return forRetryMode(RetryMode.defaultRetryMode()); } @@ -51,7 +51,7 @@ private SdkDefaultRetryStrategy() { * @param mode The retry mode for which we want the retry strategy * @return the appropriate retry strategy for the retry mode with AWS-specific conditions added. */ - public static RetryStrategy forRetryMode(RetryMode mode) { + public static RetryStrategy forRetryMode(RetryMode mode) { switch (mode) { case STANDARD: return standardRetryStrategy(); @@ -72,7 +72,7 @@ private SdkDefaultRetryStrategy() { * @param retryStrategy The retry strategy to test for * @return The retry mode for the given strategy */ - public static RetryMode retryMode(RetryStrategy retryStrategy) { + public static RetryMode retryMode(RetryStrategy retryStrategy) { if (retryStrategy instanceof StandardRetryStrategy) { return RetryMode.STANDARD; } @@ -201,11 +201,11 @@ private static boolean retryOnThrottlingCondition(Throwable ex) { } /** - * Returns a {@link RetryStrategy} that implements the legacy {@link RetryMode#ADAPTIVE} mode. + * Returns a {@link RetryStrategy} that implements the legacy {@link RetryMode#ADAPTIVE} mode. 
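Taken together, the override-configuration and SdkDefaultRetryStrategy pieces above are used roughly as in the sketch below. This is a hedged illustration rather than documentation of this change: DynamoDbClient is used only as an example service client, and the dummy credentials and region mirror the ones used by the tests in this patch.

import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.awscore.retry.AwsRetryStrategy;
import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
import software.amazon.awssdk.core.internal.retry.SdkDefaultRetryStrategy;
import software.amazon.awssdk.core.retry.RetryMode;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.retries.api.RetryStrategy;
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;

public final class RetryStrategyOverrideSketch {

    public static void main(String[] args) {
        // A strategy for an explicit mode; AwsRetryStrategy layers the AWS-specific
        // retryable-error conditions on top of the SDK defaults.
        RetryStrategy standard = AwsRetryStrategy.forRetryMode(RetryMode.STANDARD);

        // The mode can be recovered from the strategy instance, which is what the
        // user-agent and retry-setup code paths above rely on.
        RetryMode mode = SdkDefaultRetryStrategy.retryMode(standard);

        // Pass the strategy to a client through ClientOverrideConfiguration, or use
        // doNotRetry() to disable retries as several tests in this change do.
        ClientOverrideConfiguration overrides = ClientOverrideConfiguration.builder()
                                                                           .retryStrategy(standard)
                                                                           .build();
        try (DynamoDbClient dynamoDb = DynamoDbClient.builder()
                                                     .region(Region.US_EAST_1)
                                                     .credentialsProvider(() -> AwsBasicCredentials.create("akid", "skid"))
                                                     .overrideConfiguration(overrides)
                                                     .build()) {
            System.out.println("Configured retry mode: " + mode);
        }
    }
}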
* - * @return a {@link RetryStrategy} that implements the legacy {@link RetryMode#ADAPTIVE} mode. + * @return a {@link RetryStrategy} that implements the legacy {@link RetryMode#ADAPTIVE} mode. */ - private static RetryStrategy legacyAdaptiveRetryStrategy() { + private static RetryStrategy legacyAdaptiveRetryStrategy() { return RetryPolicyAdapter.builder() .retryPolicy(RetryPolicy.forRetryMode(RetryMode.ADAPTIVE)) .build(); diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/AsyncClientHandlerExceptionTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/AsyncClientHandlerExceptionTest.java index f05f5f72759d..cc8a94dc6324 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/AsyncClientHandlerExceptionTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/AsyncClientHandlerExceptionTest.java @@ -88,7 +88,7 @@ public void methodSetup() throws Exception { SdkClientConfiguration config = HttpTestUtils.testClientConfiguration().toBuilder() .option(SdkClientOption.ASYNC_HTTP_CLIENT, asyncHttpClient) .option(SdkClientOption.RETRY_POLICY, RetryPolicy.none()) - .option(SdkClientOption.RETRY_STRATEGY, DefaultRetryStrategy.none()) + .option(SdkClientOption.RETRY_STRATEGY, DefaultRetryStrategy.doNotRetry()) .option(SdkAdvancedAsyncClientOption.FUTURE_COMPLETION_EXECUTOR, Runnable::run) .build(); diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/handler/AsyncClientHandlerTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/handler/AsyncClientHandlerTest.java index 055d6791a8ea..6482ab936fc2 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/handler/AsyncClientHandlerTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/handler/AsyncClientHandlerTest.java @@ -146,7 +146,7 @@ public SdkClientConfiguration clientConfiguration() { return HttpTestUtils.testClientConfiguration().toBuilder() .option(SdkClientOption.ASYNC_HTTP_CLIENT, httpClient) .option(SdkClientOption.RETRY_POLICY, RetryPolicy.none()) - .option(SdkClientOption.RETRY_STRATEGY, DefaultRetryStrategy.none()) + .option(SdkClientOption.RETRY_STRATEGY, DefaultRetryStrategy.doNotRetry()) .build(); } } diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/handler/SyncClientHandlerTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/handler/SyncClientHandlerTest.java index ff9ddc1d8c8e..b4d5f6fc44d6 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/handler/SyncClientHandlerTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/handler/SyncClientHandlerTest.java @@ -205,7 +205,7 @@ private ClientExecutionParams clientExecutionParams() { public SdkClientConfiguration clientConfiguration() { return HttpTestUtils.testClientConfiguration().toBuilder() .option(SdkClientOption.SYNC_HTTP_CLIENT, httpClient) - .option(SdkClientOption.RETRY_STRATEGY, DefaultRetryStrategy.none()) + .option(SdkClientOption.RETRY_STRATEGY, DefaultRetryStrategy.doNotRetry()) .build(); } } diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/AsyncHttpClientApiCallTimeoutTests.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/AsyncHttpClientApiCallTimeoutTests.java index 9af0dd441b0c..8f1ce6a9f437 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/AsyncHttpClientApiCallTimeoutTests.java +++ 
b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/AsyncHttpClientApiCallTimeoutTests.java @@ -63,7 +63,7 @@ public class AsyncHttpClientApiCallTimeoutTests { @Before public void setup() { httpClient = testAsyncClientBuilder() - .retryStrategy(DefaultRetryStrategy.none()) + .retryStrategy(DefaultRetryStrategy.doNotRetry()) .apiCallTimeout(API_CALL_TIMEOUT) .build(); } diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallAttemptTimeoutTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallAttemptTimeoutTest.java index a980536313ca..2dcf349495a6 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallAttemptTimeoutTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallAttemptTimeoutTest.java @@ -59,7 +59,7 @@ public class HttpClientApiCallAttemptTimeoutTest { @Before public void setup() { httpClient = testClientBuilder() - .retryStrategy(DefaultRetryStrategy.none()) + .retryStrategy(DefaultRetryStrategy.doNotRetry()) .apiCallAttemptTimeout(API_CALL_TIMEOUT) .build(); } diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallTimeoutTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallTimeoutTest.java index fbb18ac0b608..9a610485a4d9 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallTimeoutTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallTimeoutTest.java @@ -42,7 +42,6 @@ import software.amazon.awssdk.core.interceptor.InterceptorContext; import software.amazon.awssdk.core.internal.http.AmazonSyncHttpClient; import software.amazon.awssdk.core.internal.http.request.SlowExecutionInterceptor; -import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.core.signer.NoOpSigner; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.metrics.MetricCollector; @@ -60,7 +59,7 @@ public class HttpClientApiCallTimeoutTest { @Before public void setup() { httpClient = testClientBuilder() - .retryStrategy(DefaultRetryStrategy.none()) + .retryStrategy(DefaultRetryStrategy.doNotRetry()) .apiCallTimeout(API_CALL_TIMEOUT) .build(); } diff --git a/core/sdk-core/src/test/java/utils/HttpTestUtils.java b/core/sdk-core/src/test/java/utils/HttpTestUtils.java index b9279c283d2d..6c21a54c227b 100644 --- a/core/sdk-core/src/test/java/utils/HttpTestUtils.java +++ b/core/sdk-core/src/test/java/utils/HttpTestUtils.java @@ -84,13 +84,13 @@ public static SdkClientConfiguration testClientConfiguration() { } public static class TestClientBuilder { - private RetryStrategy retryStrategy; + private RetryStrategy retryStrategy; private SdkHttpClient httpClient; private Map additionalHeaders = new HashMap<>(); private Duration apiCallTimeout; private Duration apiCallAttemptTimeout; - public TestClientBuilder retryStrategy(RetryStrategy retryStrategy) { + public TestClientBuilder retryStrategy(RetryStrategy retryStrategy) { this.retryStrategy = retryStrategy; return this; } @@ -143,13 +143,13 @@ private void configureRetryStrategy(SdkClientConfiguration.Builder builder) { } public static class TestAsyncClientBuilder { - private RetryStrategy retryStrategy; + private RetryStrategy retryStrategy; private SdkAsyncHttpClient 
asyncHttpClient; private Duration apiCallTimeout; private Duration apiCallAttemptTimeout; private Map additionalHeaders = new HashMap<>(); - public TestAsyncClientBuilder retryStrategy(RetryStrategy retryStrategy) { + public TestAsyncClientBuilder retryStrategy(RetryStrategy retryStrategy) { this.retryStrategy = retryStrategy; return this; } diff --git a/services/dynamodb/src/main/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicy.java b/services/dynamodb/src/main/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicy.java index c2f06acf36e0..56f812528749 100644 --- a/services/dynamodb/src/main/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicy.java +++ b/services/dynamodb/src/main/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicy.java @@ -81,8 +81,8 @@ public static RetryPolicy resolveRetryPolicy(SdkClientConfiguration config) { return retryPolicyFor(retryMode); } - public static RetryStrategy resolveRetryStrategy(SdkClientConfiguration config) { - RetryStrategy configuredRetryStrategy = config.option(SdkClientOption.RETRY_STRATEGY); + public static RetryStrategy resolveRetryStrategy(SdkClientConfiguration config) { + RetryStrategy configuredRetryStrategy = config.option(SdkClientOption.RETRY_STRATEGY); if (configuredRetryStrategy != null) { return configuredRetryStrategy; } diff --git a/services/dynamodb/src/test/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicyTest.java b/services/dynamodb/src/test/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicyTest.java index aabb88a2d0af..8ad3240f9c9e 100644 --- a/services/dynamodb/src/test/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicyTest.java +++ b/services/dynamodb/src/test/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicyTest.java @@ -33,7 +33,7 @@ public void reset() { void test_numRetries_with_standardRetryPolicy() { environmentVariableHelper.set(SdkSystemSetting.AWS_RETRY_MODE.environmentVariable(), "standard"); SdkClientConfiguration sdkClientConfiguration = SdkClientConfiguration.builder().build(); - RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); + RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); assertThat(retryStrategy.maxAttempts()).isEqualTo(9); } @@ -41,7 +41,7 @@ void test_numRetries_with_standardRetryPolicy() { void test_numRetries_with_legacyRetryPolicy() { environmentVariableHelper.set(SdkSystemSetting.AWS_RETRY_MODE.environmentVariable(), "legacy"); SdkClientConfiguration sdkClientConfiguration = SdkClientConfiguration.builder().build(); - RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); + RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); assertThat(retryStrategy.maxAttempts()).isEqualTo(9); } @@ -49,7 +49,7 @@ void test_numRetries_with_legacyRetryPolicy() { void resolve_retryModeSetInEnv_doesNotCallSupplier() { environmentVariableHelper.set(SdkSystemSetting.AWS_RETRY_MODE.environmentVariable(), "standard"); SdkClientConfiguration sdkClientConfiguration = SdkClientConfiguration.builder().build(); - RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); + RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); RetryMode retryMode = SdkDefaultRetryStrategy.retryMode(retryStrategy); assertThat(retryMode).isEqualTo(RetryMode.STANDARD); } @@ -67,7 
+67,7 @@ void resolve_retryModeSetWithEnvAndSupplier_resolvesFromEnv() { .option(SdkClientOption.PROFILE_FILE_SUPPLIER, () -> profileFile) .option(SdkClientOption.PROFILE_NAME, "default") .build(); - RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); + RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); RetryMode retryMode = SdkDefaultRetryStrategy.retryMode(retryStrategy); assertThat(retryMode).isEqualTo(RetryMode.STANDARD); @@ -85,7 +85,7 @@ void resolve_retryModeSetWithSupplier_resolvesFromSupplier() { .option(SdkClientOption.PROFILE_FILE_SUPPLIER, () -> profileFile) .option(SdkClientOption.PROFILE_NAME, "default") .build(); - RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); + RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); RetryMode retryMode = SdkDefaultRetryStrategy.retryMode(retryStrategy); assertThat(retryMode).isEqualTo(RetryMode.ADAPTIVE); @@ -103,7 +103,7 @@ void resolve_retryModeSetWithSdkClientOption_resolvesFromSdkClientOption() { .option(SdkClientOption.PROFILE_NAME, "default") .option(SdkClientOption.DEFAULT_RETRY_MODE, RetryMode.STANDARD) .build(); - RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); + RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); RetryMode retryMode = SdkDefaultRetryStrategy.retryMode(retryStrategy); assertThat(retryMode).isEqualTo(RetryMode.STANDARD); @@ -120,7 +120,7 @@ void resolve_retryModeNotSetWithEnvNorSupplier_resolvesFromSdkDefault() { .option(SdkClientOption.PROFILE_FILE_SUPPLIER, () -> profileFile) .option(SdkClientOption.PROFILE_NAME, "default") .build(); - RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); + RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); RetryMode retryMode = SdkDefaultRetryStrategy.retryMode(retryStrategy); assertThat(retryMode).isEqualTo(RetryMode.LEGACY); diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/DefaultS3CrtAsyncClient.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/DefaultS3CrtAsyncClient.java index d7aebf83a48e..57a8eb24b909 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/DefaultS3CrtAsyncClient.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/DefaultS3CrtAsyncClient.java @@ -115,7 +115,7 @@ private static S3AsyncClient initializeS3AsyncClient(DefaultS3CrtClientBuilder b .putAdvancedOption(SdkAdvancedClientOption.SIGNER, new NoOpSigner()) .putExecutionAttribute(SdkExecutionAttribute.HTTP_RESPONSE_CHECKSUM_VALIDATION, ChecksumValidation.FORCE_SKIP) - .retryStrategy(AwsRetryStrategy.none()) + .retryStrategy(AwsRetryStrategy.doNotRetry()) .addExecutionInterceptor(new ValidateRequestInterceptor()) .addExecutionInterceptor(new AttachHttpAttributesExecutionInterceptor()); diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/SdkPluginTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/SdkPluginTest.java index 6be31d8b45ca..e6e257473bcd 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/SdkPluginTest.java +++ 
b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/SdkPluginTest.java @@ -197,7 +197,7 @@ public static Stream> testCases() { .beforeTransmissionValidator((r, a, v) -> { v.forEach((key, value) -> assertThat(r.httpRequest().headers().get(key)).isEqualTo(value)); }), - new TestCase>("override.retryStrategy") + new TestCase("override.retryStrategy") .defaultValue(SdkDefaultRetryStrategy.defaultRetryStrategy()) .nonDefaultValue(SdkDefaultRetryStrategy.standardRetryStrategyBuilder().maxAttempts(1).build()) .clientSetter((b, v) -> b.overrideConfiguration(c -> c.retryStrategy(v))) diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/BaseRetrySetupTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/BaseRetrySetupTest.java index 2e7ff6f8abd1..bb6897a74c36 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/BaseRetrySetupTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/BaseRetrySetupTest.java @@ -438,7 +438,7 @@ public void configureClient(SdkServiceClientConfiguration.Builder config) { } else if (scenario.retryImplementation() == RetryImplementation.STRATEGY) { assertThat(config.overrideConfiguration().retryPolicy()).isEmpty(); assertThat(config.overrideConfiguration().retryStrategy()).isNotEmpty(); - RetryStrategy strategy = config.overrideConfiguration().retryStrategy().get(); + RetryStrategy strategy = config.overrideConfiguration().retryStrategy().get(); assertThat(SdkDefaultRetryStrategy.retryMode(strategy)).isEqualTo(scenario.mode()); assertThat(strategy).isInstanceOf(scenario.expectedClass()); } diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/AsyncResponseThreadingTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/AsyncResponseThreadingTest.java index da7e1f4a2946..75157769ec69 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/AsyncResponseThreadingTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/AsyncResponseThreadingTest.java @@ -85,7 +85,7 @@ public void connectionError_completionWithNioThreadWorksCorrectly() { .endpointOverride(URI.create("http://localhost:" + wireMock.port())) .credentialsProvider(() -> AwsBasicCredentials.create("akid", "skid")) .asyncConfiguration(c -> c.advancedOption(FUTURE_COMPLETION_EXECUTOR, mockExecutor)) - .overrideConfiguration(o -> o.retryStrategy(AwsRetryStrategy.none())) + .overrideConfiguration(o -> o.retryStrategy(AwsRetryStrategy.doNotRetry())) .build(); assertThatThrownBy(() -> @@ -107,7 +107,7 @@ public void serverError_completionWithNioThreadWorksCorrectly() { .region(Region.US_WEST_1) .endpointOverride(URI.create("http://localhost:" + wireMock.port())) .credentialsProvider(() -> AwsBasicCredentials.create("akid", "skid")) - .overrideConfiguration(o -> o.retryStrategy(AwsRetryStrategy.none())) + .overrideConfiguration(o -> o.retryStrategy(AwsRetryStrategy.doNotRetry())) .asyncConfiguration(c -> c.advancedOption(FUTURE_COMPLETION_EXECUTOR, mockExecutor)) .build(); diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/connection/SyncClientConnectionInterruptionTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/connection/SyncClientConnectionInterruptionTest.java index f2fe3e0a5a3f..19c9cc046aa9 100644 --- 
a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/connection/SyncClientConnectionInterruptionTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/connection/SyncClientConnectionInterruptionTest.java @@ -25,7 +25,6 @@ import com.github.tomakehurst.wiremock.core.WireMockConfiguration; import java.net.URI; import java.time.Duration; -import java.util.List; import java.util.Optional; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -151,7 +150,7 @@ void interruptionDueToApiTimeOut_followed_byInterruptCausesOnlyTimeOutException( ExceptionInThreadRun exception = new ExceptionInThreadRun(); ProtocolRestJsonClient client = getClient(httpClient, Duration.ofMillis(10)) - .overrideConfiguration(o -> o.retryStrategy(AwsRetryStrategy.none())) + .overrideConfiguration(o -> o.retryStrategy(AwsRetryStrategy.doNotRetry())) .build(); unInterruptedSleep(100); // We need to creat a separate thread to interrupt it externally. diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/retry/AsyncAwsJsonRetryTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/retry/AsyncAwsJsonRetryTest.java index 8d0853bef54b..f69446ff1502 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/retry/AsyncAwsJsonRetryTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/retry/AsyncAwsJsonRetryTest.java @@ -149,7 +149,7 @@ public void retryStrategyNone_shouldNotRetry() { "skid"))) .region(Region.US_EAST_1) .endpointOverride(URI.create("http://localhost:" + wireMock.port())) - .overrideConfiguration(c -> c.retryStrategy(AwsRetryStrategy.none())) + .overrideConfiguration(c -> c.retryStrategy(AwsRetryStrategy.doNotRetry())) .build(); assertThatThrownBy(() -> clientWithNoRetry.allTypes(AllTypesRequest.builder().build()).join()) diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/retry/AwsJsonRetryTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/retry/AwsJsonRetryTest.java index 68bf4cf907c2..29dd97574383 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/retry/AwsJsonRetryTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/retry/AwsJsonRetryTest.java @@ -148,7 +148,7 @@ public void retryStrategyNone_shouldNotRetry() { "skid"))) .region(Region.US_EAST_1) .endpointOverride(URI.create("http://localhost:" + wireMock.port())) - .overrideConfiguration(c -> c.retryStrategy(AwsRetryStrategy.none())) + .overrideConfiguration(c -> c.retryStrategy(AwsRetryStrategy.doNotRetry())) .build(); assertThatThrownBy(() -> clientWithNoRetry.allTypes(AllTypesRequest.builder().build())).isInstanceOf(ProtocolJsonRpcException.class); diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/async/AsyncApiCallAttemptsTimeoutTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/async/AsyncApiCallAttemptsTimeoutTest.java index c407455626a5..615a2cb6f2d8 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/async/AsyncApiCallAttemptsTimeoutTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/async/AsyncApiCallAttemptsTimeoutTest.java @@ -60,7 +60,7 @@ public void setup() { .httpClient(mockClient) .credentialsProvider(() -> AwsBasicCredentials.create("akid", 
"skid")) .overrideConfiguration(b -> b.apiCallAttemptTimeout(API_CALL_ATTEMPT_TIMEOUT) - .retryStrategy(AwsRetryStrategy.none())) + .retryStrategy(AwsRetryStrategy.doNotRetry())) .build(); clientWithRetry = ProtocolRestJsonAsyncClient.builder() diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/async/AsyncApiCallTimeoutTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/async/AsyncApiCallTimeoutTest.java index eefa47f2ed27..610196383386 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/async/AsyncApiCallTimeoutTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/async/AsyncApiCallTimeoutTest.java @@ -57,7 +57,7 @@ public void setup() { .httpClient(mockClient) .credentialsProvider(() -> AwsBasicCredentials.create("akid", "skid")) .overrideConfiguration(b -> b.apiCallTimeout(TIMEOUT) - .retryStrategy(AwsRetryStrategy.none())) + .retryStrategy(AwsRetryStrategy.doNotRetry())) .build(); clientWithRetry = ProtocolRestJsonAsyncClient.builder() @@ -137,7 +137,7 @@ public ProtocolRestJsonAsyncClient createClientWithMockClient(MockAsyncHttpClien .httpClient(mockClient) .credentialsProvider(() -> AwsBasicCredentials.create("akid", "skid")) .overrideConfiguration(b -> b.apiCallTimeout(TIMEOUT) - .retryStrategy(AwsRetryStrategy.none())) + .retryStrategy(AwsRetryStrategy.doNotRetry())) .build(); } diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncApiCallAttemptTimeoutTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncApiCallAttemptTimeoutTest.java index 84725f07c539..4a9fa31d938e 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncApiCallAttemptTimeoutTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncApiCallAttemptTimeoutTest.java @@ -54,7 +54,7 @@ public void setup() { .credentialsProvider(() -> AwsBasicCredentials.create("akid", "skid")) .overrideConfiguration( b -> b.apiCallAttemptTimeout(API_CALL_ATTEMPT_TIMEOUT) - .retryStrategy(AwsRetryStrategy.none())) + .retryStrategy(AwsRetryStrategy.doNotRetry())) .build(); clientWithRetry = ProtocolRestJsonClient.builder() diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncApiCallTimeoutTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncApiCallTimeoutTest.java index 8dc66cea9b37..135f6db74433 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncApiCallTimeoutTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncApiCallTimeoutTest.java @@ -58,7 +58,7 @@ public void setup() { .httpClient(mockClient) .credentialsProvider(() -> AwsBasicCredentials.create("akid", "skid")) .overrideConfiguration(b -> b.apiCallTimeout(TIMEOUT) - .retryStrategy(AwsRetryStrategy.none())) + .retryStrategy(AwsRetryStrategy.doNotRetry())) .build(); clientWithRetry = ProtocolRestJsonClient.builder() diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncStreamingOperationApiCallAttemptTimeoutTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncStreamingOperationApiCallAttemptTimeoutTest.java index ca1c960cde17..e845d01e5d8e 100644 
--- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncStreamingOperationApiCallAttemptTimeoutTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncStreamingOperationApiCallAttemptTimeoutTest.java @@ -33,6 +33,6 @@ Class expectedException() { @Override ClientOverrideConfiguration clientOverrideConfiguration() { return ClientOverrideConfiguration.builder().apiCallAttemptTimeout(Duration.ofMillis(TIMEOUT)) - .retryStrategy(AwsRetryStrategy.none()).build(); + .retryStrategy(AwsRetryStrategy.doNotRetry()).build(); } } diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncStreamingOperationApiCallTimeoutTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncStreamingOperationApiCallTimeoutTest.java index aec142d71bcb..4d740ca79cfa 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncStreamingOperationApiCallTimeoutTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/timeout/sync/SyncStreamingOperationApiCallTimeoutTest.java @@ -34,7 +34,7 @@ Class expectedException() { ClientOverrideConfiguration clientOverrideConfiguration() { return ClientOverrideConfiguration.builder() .apiCallTimeout(Duration.ofMillis(TIMEOUT)) - .retryStrategy(AwsRetryStrategy.none()) + .retryStrategy(AwsRetryStrategy.doNotRetry()) .build(); } } diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchCrtAsyncStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchCrtAsyncStabilityTest.java index d5d93f098703..877e7be2f3cc 100644 --- a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchCrtAsyncStabilityTest.java +++ b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchCrtAsyncStabilityTest.java @@ -47,7 +47,7 @@ public static void setup() { .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) .overrideConfiguration(b -> b.apiCallTimeout(Duration.ofMinutes(10)) // Retry at test level - .retryStrategy(DefaultRetryStrategy.none())) + .retryStrategy(DefaultRetryStrategy.doNotRetry())) .build(); } diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchNettyAsyncStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchNettyAsyncStabilityTest.java index 6ee551ee664c..80f864e2893f 100644 --- a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchNettyAsyncStabilityTest.java +++ b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchNettyAsyncStabilityTest.java @@ -44,7 +44,7 @@ public static void setup() { .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) .overrideConfiguration(b -> b // Retry at test level - .retryStrategy(DefaultRetryStrategy.none()) + .retryStrategy(DefaultRetryStrategy.doNotRetry()) .apiCallTimeout(Duration.ofMinutes(1))) .build(); } diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3AsyncWithCrtAsyncHttpClientStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3AsyncWithCrtAsyncHttpClientStabilityTest.java index 4bee2fc3d8eb..e66a78babbd4 100644 --- 
a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3AsyncWithCrtAsyncHttpClientStabilityTest.java +++ b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3AsyncWithCrtAsyncHttpClientStabilityTest.java @@ -29,7 +29,7 @@ public class S3AsyncWithCrtAsyncHttpClientStabilityTest extends S3AsyncBaseStabi .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) .overrideConfiguration(b -> b.apiCallTimeout(Duration.ofMinutes(10)) // Retry at test level - .retryStrategy(DefaultRetryStrategy.none())) + .retryStrategy(DefaultRetryStrategy.doNotRetry())) .build(); } diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3NettyAsyncStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3NettyAsyncStabilityTest.java index a25e8ddf703d..418566109110 100644 --- a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3NettyAsyncStabilityTest.java +++ b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3NettyAsyncStabilityTest.java @@ -23,7 +23,7 @@ public class S3NettyAsyncStabilityTest extends S3AsyncBaseStabilityTest { .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) .overrideConfiguration(b -> b.apiCallTimeout(Duration.ofMinutes(10)) // Retry at test level - .retryStrategy(DefaultRetryStrategy.none())) + .retryStrategy(DefaultRetryStrategy.doNotRetry())) .build(); } diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/sqs/SqsCrtAsyncStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/sqs/SqsCrtAsyncStabilityTest.java index 3ee21c8783d7..5d19592cb03f 100644 --- a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/sqs/SqsCrtAsyncStabilityTest.java +++ b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/sqs/SqsCrtAsyncStabilityTest.java @@ -50,7 +50,7 @@ public static void setup() { .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) .overrideConfiguration(b -> b.apiCallTimeout(Duration.ofMinutes(10)) // Retry at test level - .retryStrategy(DefaultRetryStrategy.none())) + .retryStrategy(DefaultRetryStrategy.doNotRetry())) .build(); queueName = "sqscrtasyncstabilitytests" + System.currentTimeMillis(); From 3b5cdbdca10136eaf42b90b4731b73f98893e911 Mon Sep 17 00:00:00 2001 From: Manuel Sugawara Date: Wed, 5 Jun 2024 10:14:26 -0700 Subject: [PATCH 32/32] External names used for retry modes only support 'adaptive' (#5265) * Externally named retry modes only support 'adaptive' Behind the scenes this will be mapped to RetryMode.ADAPTIVE_V2 which makes it a non-backwards compatible behavioral change. 
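Concretely, the external-name change described above behaves as in the following sketch. The profile content mirrors the one built in BaseRetrySetupTest later in this patch; the class name and printed output are illustrative assumptions, not part of this change.

import software.amazon.awssdk.core.retry.RetryMode;
import software.amazon.awssdk.profiles.ProfileFile;
import software.amazon.awssdk.utils.StringInputStream;

public final class RetryModeExternalNameSketch {

    public static void main(String[] args) {
        // A configuration profile using the externally supported setting name.
        ProfileFile profileFile = ProfileFile.builder()
                                             .content(new StringInputStream("[profile retry_test]\n"
                                                                            + "retry_mode = adaptive"))
                                             .type(ProfileFile.Type.CONFIGURATION)
                                             .build();

        RetryMode resolved = RetryMode.resolver()
                                      .profileFile(() -> profileFile)
                                      .profileName("retry_test")
                                      .resolve();

        // After this change "adaptive" resolves to ADAPTIVE_V2, while the internal
        // name "adaptive_v2" is rejected if configured externally.
        System.out.println(resolved); // prints ADAPTIVE_V2
    }
}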
* Sneak in a fix from the previous PR * Fix a test that expects adaptive to map to `RetryMode.ADAPTIVE` * Fix typos in the comments --- .../awscore/retry/AwsRetryStrategy.java | 2 +- .../awssdk/retries/DefaultRetryStrategy.java | 2 +- .../amazon/awssdk/core/retry/RetryMode.java | 2 - .../awssdk/core/retry/RetryModeTest.java | 4 +- .../dynamodb/DynamoDbRetryPolicyTest.java | 2 +- .../services/retry/BaseRetrySetupTest.java | 39 ++++++++++++++++--- 6 files changed, 39 insertions(+), 12 deletions(-) diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java index 510c77ae030b..bef57e64f301 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/retry/AwsRetryStrategy.java @@ -79,7 +79,7 @@ public static RetryStrategy addRetryConditions(RetryStrategy strategy) { } /** - * Returns a retry strategy that do not retry. + * Returns a retry strategy that does not retry. * * @return A retry strategy that do not retry. */ diff --git a/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java b/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java index 5574e0e22b5d..b09892375119 100644 --- a/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java +++ b/core/retries/src/main/java/software/amazon/awssdk/retries/DefaultRetryStrategy.java @@ -30,7 +30,7 @@ private DefaultRetryStrategy() { } /** - * Returns a retry strategy that do not retry. + * Returns a retry strategy that does not retry. */ public static StandardRetryStrategy doNotRetry() { return standardStrategyBuilder() diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryMode.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryMode.java index ef84892d21d8..fdfe4fa68a82 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryMode.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/RetryMode.java @@ -197,8 +197,6 @@ private static Optional fromString(String string) { case "standard": return Optional.of(STANDARD); case "adaptive": - return Optional.of(ADAPTIVE); - case "adaptive_v2": return Optional.of(ADAPTIVE_V2); default: throw new IllegalStateException("Unsupported retry policy mode configured: " + string); diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/retry/RetryModeTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/retry/RetryModeTest.java index 5ec5288fd3fa..b5fb23c37ed4 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/retry/RetryModeTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/retry/RetryModeTest.java @@ -50,10 +50,10 @@ public static Collection data() { // Test resolution new TestData("legacy", null, null, null, RetryMode.LEGACY), new TestData("standard", null, null, null, RetryMode.STANDARD), - new TestData("adaptive", null, null, null, RetryMode.ADAPTIVE), + new TestData("adaptive", null, null, null, RetryMode.ADAPTIVE_V2), new TestData("lEgAcY", null, null, null, RetryMode.LEGACY), new TestData("sTanDaRd", null, null, null, RetryMode.STANDARD), - new TestData("aDaPtIvE", null, null, null, RetryMode.ADAPTIVE), + new TestData("aDaPtIvE", null, null, null, RetryMode.ADAPTIVE_V2), // Test precedence new TestData("standard", "legacy", 
"PropertySetToLegacy", RetryMode.LEGACY, RetryMode.STANDARD), diff --git a/services/dynamodb/src/test/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicyTest.java b/services/dynamodb/src/test/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicyTest.java index 8ad3240f9c9e..bcb25c2504d6 100644 --- a/services/dynamodb/src/test/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicyTest.java +++ b/services/dynamodb/src/test/java/software/amazon/awssdk/services/dynamodb/DynamoDbRetryPolicyTest.java @@ -88,7 +88,7 @@ void resolve_retryModeSetWithSupplier_resolvesFromSupplier() { RetryStrategy retryStrategy = DynamoDbRetryPolicy.resolveRetryStrategy(sdkClientConfiguration); RetryMode retryMode = SdkDefaultRetryStrategy.retryMode(retryStrategy); - assertThat(retryMode).isEqualTo(RetryMode.ADAPTIVE); + assertThat(retryMode).isEqualTo(RetryMode.ADAPTIVE_V2); } @Test diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/BaseRetrySetupTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/BaseRetrySetupTest.java index bb6897a74c36..980b95ea9c84 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/BaseRetrySetupTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/retry/BaseRetrySetupTest.java @@ -94,7 +94,7 @@ private void setupRetryPolicy(BuilderT builder, RetryScenario scenario) { RetryModeSetup setup = scenario.setup(); switch (setup) { case PROFILE_USING_MODE: - setupProfile(builder, scenario.mode()); + setupProfile(builder, scenario); break; case CLIENT_OVERRIDE_USING_MODE: builder.overrideConfiguration(o -> o.retryPolicy(mode)); @@ -115,7 +115,7 @@ private void setupRetryStrategy(BuilderT builder, RetryScenario scenario) { // client. switch (scenario.setup()) { case PROFILE_USING_MODE: - setupProfile(builder, scenario.mode()); + setupProfile(builder, scenario); break; case CLIENT_OVERRIDE_USING_MODE: builder.overrideConfiguration(o -> o.retryStrategy(mode)); @@ -130,8 +130,8 @@ private void setupRetryStrategy(BuilderT builder, RetryScenario scenario) { } } - private void setupProfile(BuilderT builder, RetryMode mode) { - String modeName = mode.toString().toLowerCase(Locale.ROOT); + private void setupProfile(BuilderT builder, RetryScenario scenario) { + String modeName = scenario.modeExternalName(); ProfileFile profileFile = ProfileFile.builder() .content(new StringInputStream("[profile retry_test]\n" + "retry_mode = " + modeName)) @@ -165,7 +165,7 @@ private BuilderT clientBuilder() { private void setupScenarioBefore(RetryScenario scenario) { if (scenario.setup() == RetryModeSetup.SYSTEM_PROPERTY_USING_MODE) { - System.setProperty("aws.retryMode", scenario.mode().name().toLowerCase(Locale.ROOT)); + System.setProperty("aws.retryMode", scenario.modeExternalName()); } } @@ -236,6 +236,16 @@ private static boolean isSupportedScenario(RetryScenario scenario) { return false; } + // System property or profile do not support the internal "adaptive_v2" name, only adaptive, + // and it's mapped to adaptive_v2. We mark here adaptive using profile or system property + // and map in the tests "adaptive_v2" to "adaptive" such that everything comes together at + // the end. 
+        if (scenario.mode() == RetryMode.ADAPTIVE
+            && (scenario.setup() == RetryModeSetup.PROFILE_USING_MODE
+                || scenario.setup() == RetryModeSetup.SYSTEM_PROPERTY_USING_MODE)) {
+            return false;
+        }
+
         // Retry policies only support the legacy ADAPTIVE mode.
         if (scenario.retryImplementation() == RetryImplementation.POLICY
             && scenario.mode() == RetryMode.ADAPTIVE_V2) {
             return false;
@@ -323,6 +333,25 @@ public Builder toBuilder() {
             return new Builder(this);
         }
 
+        /**
+         * Returns the name used externally for the given mode. This name is used in the profile `retry_mode`
+         * setting or in the system property. Externally, "adaptive" gets mapped to RetryMode.ADAPTIVE_V2, and
+         * "adaptive_v2" is an internal name only and is not supported externally.
+         */
+        public String modeExternalName() {
+            switch (mode) {
+                case ADAPTIVE:
+                case ADAPTIVE_V2:
+                    return "adaptive";
+                case LEGACY:
+                    return "legacy";
+                case STANDARD:
+                    return "standard";
+                default:
+                    throw new RuntimeException("Unsupported mode: " + mode);
+            }
+        }
+
         @Override
         public String toString() {
             return mode + " " + retryImplementation + " " + setup;